/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

/*
 * The table always has exactly one reference from either mapped_device->map
 * or hash_cell->new_map. This reference is not counted in table->holders.
 * A pair of dm_create_table/dm_destroy_table functions is used for table
 * creation/destruction.
 *
 * Temporary references from the other code increase table->holders. A pair
 * of dm_table_get/dm_table_put functions is used to manipulate it.
 *
 * When the table is about to be destroyed, we wait for table->holders to
 * drop to zero.
 */

struct dm_table {
	struct mapped_device *md;
	atomic_t holders;
	unsigned type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	/*
	 * Indicates the rw permissions for the new logical
	 * device. This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;
};

/*
 * Similar to ceiling(log_base(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

/*
 * Calculate the index of the child node of the n'th node's k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
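
/*
 * Illustrative btree sizing (assuming a 64-byte L1 cache line and an
 * 8-byte sector_t): NODE_SIZE == 64, KEYS_PER_NODE == 8 and
 * CHILDREN_PER_NODE == 9, so even a modest MAX_DEPTH of 16 can index far
 * more targets than any real table contains.
 */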

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}

int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	atomic_set(&t->holders, 0);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		t = NULL;
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}

static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		DMWARN("dm_table_destroy: dm_put_device call missing for %s",
		       dd->dm_dev.name);
		kfree(dd);
	}
}

void dm_table_destroy(struct dm_table *t)
{
	unsigned int i;

	if (!t)
		return;

	while (atomic_read(&t->holders))
		msleep(1);
	smp_mb();

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices)
		free_devices(&t->devices);

	dm_free_md_mempools(t->mempools);

	kfree(t);
}

void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	smp_mb__before_atomic_dec();
	atomic_dec(&t->holders);
}
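
/*
 * Sketch of the temporary-reference pattern described at the top of this
 * file: code that borrows a live table brackets its use with a holder
 * reference, e.g.
 *
 *	dm_table_get(t);
 *	... look up targets / map I/O against t ...
 *	dm_table_put(t);
 *
 * dm_table_destroy() spins (in msleep(1) steps) until all such holders
 * have been dropped.
 */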

/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry (dd, l, list)
		if (dd->dm_dev.bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev_internal *d, dev_t dev,
		    struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(d->dm_dev.bdev);

	bdev = open_by_devnum(dev, d->dm_dev.mode);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
	if (r)
		blkdev_put(bdev, d->dm_dev.mode);
	else
		d->dm_dev.bdev = bdev;
	return r;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
	if (!d->dm_dev.bdev)
		return;

	bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
	d->dm_dev.bdev = NULL;
}

/*
 * If possible, this checks whether an area of a destination device is
 * invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	sector_t dev_size =
		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	unsigned short logical_block_size_sectors =
		limits->logical_block_size >> SECTOR_SHIFT;
	char b[BDEVNAME_SIZE];

	if (!dev_size)
		return 0;

	if ((start >= dev_size) || (start + len > dev_size)) {
		DMWARN("%s: %s too small for target: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	if (logical_block_size_sectors <= 1)
		return 0;

	if (start & (logical_block_size_sectors - 1)) {
		DMWARN("%s: start=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)start,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	if (len & (logical_block_size_sectors - 1)) {
		DMWARN("%s: len=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)len,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	return 0;
}
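
/*
 * Example of the alignment checks above (illustrative): with a 4096-byte
 * logical_block_size, logical_block_size_sectors is 8, so a target area
 * starting at sector 7, or one whose length is 12 sectors, is rejected
 * because 7 & 7 and 12 & 7 are both non-zero.
 */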

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev_internal dd_new, dd_old;

	dd_new = dd_old = *dd;

	dd_new.dm_dev.mode |= new_mode;
	dd_new.dm_dev.bdev = NULL;

	r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
	if (r)
		return r;

	dd->dm_dev.mode |= new_mode;
	close_dev(&dd_old, md);

	return 0;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      fmode_t mode, struct dm_dev **result)
{
	int r;
	dev_t uninitialized_var(dev);
	struct dm_dev_internal *dd;
	unsigned int major, minor;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		struct block_device *bdev = lookup_bdev(path);

		if (IS_ERR(bdev))
			return PTR_ERR(bdev);
		dev = bdev->bd_dev;
		bdput(bdev);
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->dm_dev.mode = mode;
		dd->dm_dev.bdev = NULL;

		if ((r = open_dev(dd, dev, t->md))) {
			kfree(dd);
			return r;
		}

		format_dev_t(dd->dm_dev.name, dev);

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	*result = &dd->dm_dev;
	return 0;
}

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
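
/*
 * For example, min_not_zero(0, 8) == 8, min_not_zero(4, 0) == 4 and
 * min_not_zero(4, 8) == 4; only when both arguments are zero does it
 * return zero.
 */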

int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
			 sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	char b[BDEVNAME_SIZE];

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return 0;
	}

	if (bdev_stack_limits(limits, bdev, start) < 0)
		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
		       "physical_block_size=%u, logical_block_size=%u, "
		       "alignment_offset=%u, start=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       q->limits.physical_block_size,
		       q->limits.logical_block_size,
		       q->limits.alignment_offset,
		       (unsigned long long) start << SECTOR_SHIFT);

	/*
	 * Check if merge fn is supported.
	 * If not, we'll force DM to use PAGE_SIZE or
	 * smaller I/O, just to be safe.
	 */

	if (q->merge_bvec_fn && !ti->type->merge)
		limits->max_sectors =
			min_not_zero(limits->max_sectors,
				     (unsigned int) (PAGE_SIZE >> 9));
	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);

int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, fmode_t mode, struct dm_dev **result)
{
	return __table_get_device(ti->table, ti, path,
				  start, len, mode, result);
}


/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
						  dm_dev);

	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
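
/*
 * Example (illustrative): dm_split_args() turns the params string
 * "/dev/hda 0" into argc == 2 with argv[] == { "/dev/hda", "0" }.
 * A backslash escapes the following character, so "a\ b" is parsed as
 * the single argument "a b".
 */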

/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully. If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
						     struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
		limits->logical_block_size >> SECTOR_SHIFT;

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
	 */
	unsigned short remaining = 0;

	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i = 0;

	/*
	 * Check each entry in the table in turn.
	 */
	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		blk_set_default_limits(&ti_limits);

		/* combine all target devices' limits */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

		/*
		 * If the remaining sectors fall entirely within this
		 * table entry, are they compatible with its logical_block_size?
		 */
		if (remaining < ti->len &&
		    remaining & ((ti_limits.logical_block_size >>
				  SECTOR_SHIFT) - 1))
			break;	/* Error */

		next_target_start =
		    (unsigned short) ((next_target_start + ti->len) &
				      (device_logical_block_size_sects - 1));
		remaining = next_target_start ?
		    device_logical_block_size_sects - next_target_start : 0;
	}

	if (remaining) {
		DMWARN("%s: table line %u (start sect %llu len %llu) "
		       "not aligned to h/w logical block size %u",
		       dm_device_name(table->md), i,
		       (unsigned long long) ti->begin,
		       (unsigned long long) ti->len,
		       limits->logical_block_size);
		return -EINVAL;
	}

	return 0;
}
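
/*
 * Example (illustrative): with a table-wide 4096-byte logical_block_size
 * (device_logical_block_size_sects == 8), a first target of 20 sectors
 * ends 4 sectors into a logical block, so remaining == 4 for the next
 * target. That is only acceptable if the next target's own
 * logical_block_size divides those 4 sectors (e.g. 512 or 2048 bytes);
 * otherwise the loop above breaks and the table is rejected as misaligned.
 */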

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}
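
/*
 * Example (illustrative): a table line such as "0 409600 linear /dev/hda 0"
 * reaches dm_table_add_target() as start == 0, len == 409600,
 * type == "linear" and params == "/dev/hda 0"; the params string is split
 * with dm_split_args() and handed to the linear target's ctr.
 */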

int dm_table_set_type(struct dm_table *t)
{
	unsigned i;
	unsigned bio_based = 0, request_based = 0;
	struct dm_target *tgt;
	struct dm_dev_internal *dd;
	struct list_head *devices;

	for (i = 0; i < t->num_targets; i++) {
		tgt = t->targets + i;
		if (dm_target_request_based(tgt))
			request_based = 1;
		else
			bio_based = 1;

		if (bio_based && request_based) {
			DMWARN("Inconsistent table: different target types"
			       " can't be mixed up");
			return -EINVAL;
		}
	}

	if (bio_based) {
		/* We must use this table as bio-based */
		t->type = DM_TYPE_BIO_BASED;
		return 0;
	}

	BUG_ON(!request_based); /* No targets in this table */

	/* Non-request-stackable devices can't be used for request-based dm */
	devices = dm_table_get_devices(t);
	list_for_each_entry(dd, devices, list) {
		if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
			DMWARN("table load rejected: including"
			       " non-request-stackable devices");
			return -EINVAL;
		}
	}

	/*
	 * For now, request-based dm supports only tables that have a single
	 * target. To support multiple targets, request splitting support is
	 * needed, and that needs lots of changes in the block layer (e.g. the
	 * request completion process for partial completion).
	 */
	if (t->num_targets > 1) {
		DMWARN("Request-based dm doesn't support multiple targets yet");
		return -EINVAL;
	}

	t->type = DM_TYPE_REQUEST_BASED;

	return 0;
}

unsigned dm_table_get_type(struct dm_table *t)
{
	return t->type;
}

bool dm_table_request_based(struct dm_table *t)
{
	return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
}

int dm_table_alloc_md_mempools(struct dm_table *t)
{
	unsigned type = dm_table_get_type(t);

	if (unlikely(type == DM_TYPE_NONE)) {
		DMWARN("no table type is set, can't allocate mempools");
		return -EINVAL;
	}

	t->mempools = dm_alloc_md_mempools(type);
	if (!t->mempools)
		return -ENOMEM;

	return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
	dm_free_md_mempools(t->mempools);
	t->mempools = NULL;
}

struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
	return t->mempools;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
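
/*
 * Illustrative example (assuming KEYS_PER_NODE == 8, CHILDREN_PER_NODE == 9):
 * a table of 100 targets has leaf_nodes == dm_div_up(100, 8) == 13, so
 * dm_table_complete() sets depth == 1 + int_log(13, 9) == 3 and
 * setup_indexes() allocates two internal levels of 2 and 1 nodes above
 * the 13 leaf nodes.
 */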

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}

/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits)
{
	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i = 0;

	blk_set_default_limits(limits);

	while (i < dm_table_get_num_targets(table)) {
		blk_set_default_limits(&ti_limits);

		ti = dm_table_get_target(table, i++);

		if (!ti->type->iterate_devices)
			goto combine_limits;

		/*
		 * Combine queue limits of all the devices this target uses.
		 */
		ti->type->iterate_devices(ti, dm_set_device_limits,
					  &ti_limits);

		/* Set I/O hints portion of queue limits */
		if (ti->type->io_hints)
			ti->type->io_hints(ti, &ti_limits);

		/*
		 * Check each device area is consistent with the target's
		 * overall queue limits.
		 */
		if (ti->type->iterate_devices(ti, device_area_is_invalid,
					      &ti_limits))
			return -EINVAL;

combine_limits:
		/*
		 * Merge this target's queue limits into the overall limits
		 * for the table.
		 */
		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
			DMWARN("%s: adding target device "
			       "(start sect %llu len %llu) "
			       "caused an alignment inconsistency",
			       dm_device_name(table->md),
			       (unsigned long long) ti->begin,
			       (unsigned long long) ti->len);
	}

	return validate_hardware_logical_block_alignment(table, limits);
}
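
/*
 * Example (illustrative): if one target's devices report a 512-byte
 * logical_block_size and another's report 4096 bytes, blk_stack_limits()
 * raises the table-wide logical_block_size to 4096, and
 * validate_hardware_logical_block_alignment() then checks every target
 * boundary against that stricter value.
 */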

/*
 * Set the integrity profile for this device if all devices used have
 * matching profiles.
 */
static void dm_table_set_integrity(struct dm_table *t)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *prev = NULL, *dd = NULL;

	if (!blk_get_integrity(dm_disk(t->md)))
		return;

	list_for_each_entry(dd, devices, list) {
		if (prev &&
		    blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
					  dd->dm_dev.bdev->bd_disk) < 0) {
			DMWARN("%s: integrity not set: %s and %s mismatch",
			       dm_device_name(t->md),
			       prev->dm_dev.bdev->bd_disk->disk_name,
			       dd->dm_dev.bdev->bd_disk->disk_name);
			goto no_integrity;
		}
		prev = dd;
	}

	if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
		goto no_integrity;

	blk_integrity_register(dm_disk(t->md),
			       bdev_get_integrity(prev->dm_dev.bdev));

	return;

no_integrity:
	blk_integrity_register(dm_disk(t->md), NULL);

	return;
}
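
/*
 * Note (sketch of the usual call ordering, not enforced here): the core dm
 * code computes the table's limits with dm_calculate_queue_limits() and
 * then applies them via dm_table_set_restrictions() below when the table
 * is bound to the device.
 */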

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits)
{
	/*
	 * Copy table's limits to the DM device's request_queue
	 */
	q->limits = *limits;

	if (limits->no_cluster)
		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);

	dm_table_set_integrity(t);

	/*
	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
	 * visible to other CPUs because, once the flag is set, incoming bios
	 * are processed by request-based dm, which refers to the queue
	 * settings.
	 * Until the flag is set, bios are passed to bio-based dm and queued
	 * to md->deferred where queue settings are not needed yet.
	 * Those bios are passed to request-based dm at resume time.
	 */
	smp_mb();
	if (dm_table_request_based(t))
		queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

fmode_t dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}

static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 1);
}

int dm_table_resume_targets(struct dm_table *t)
{
	int i, r = 0;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r)
			return r;
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
		else
			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev.bdev, b));
	}

	return r;
}

int dm_table_any_busy_target(struct dm_table *t)
{
	unsigned i;
	struct dm_target *ti;

	for (i = 0; i < t->num_targets; i++) {
		ti = t->targets + i;
		if (ti->type->busy && ti->type->busy(ti))
			return 1;
	}

	return 0;
}

void dm_table_unplug_all(struct dm_table *t)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			blk_unplug(q);
		else
			DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev.bdev, b));
	}
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	dm_get(t->md);

	return t->md;
}

EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);
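
/*
 * Typical table load sequence (sketch, assuming the usual ioctl-driven
 * flow in dm-ioctl.c rather than anything enforced in this file):
 *
 *	dm_table_create(&t, mode, num_targets, md);
 *	dm_table_add_target(t, "linear", 0, 409600, "/dev/hda 0");
 *	...				// one call per table line
 *	dm_table_complete(t);		// build the lookup btree
 *	dm_table_set_type(t);
 *	dm_table_alloc_md_mempools(t);
 *	...				// table bound to md and used for I/O
 *	dm_table_destroy(t);		// when replaced or the device goes away
 */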