/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/dm-ioctl.h>
#include <linux/hdreg.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#define DM_MSG_PREFIX "ioctl"
#define DM_DRIVER_EMAIL "dm-devel@redhat.com"

/*-----------------------------------------------------------------
 * The ioctl interface needs to be able to look up devices by
 * name or uuid.
 *---------------------------------------------------------------*/
struct hash_cell {
	struct list_head name_list;
	struct list_head uuid_list;

	char *name;
	char *uuid;
	struct mapped_device *md;
	struct dm_table *new_map;
};

/*
 * A dummy definition to make RCU happy.
 * struct dm_table should never be dereferenced in this file.
 */
struct dm_table {
	int undefined__;
};

struct vers_iter {
	size_t param_size;
	struct dm_target_versions *vers, *old_vers;
	char *end;
	uint32_t flags;
};

#define NUM_BUCKETS 64
#define MASK_BUCKETS (NUM_BUCKETS - 1)
static struct list_head _name_buckets[NUM_BUCKETS];
static struct list_head _uuid_buckets[NUM_BUCKETS];

static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred);

/*
 * Guards access to both hash tables.
 */
static DECLARE_RWSEM(_hash_lock);

/*
 * Protects use of mdptr to obtain hash cell name and uuid from mapped device.
 */
static DEFINE_MUTEX(dm_hash_cells_mutex);

static void init_buckets(struct list_head *buckets)
{
	unsigned int i;

	for (i = 0; i < NUM_BUCKETS; i++)
		INIT_LIST_HEAD(buckets + i);
}

static int dm_hash_init(void)
{
	init_buckets(_name_buckets);
	init_buckets(_uuid_buckets);
	return 0;
}

static void dm_hash_exit(void)
{
	dm_hash_remove_all(false, false, false);
}

/*-----------------------------------------------------------------
 * Hash function:
 * We're not really concerned with the str hash function being
 * fast since it's only used by the ioctl interface.
 *---------------------------------------------------------------*/
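
/*
 * hash_str() is a Knuth-style multiplicative hash: each character is
 * folded into the running value, which is then multiplied by a large
 * odd constant, and the low bits select one of the NUM_BUCKETS
 * (power-of-two) chains.  For example, for the two-character name
 * "ab" the bucket works out as:
 *
 *	h = (0 + 'a') * hash_mult;
 *	h = (h + 'b') * hash_mult;
 *	bucket = h & MASK_BUCKETS;	(a value in 0..63)
 */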
static unsigned int hash_str(const char *str)
{
	const unsigned int hash_mult = 2654435387U;
	unsigned int h = 0;

	while (*str)
		h = (h + (unsigned int) *str++) * hash_mult;

	return h & MASK_BUCKETS;
}

/*-----------------------------------------------------------------
 * Code for looking up a device by name
 *---------------------------------------------------------------*/
static struct hash_cell *__get_name_cell(const char *str)
{
	struct hash_cell *hc;
	unsigned int h = hash_str(str);

	list_for_each_entry (hc, _name_buckets + h, name_list)
		if (!strcmp(hc->name, str)) {
			dm_get(hc->md);
			return hc;
		}

	return NULL;
}

static struct hash_cell *__get_uuid_cell(const char *str)
{
	struct hash_cell *hc;
	unsigned int h = hash_str(str);

	list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
		if (!strcmp(hc->uuid, str)) {
			dm_get(hc->md);
			return hc;
		}

	return NULL;
}

static struct hash_cell *__get_dev_cell(uint64_t dev)
{
	struct mapped_device *md;
	struct hash_cell *hc;

	md = dm_get_md(huge_decode_dev(dev));
	if (!md)
		return NULL;

	hc = dm_get_mdptr(md);
	if (!hc) {
		dm_put(md);
		return NULL;
	}

	return hc;
}

/*-----------------------------------------------------------------
 * Inserting, removing and renaming a device.
 *---------------------------------------------------------------*/
static struct hash_cell *alloc_cell(const char *name, const char *uuid,
				    struct mapped_device *md)
{
	struct hash_cell *hc;

	hc = kmalloc(sizeof(*hc), GFP_KERNEL);
	if (!hc)
		return NULL;

	hc->name = kstrdup(name, GFP_KERNEL);
	if (!hc->name) {
		kfree(hc);
		return NULL;
	}

	if (!uuid)
		hc->uuid = NULL;

	else {
		hc->uuid = kstrdup(uuid, GFP_KERNEL);
		if (!hc->uuid) {
			kfree(hc->name);
			kfree(hc);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&hc->name_list);
	INIT_LIST_HEAD(&hc->uuid_list);
	hc->md = md;
	hc->new_map = NULL;
	return hc;
}

static void free_cell(struct hash_cell *hc)
{
	if (hc) {
		kfree(hc->name);
		kfree(hc->uuid);
		kfree(hc);
	}
}
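
/*
 * Note on reference counting: the __get_*_cell() helpers above take a
 * reference on the underlying mapped_device (dm_get()) before
 * returning the cell, so every successful lookup must eventually be
 * balanced by a dm_put().  A typical caller therefore looks like:
 *
 *	down_read(&_hash_lock);
 *	hc = __get_name_cell(name);
 *	up_read(&_hash_lock);
 *	if (hc) {
 *		... use hc->md ...
 *		dm_put(hc->md);
 *	}
 */
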
/*
 * The kdev_t and uuid of a device can never change once it is
 * initially inserted.
 */
static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md)
{
	struct hash_cell *cell, *hc;

	/*
	 * Allocate the new cells.
	 */
	cell = alloc_cell(name, uuid, md);
	if (!cell)
		return -ENOMEM;

	/*
	 * Insert the cell into both hash tables.
	 */
	down_write(&_hash_lock);
	hc = __get_name_cell(name);
	if (hc) {
		dm_put(hc->md);
		goto bad;
	}

	list_add(&cell->name_list, _name_buckets + hash_str(name));

	if (uuid) {
		hc = __get_uuid_cell(uuid);
		if (hc) {
			list_del(&cell->name_list);
			dm_put(hc->md);
			goto bad;
		}
		list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
	}
	dm_get(md);
	mutex_lock(&dm_hash_cells_mutex);
	dm_set_mdptr(md, cell);
	mutex_unlock(&dm_hash_cells_mutex);
	up_write(&_hash_lock);

	return 0;

bad:
	up_write(&_hash_lock);
	free_cell(cell);
	return -EBUSY;
}

static struct dm_table *__hash_remove(struct hash_cell *hc)
{
	struct dm_table *table;
	int srcu_idx;

	/* remove from the dev hash */
	list_del(&hc->uuid_list);
	list_del(&hc->name_list);
	mutex_lock(&dm_hash_cells_mutex);
	dm_set_mdptr(hc->md, NULL);
	mutex_unlock(&dm_hash_cells_mutex);

	table = dm_get_live_table(hc->md, &srcu_idx);
	if (table)
		dm_table_event(table);
	dm_put_live_table(hc->md, srcu_idx);

	table = NULL;
	if (hc->new_map)
		table = hc->new_map;
	dm_put(hc->md);
	free_cell(hc);

	return table;
}

static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred)
{
	int i, dev_skipped;
	struct hash_cell *hc;
	struct mapped_device *md;
	struct dm_table *t;

retry:
	dev_skipped = 0;

	down_write(&_hash_lock);

	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry(hc, _name_buckets + i, name_list) {
			md = hc->md;
			dm_get(md);

			if (keep_open_devices &&
			    dm_lock_for_deletion(md, mark_deferred, only_deferred)) {
				dm_put(md);
				dev_skipped++;
				continue;
			}

			t = __hash_remove(hc);

			up_write(&_hash_lock);

			if (t) {
				dm_sync_table(md);
				dm_table_destroy(t);
			}
			dm_put(md);
			if (likely(keep_open_devices))
				dm_destroy(md);
			else
				dm_destroy_immediate(md);

			/*
			 * Some mapped devices may be using other mapped
			 * devices, so repeat until we make no further
			 * progress. If a new mapped device is created
			 * here it will also get removed.
			 */
			goto retry;
		}
	}

	up_write(&_hash_lock);

	if (dev_skipped)
		DMWARN("remove_all left %d open device(s)", dev_skipped);
}

/*
 * Set the uuid of a hash_cell that isn't already set.
 */
static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid)
{
	mutex_lock(&dm_hash_cells_mutex);
	hc->uuid = new_uuid;
	mutex_unlock(&dm_hash_cells_mutex);

	list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid));
}
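
/*
 * Both __set_cell_uuid() above and __change_cell_name() below update
 * the cell's strings under dm_hash_cells_mutex.  dm_copy_name_and_uuid()
 * at the bottom of this file reads hc->name and hc->uuid under the same
 * mutex, so a concurrent reader never observes a half-updated cell.
 */
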
/*
 * Changes the name of a hash_cell and returns the old name for
 * the caller to free.
 */
static char *__change_cell_name(struct hash_cell *hc, char *new_name)
{
	char *old_name;

	/*
	 * Rename and move the name cell.
	 */
	list_del(&hc->name_list);
	old_name = hc->name;

	mutex_lock(&dm_hash_cells_mutex);
	hc->name = new_name;
	mutex_unlock(&dm_hash_cells_mutex);

	list_add(&hc->name_list, _name_buckets + hash_str(new_name));

	return old_name;
}

static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
					    const char *new)
{
	char *new_data, *old_name = NULL;
	struct hash_cell *hc;
	struct dm_table *table;
	struct mapped_device *md;
	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
	int srcu_idx;

	/*
	 * duplicate new.
	 */
	new_data = kstrdup(new, GFP_KERNEL);
	if (!new_data)
		return ERR_PTR(-ENOMEM);

	down_write(&_hash_lock);

	/*
	 * Is new free ?
	 */
	if (change_uuid)
		hc = __get_uuid_cell(new);
	else
		hc = __get_name_cell(new);

	if (hc) {
		DMWARN("Unable to change %s on mapped device %s to one that "
		       "already exists: %s",
		       change_uuid ? "uuid" : "name",
		       param->name, new);
		dm_put(hc->md);
		up_write(&_hash_lock);
		kfree(new_data);
		return ERR_PTR(-EBUSY);
	}

	/*
	 * Is there such a device as 'old' ?
	 */
	hc = __get_name_cell(param->name);
	if (!hc) {
		DMWARN("Unable to rename non-existent device, %s to %s%s",
		       param->name, change_uuid ? "uuid " : "", new);
		up_write(&_hash_lock);
		kfree(new_data);
		return ERR_PTR(-ENXIO);
	}

	/*
	 * Does this device already have a uuid?
	 */
	if (change_uuid && hc->uuid) {
		DMWARN("Unable to change uuid of mapped device %s to %s "
		       "because uuid is already set to %s",
		       param->name, new, hc->uuid);
		dm_put(hc->md);
		up_write(&_hash_lock);
		kfree(new_data);
		return ERR_PTR(-EINVAL);
	}

	if (change_uuid)
		__set_cell_uuid(hc, new_data);
	else
		old_name = __change_cell_name(hc, new_data);

	/*
	 * Wake up any dm event waiters.
	 */
	table = dm_get_live_table(hc->md, &srcu_idx);
	if (table)
		dm_table_event(table);
	dm_put_live_table(hc->md, srcu_idx);

	if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr))
		param->flags |= DM_UEVENT_GENERATED_FLAG;

	md = hc->md;
	up_write(&_hash_lock);
	kfree(old_name);

	return md;
}

void dm_deferred_remove(void)
{
	dm_hash_remove_all(true, false, true);
}

/*-----------------------------------------------------------------
 * Implementation of the ioctl commands
 *---------------------------------------------------------------*/
/*
 * All the ioctl commands get dispatched to functions with this
 * prototype.
 */
typedef int (*ioctl_fn)(struct dm_ioctl *param, size_t param_size);

static int remove_all(struct dm_ioctl *param, size_t param_size)
{
	dm_hash_remove_all(true, !!(param->flags & DM_DEFERRED_REMOVE), false);
	param->data_size = 0;
	return 0;
}

/*
 * Round up the ptr to an 8-byte boundary.
 */
#define ALIGN_MASK 7
static inline void *align_ptr(void *ptr)
{
	return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK);
}
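
/*
 * Every ioctl passes a single buffer that starts with a fixed-size
 * struct dm_ioctl and is followed by a variable-size data payload.
 * get_result_buffer() below computes where that payload begins,
 * 8-byte aligned, and how much room is left in it:
 *
 *	+-----------------+-- data_start --+---------------------+
 *	| struct dm_ioctl |    (padding)   | payload ...  (*len) |
 *	+-----------------+----------------+---------------------+
 *	 <-------------------- param_size ---------------------->
 */
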
/*
 * Retrieves the data payload buffer from an already allocated
 * struct dm_ioctl.
 */
static void *get_result_buffer(struct dm_ioctl *param, size_t param_size,
			       size_t *len)
{
	param->data_start = align_ptr(param + 1) - (void *) param;

	if (param->data_start < param_size)
		*len = param_size - param->data_start;
	else
		*len = 0;

	return ((void *) param) + param->data_start;
}

static int list_devices(struct dm_ioctl *param, size_t param_size)
{
	unsigned int i;
	struct hash_cell *hc;
	size_t len, needed = 0;
	struct gendisk *disk;
	struct dm_name_list *nl, *old_nl = NULL;

	down_write(&_hash_lock);

	/*
	 * Loop through all the devices working out how much
	 * space we need.
	 */
	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry (hc, _name_buckets + i, name_list) {
			needed += sizeof(struct dm_name_list);
			needed += strlen(hc->name) + 1;
			needed += ALIGN_MASK;
		}
	}

	/*
	 * Grab our output buffer.
	 */
	nl = get_result_buffer(param, param_size, &len);
	if (len < needed) {
		param->flags |= DM_BUFFER_FULL_FLAG;
		goto out;
	}
	param->data_size = param->data_start + needed;

	nl->dev = 0;	/* Flags no data */

	/*
	 * Now loop through filling out the names.
	 */
	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry (hc, _name_buckets + i, name_list) {
			if (old_nl)
				old_nl->next = (uint32_t) ((void *) nl -
							   (void *) old_nl);
			disk = dm_disk(hc->md);
			nl->dev = huge_encode_dev(disk_devt(disk));
			nl->next = 0;
			strcpy(nl->name, hc->name);

			old_nl = nl;
			nl = align_ptr(((void *) ++nl) + strlen(hc->name) + 1);
		}
	}

out:
	up_write(&_hash_lock);
	return 0;
}
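
/*
 * The records produced by list_devices() above and by
 * list_version_get_info() below form a singly linked list inside the
 * result buffer: each record's 'next' field holds the byte offset from
 * that record to the following one, with 0 marking the last record.
 * (retrieve_status() differs slightly: its spec->next is an offset
 * from the start of the payload.)  Userspace walks the name list
 * roughly like this:
 *
 *	struct dm_name_list *nl = buf;	(start of payload)
 *	for (;;) {
 *		... use nl->dev, nl->name ...
 *		if (!nl->next)
 *			break;
 *		nl = (void *) nl + nl->next;
 *	}
 */
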
static void list_version_get_needed(struct target_type *tt, void *needed_param)
{
	size_t *needed = needed_param;

	*needed += sizeof(struct dm_target_versions);
	*needed += strlen(tt->name);
	*needed += ALIGN_MASK;
}

static void list_version_get_info(struct target_type *tt, void *param)
{
	struct vers_iter *info = param;

	/* Check space - it might have changed since the first iteration */
	if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 >
	    info->end) {

		info->flags = DM_BUFFER_FULL_FLAG;
		return;
	}

	if (info->old_vers)
		info->old_vers->next = (uint32_t) ((void *)info->vers -
						   (void *)info->old_vers);
	info->vers->version[0] = tt->version[0];
	info->vers->version[1] = tt->version[1];
	info->vers->version[2] = tt->version[2];
	info->vers->next = 0;
	strcpy(info->vers->name, tt->name);

	info->old_vers = info->vers;
	info->vers = align_ptr(((void *) ++info->vers) + strlen(tt->name) + 1);
}

static int list_versions(struct dm_ioctl *param, size_t param_size)
{
	size_t len, needed = 0;
	struct dm_target_versions *vers;
	struct vers_iter iter_info;

	/*
	 * Loop through all the devices working out how much
	 * space we need.
	 */
	dm_target_iterate(list_version_get_needed, &needed);

	/*
	 * Grab our output buffer.
	 */
	vers = get_result_buffer(param, param_size, &len);
	if (len < needed) {
		param->flags |= DM_BUFFER_FULL_FLAG;
		goto out;
	}
	param->data_size = param->data_start + needed;

	iter_info.param_size = param_size;
	iter_info.old_vers = NULL;
	iter_info.vers = vers;
	iter_info.flags = 0;
	iter_info.end = (char *)vers + len;

	/*
	 * Now loop through filling out the names & versions.
	 */
	dm_target_iterate(list_version_get_info, &iter_info);
	param->flags |= iter_info.flags;

out:
	return 0;
}

static int check_name(const char *name)
{
	if (strchr(name, '/')) {
		DMWARN("invalid device name");
		return -EINVAL;
	}

	return 0;
}

/*
 * On successful return, the caller must not attempt to acquire
 * _hash_lock without first calling dm_put_live_table, because dm_table_destroy
 * waits for this dm_put_live_table and could be called under this lock.
 */
static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *srcu_idx)
{
	struct hash_cell *hc;
	struct dm_table *table = NULL;

	/* increment rcu count, we don't care about the table pointer */
	dm_get_live_table(md, srcu_idx);

	down_read(&_hash_lock);
	hc = dm_get_mdptr(md);
	if (!hc || hc->md != md) {
		DMWARN("device has been removed from the dev hash table.");
		goto out;
	}

	table = hc->new_map;

out:
	up_read(&_hash_lock);

	return table;
}

static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md,
						      struct dm_ioctl *param,
						      int *srcu_idx)
{
	return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ?
		dm_get_inactive_table(md, srcu_idx) : dm_get_live_table(md, srcu_idx);
}

/*
 * Fills in a dm_ioctl structure, ready for sending back to
 * userland.
 */
static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
{
	struct gendisk *disk = dm_disk(md);
	struct dm_table *table;
	int srcu_idx;

	param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
			  DM_ACTIVE_PRESENT_FLAG | DM_INTERNAL_SUSPEND_FLAG);

	if (dm_suspended_md(md))
		param->flags |= DM_SUSPEND_FLAG;

	if (dm_suspended_internally_md(md))
		param->flags |= DM_INTERNAL_SUSPEND_FLAG;

	if (dm_test_deferred_remove_flag(md))
		param->flags |= DM_DEFERRED_REMOVE;

	param->dev = huge_encode_dev(disk_devt(disk));

	/*
	 * Yes, this will be out of date by the time it gets back
	 * to userland, but it is still very useful for
	 * debugging.
	 */
	param->open_count = dm_open_count(md);

	param->event_nr = dm_get_event_nr(md);
	param->target_count = 0;

	table = dm_get_live_table(md, &srcu_idx);
	if (table) {
		if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) {
			if (get_disk_ro(disk))
				param->flags |= DM_READONLY_FLAG;
			param->target_count = dm_table_get_num_targets(table);
		}

		param->flags |= DM_ACTIVE_PRESENT_FLAG;
	}
	dm_put_live_table(md, srcu_idx);

	if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) {
		int srcu_idx;
		table = dm_get_inactive_table(md, &srcu_idx);
		if (table) {
			if (!(dm_table_get_mode(table) & FMODE_WRITE))
				param->flags |= DM_READONLY_FLAG;
			param->target_count = dm_table_get_num_targets(table);
		}
		dm_put_live_table(md, srcu_idx);
	}
}
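
/*
 * dev_create() below implements DM_DEV_CREATE.  For illustration only
 * (real callers normally go through libdevmapper/dmsetup), a minimal
 * userspace invocation would look roughly like this -- error handling
 * omitted, and the open() path assumes the usual /dev/mapper/control
 * node created by this module:
 *
 *	struct dm_ioctl dmi = { 0 };
 *	int fd = open("/dev/mapper/control", O_RDWR);
 *
 *	dmi.version[0] = DM_VERSION_MAJOR;
 *	dmi.version[1] = DM_VERSION_MINOR;
 *	dmi.version[2] = DM_VERSION_PATCHLEVEL;
 *	dmi.data_size = sizeof(dmi);
 *	strncpy(dmi.name, "mydev", sizeof(dmi.name) - 1);
 *
 *	ioctl(fd, DM_DEV_CREATE, &dmi);
 */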
static int dev_create(struct dm_ioctl *param, size_t param_size)
{
	int r, m = DM_ANY_MINOR;
	struct mapped_device *md;

	r = check_name(param->name);
	if (r)
		return r;

	if (param->flags & DM_PERSISTENT_DEV_FLAG)
		m = MINOR(huge_decode_dev(param->dev));

	r = dm_create(m, &md);
	if (r)
		return r;

	r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
	if (r) {
		dm_put(md);
		dm_destroy(md);
		return r;
	}

	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	__dev_status(md, param);

	dm_put(md);

	return 0;
}

/*
 * Always use UUID for lookups if it's present, otherwise use name or dev.
 */
static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
{
	struct hash_cell *hc = NULL;

	if (*param->uuid) {
		if (*param->name || param->dev)
			return NULL;

		hc = __get_uuid_cell(param->uuid);
		if (!hc)
			return NULL;
	} else if (*param->name) {
		if (param->dev)
			return NULL;

		hc = __get_name_cell(param->name);
		if (!hc)
			return NULL;
	} else if (param->dev) {
		hc = __get_dev_cell(param->dev);
		if (!hc)
			return NULL;
	} else
		return NULL;

	/*
	 * Sneakily write in both the name and the uuid
	 * while we have the cell.
	 */
	strlcpy(param->name, hc->name, sizeof(param->name));
	if (hc->uuid)
		strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
	else
		param->uuid[0] = '\0';

	if (hc->new_map)
		param->flags |= DM_INACTIVE_PRESENT_FLAG;
	else
		param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	return hc;
}

static struct mapped_device *find_device(struct dm_ioctl *param)
{
	struct hash_cell *hc;
	struct mapped_device *md = NULL;

	down_read(&_hash_lock);
	hc = __find_device_hash_cell(param);
	if (hc)
		md = hc->md;
	up_read(&_hash_lock);

	return md;
}

static int dev_remove(struct dm_ioctl *param, size_t param_size)
{
	struct hash_cell *hc;
	struct mapped_device *md;
	int r;
	struct dm_table *t;

	down_write(&_hash_lock);
	hc = __find_device_hash_cell(param);

	if (!hc) {
		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);
		return -ENXIO;
	}

	md = hc->md;

	/*
	 * Ensure the device is not open and nothing further can open it.
	 */
	r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false);
	if (r) {
		if (r == -EBUSY && param->flags & DM_DEFERRED_REMOVE) {
			up_write(&_hash_lock);
			dm_put(md);
			return 0;
		}
		DMDEBUG_LIMIT("unable to remove open device %s", hc->name);
		up_write(&_hash_lock);
		dm_put(md);
		return r;
	}

	t = __hash_remove(hc);
	up_write(&_hash_lock);

	if (t) {
		dm_sync_table(md);
		dm_table_destroy(t);
	}

	param->flags &= ~DM_DEFERRED_REMOVE;

	if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
		param->flags |= DM_UEVENT_GENERATED_FLAG;

	dm_put(md);
	dm_destroy(md);
	return 0;
}

/*
 * Check a string doesn't overrun the chunk of
 * memory we copied from userland.
 */
static int invalid_str(char *str, void *end)
{
	while ((void *) str < end)
		if (!*str++)
			return 0;

	return -EINVAL;
}

static int dev_rename(struct dm_ioctl *param, size_t param_size)
{
	int r;
	char *new_data = (char *) param + param->data_start;
	struct mapped_device *md;
	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;

	if (new_data < param->data ||
	    invalid_str(new_data, (void *) param + param_size) || !*new_data ||
	    strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
		DMWARN("Invalid new mapped device name or uuid string supplied.");
		return -EINVAL;
	}

	if (!change_uuid) {
		r = check_name(new_data);
		if (r)
			return r;
	}

	md = dm_hash_rename(param, new_data);
	if (IS_ERR(md))
		return PTR_ERR(md);

	__dev_status(md, param);
	dm_put(md);

	return 0;
}

static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
{
	int r = -EINVAL, x;
	struct mapped_device *md;
	struct hd_geometry geometry;
	unsigned long indata[4];
	char *geostr = (char *) param + param->data_start;
	char dummy;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	if (geostr < param->data ||
	    invalid_str(geostr, (void *) param + param_size)) {
		DMWARN("Invalid geometry supplied.");
		goto out;
	}

	x = sscanf(geostr, "%lu %lu %lu %lu%c", indata,
		   indata + 1, indata + 2, indata + 3, &dummy);

	if (x != 4) {
		DMWARN("Unable to interpret geometry settings.");
		goto out;
	}

	if (indata[0] > 65535 || indata[1] > 255 ||
	    indata[2] > 255 || indata[3] > ULONG_MAX) {
		DMWARN("Geometry exceeds range limits.");
		goto out;
	}

	geometry.cylinders = indata[0];
	geometry.heads = indata[1];
	geometry.sectors = indata[2];
	geometry.start = indata[3];

	r = dm_set_geometry(md, &geometry);

	param->data_size = 0;

out:
	dm_put(md);
	return r;
}

static int do_suspend(struct dm_ioctl *param)
{
	int r = 0;
	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
	struct mapped_device *md;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	if (param->flags & DM_SKIP_LOCKFS_FLAG)
		suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
	if (param->flags & DM_NOFLUSH_FLAG)
		suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;

	if (!dm_suspended_md(md)) {
		r = dm_suspend(md, suspend_flags);
		if (r)
			goto out;
	}

	__dev_status(md, param);

out:
	dm_put(md);

	return r;
}
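
/*
 * do_resume() below is where a table staged by table_load() actually
 * goes live.  The usual sequence driven from userspace is:
 *
 *	DM_DEV_CREATE	creates the empty device
 *	DM_TABLE_LOAD	stages a table in hc->new_map (inactive)
 *	DM_DEV_SUSPEND	(without DM_SUSPEND_FLAG, i.e. a resume)
 *			swaps the staged table in via dm_swap_table()
 *
 * The device is suspended around the swap if it was not already.
 */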
static int do_resume(struct dm_ioctl *param)
{
	int r = 0;
	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
	struct hash_cell *hc;
	struct mapped_device *md;
	struct dm_table *new_map, *old_map = NULL;

	down_write(&_hash_lock);

	hc = __find_device_hash_cell(param);
	if (!hc) {
		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);
		return -ENXIO;
	}

	md = hc->md;

	new_map = hc->new_map;
	hc->new_map = NULL;
	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	up_write(&_hash_lock);

	/* Do we need to load a new map ? */
	if (new_map) {
		/* Suspend if it isn't already suspended */
		if (param->flags & DM_SKIP_LOCKFS_FLAG)
			suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
		if (param->flags & DM_NOFLUSH_FLAG)
			suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
		if (!dm_suspended_md(md))
			dm_suspend(md, suspend_flags);

		old_map = dm_swap_table(md, new_map);
		if (IS_ERR(old_map)) {
			dm_sync_table(md);
			dm_table_destroy(new_map);
			dm_put(md);
			return PTR_ERR(old_map);
		}

		if (dm_table_get_mode(new_map) & FMODE_WRITE)
			set_disk_ro(dm_disk(md), 0);
		else
			set_disk_ro(dm_disk(md), 1);
	}

	if (dm_suspended_md(md)) {
		r = dm_resume(md);
		if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
			param->flags |= DM_UEVENT_GENERATED_FLAG;
	}

	/*
	 * Since dm_swap_table synchronizes RCU, nobody should be in
	 * read-side critical section already.
	 */
	if (old_map)
		dm_table_destroy(old_map);

	if (!r)
		__dev_status(md, param);

	dm_put(md);
	return r;
}

/*
 * Set or unset the suspension state of a device.
 * If the device already is in the requested state we just return its status.
 */
static int dev_suspend(struct dm_ioctl *param, size_t param_size)
{
	if (param->flags & DM_SUSPEND_FLAG)
		return do_suspend(param);

	return do_resume(param);
}

/*
 * Copies device info back to user space, used by
 * the create and info ioctls.
 */
static int dev_status(struct dm_ioctl *param, size_t param_size)
{
	struct mapped_device *md;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	__dev_status(md, param);
	dm_put(md);

	return 0;
}
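
/*
 * retrieve_status() below, like the other result-building helpers,
 * never fails outright when the caller's buffer is too small: it sets
 * DM_BUFFER_FULL_FLAG instead and the ioctl still returns 0.
 * Userspace is expected to notice the flag, retry with a larger
 * buffer, and repeat the ioctl.
 */
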
/*
 * Build up the status struct for each target
 */
static void retrieve_status(struct dm_table *table,
			    struct dm_ioctl *param, size_t param_size)
{
	unsigned int i, num_targets;
	struct dm_target_spec *spec;
	char *outbuf, *outptr;
	status_type_t type;
	size_t remaining, len, used = 0;
	unsigned status_flags = 0;

	outptr = outbuf = get_result_buffer(param, param_size, &len);

	if (param->flags & DM_STATUS_TABLE_FLAG)
		type = STATUSTYPE_TABLE;
	else
		type = STATUSTYPE_INFO;

	/* Get all the target info */
	num_targets = dm_table_get_num_targets(table);
	for (i = 0; i < num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(table, i);
		size_t l;

		remaining = len - (outptr - outbuf);
		if (remaining <= sizeof(struct dm_target_spec)) {
			param->flags |= DM_BUFFER_FULL_FLAG;
			break;
		}

		spec = (struct dm_target_spec *) outptr;

		spec->status = 0;
		spec->sector_start = ti->begin;
		spec->length = ti->len;
		strncpy(spec->target_type, ti->type->name,
			sizeof(spec->target_type));

		outptr += sizeof(struct dm_target_spec);
		remaining = len - (outptr - outbuf);
		if (remaining <= 0) {
			param->flags |= DM_BUFFER_FULL_FLAG;
			break;
		}

		/* Get the status/table string from the target driver */
		if (ti->type->status) {
			if (param->flags & DM_NOFLUSH_FLAG)
				status_flags |= DM_STATUS_NOFLUSH_FLAG;
			ti->type->status(ti, type, status_flags, outptr, remaining);
		} else
			outptr[0] = '\0';

		l = strlen(outptr) + 1;
		if (l == remaining) {
			param->flags |= DM_BUFFER_FULL_FLAG;
			break;
		}

		outptr += l;
		used = param->data_start + (outptr - outbuf);

		outptr = align_ptr(outptr);
		spec->next = outptr - outbuf;
	}

	if (used)
		param->data_size = used;

	param->target_count = num_targets;
}

/*
 * Wait for a device to report an event
 */
static int dev_wait(struct dm_ioctl *param, size_t param_size)
{
	int r = 0;
	struct mapped_device *md;
	struct dm_table *table;
	int srcu_idx;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	/*
	 * Wait for a notification event
	 */
	if (dm_wait_event(md, param->event_nr)) {
		r = -ERESTARTSYS;
		goto out;
	}

	/*
	 * The userland program is going to want to know what
	 * changed to trigger the event, so we may as well tell
	 * him and save an ioctl.
	 */
	__dev_status(md, param);

	table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
	if (table)
		retrieve_status(table, param, param_size);
	dm_put_live_table(md, srcu_idx);

out:
	dm_put(md);

	return r;
}

static inline fmode_t get_mode(struct dm_ioctl *param)
{
	fmode_t mode = FMODE_READ | FMODE_WRITE;

	if (param->flags & DM_READONLY_FLAG)
		mode = FMODE_READ;

	return mode;
}

static int next_target(struct dm_target_spec *last, uint32_t next, void *end,
		       struct dm_target_spec **spec, char **target_params)
{
	*spec = (struct dm_target_spec *) ((unsigned char *) last + next);
	*target_params = (char *) (*spec + 1);

	if (*spec < (last + 1))
		return -EINVAL;

	return invalid_str(*target_params, end);
}

static int populate_table(struct dm_table *table,
			  struct dm_ioctl *param, size_t param_size)
{
	int r;
	unsigned int i = 0;
	struct dm_target_spec *spec = (struct dm_target_spec *) param;
	uint32_t next = param->data_start;
	void *end = (void *) param + param_size;
	char *target_params;

	if (!param->target_count) {
		DMWARN("populate_table: no targets specified");
		return -EINVAL;
	}

	for (i = 0; i < param->target_count; i++) {

		r = next_target(spec, next, end, &spec, &target_params);
		if (r) {
			DMWARN("unable to find target");
			return r;
		}

		r = dm_table_add_target(table, spec->target_type,
					(sector_t) spec->sector_start,
					(sector_t) spec->length,
					target_params);
		if (r) {
			DMWARN("error adding target to table");
			return r;
		}

		next = spec->next;
	}

	return dm_table_complete(table);
}

static bool is_valid_type(unsigned cur, unsigned new)
{
	if (cur == new ||
	    (cur == DM_TYPE_BIO_BASED && new == DM_TYPE_DAX_BIO_BASED))
		return true;

	return false;
}
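
/*
 * table_load() below consumes the same wire format that
 * populate_table() walks above: the payload holds param->target_count
 * records, each a struct dm_target_spec immediately followed by its
 * NUL-terminated parameter string, with spec->next giving the offset
 * from the current record to the next one.  In dmsetup's table syntax
 * a record such as
 *
 *	0 409600 linear /dev/sda 0
 *
 * becomes sector_start = 0, length = 409600, target_type = "linear"
 * and target_params = "/dev/sda 0".
 */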
static int table_load(struct dm_ioctl *param, size_t param_size)
{
	int r;
	struct hash_cell *hc;
	struct dm_table *t, *old_map = NULL;
	struct mapped_device *md;
	struct target_type *immutable_target_type;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	r = dm_table_create(&t, get_mode(param), param->target_count, md);
	if (r)
		goto err;

	/* Protect md->type and md->queue against concurrent table loads. */
	dm_lock_md_type(md);
	r = populate_table(t, param, param_size);
	if (r)
		goto err_unlock_md_type;

	immutable_target_type = dm_get_immutable_target_type(md);
	if (immutable_target_type &&
	    (immutable_target_type != dm_table_get_immutable_target_type(t)) &&
	    !dm_table_get_wildcard_target(t)) {
		DMWARN("can't replace immutable target type %s",
		       immutable_target_type->name);
		r = -EINVAL;
		goto err_unlock_md_type;
	}

	if (dm_get_md_type(md) == DM_TYPE_NONE) {
		/* Initial table load: acquire type of table. */
		dm_set_md_type(md, dm_table_get_type(t));

		/* setup md->queue to reflect md's type (may block) */
		r = dm_setup_md_queue(md, t);
		if (r) {
			DMWARN("unable to set up device queue for new table.");
			goto err_unlock_md_type;
		}
	} else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) {
		DMWARN("can't change device type after initial table load.");
		r = -EINVAL;
		goto err_unlock_md_type;
	}

	dm_unlock_md_type(md);

	/* stage inactive table */
	down_write(&_hash_lock);
	hc = dm_get_mdptr(md);
	if (!hc || hc->md != md) {
		DMWARN("device has been removed from the dev hash table.");
		up_write(&_hash_lock);
		r = -ENXIO;
		goto err_destroy_table;
	}

	if (hc->new_map)
		old_map = hc->new_map;
	hc->new_map = t;
	up_write(&_hash_lock);

	param->flags |= DM_INACTIVE_PRESENT_FLAG;
	__dev_status(md, param);

	if (old_map) {
		dm_sync_table(md);
		dm_table_destroy(old_map);
	}

	dm_put(md);

	return 0;

err_unlock_md_type:
	dm_unlock_md_type(md);
err_destroy_table:
	dm_table_destroy(t);
err:
	dm_put(md);

	return r;
}

static int table_clear(struct dm_ioctl *param, size_t param_size)
{
	struct hash_cell *hc;
	struct mapped_device *md;
	struct dm_table *old_map = NULL;

	down_write(&_hash_lock);

	hc = __find_device_hash_cell(param);
	if (!hc) {
		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);
		return -ENXIO;
	}

	if (hc->new_map) {
		old_map = hc->new_map;
		hc->new_map = NULL;
	}

	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	__dev_status(hc->md, param);
	md = hc->md;
	up_write(&_hash_lock);
	if (old_map) {
		dm_sync_table(md);
		dm_table_destroy(old_map);
	}
	dm_put(md);

	return 0;
}

/*
 * Retrieves a list of devices used by a particular dm device.
 */
static void retrieve_deps(struct dm_table *table,
			  struct dm_ioctl *param, size_t param_size)
{
	unsigned int count = 0;
	struct list_head *tmp;
	size_t len, needed;
	struct dm_dev_internal *dd;
	struct dm_target_deps *deps;

	deps = get_result_buffer(param, param_size, &len);

	/*
	 * Count the devices.
	 */
	list_for_each (tmp, dm_table_get_devices(table))
		count++;

	/*
	 * Check we have enough space.
	 */
	needed = sizeof(*deps) + (sizeof(*deps->dev) * count);
	if (len < needed) {
		param->flags |= DM_BUFFER_FULL_FLAG;
		return;
	}

	/*
	 * Fill in the devices.
	 */
	deps->count = count;
	count = 0;
	list_for_each_entry (dd, dm_table_get_devices(table), list)
		deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);

	param->data_size = param->data_start + needed;
}

static int table_deps(struct dm_ioctl *param, size_t param_size)
{
	struct mapped_device *md;
	struct dm_table *table;
	int srcu_idx;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	__dev_status(md, param);

	table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
	if (table)
		retrieve_deps(table, param, param_size);
	dm_put_live_table(md, srcu_idx);

	dm_put(md);

	return 0;
}

/*
 * Return the status of a device as a text string for each
 * target.
 */
static int table_status(struct dm_ioctl *param, size_t param_size)
{
	struct mapped_device *md;
	struct dm_table *table;
	int srcu_idx;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	__dev_status(md, param);

	table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
	if (table)
		retrieve_status(table, param, param_size);
	dm_put_live_table(md, srcu_idx);

	dm_put(md);

	return 0;
}

/*
 * Process device-mapper dependent messages. Messages prefixed with '@'
 * are processed by the DM core. All others are delivered to the target.
 * Returns a number <= 1 if message was processed by device mapper.
 * Returns 2 if message should be delivered to the target.
 */
static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
			  char *result, unsigned maxlen)
{
	int r;

	if (**argv != '@')
		return 2; /* no '@' prefix, deliver to target */

	if (!strcasecmp(argv[0], "@cancel_deferred_remove")) {
		if (argc != 1) {
			DMERR("Invalid arguments for @cancel_deferred_remove");
			return -EINVAL;
		}
		return dm_cancel_deferred_remove(md);
	}

	r = dm_stats_message(md, argc, argv, result, maxlen);
	if (r < 2)
		return r;

	DMERR("Unsupported message sent to DM core: %s", argv[0]);
	return -EINVAL;
}
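
/*
 * target_message() below backs DM_TARGET_MSG, which is what
 * "dmsetup message <device> <sector> <msg> [args...]" issues.  For
 * example,
 *
 *	dmsetup message mydev 0 @cancel_deferred_remove
 *
 * is routed by message_for_md() above to dm_cancel_deferred_remove(),
 * while a message without the '@' prefix is passed to the target
 * whose mapping covers the given sector.
 */
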
/*
 * Pass a message to the target that's at the supplied device offset.
 */
static int target_message(struct dm_ioctl *param, size_t param_size)
{
	int r, argc;
	char **argv;
	struct mapped_device *md;
	struct dm_table *table;
	struct dm_target *ti;
	struct dm_target_msg *tmsg = (void *) param + param->data_start;
	size_t maxlen;
	char *result = get_result_buffer(param, param_size, &maxlen);
	int srcu_idx;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	if (tmsg < (struct dm_target_msg *) param->data ||
	    invalid_str(tmsg->message, (void *) param + param_size)) {
		DMWARN("Invalid target message parameters.");
		r = -EINVAL;
		goto out;
	}

	r = dm_split_args(&argc, &argv, tmsg->message);
	if (r) {
		DMWARN("Failed to split target message parameters");
		goto out;
	}

	if (!argc) {
		DMWARN("Empty message received.");
		goto out_argv;
	}

	r = message_for_md(md, argc, argv, result, maxlen);
	if (r <= 1)
		goto out_argv;

	table = dm_get_live_table(md, &srcu_idx);
	if (!table)
		goto out_table;

	if (dm_deleting_md(md)) {
		r = -ENXIO;
		goto out_table;
	}

	ti = dm_table_find_target(table, tmsg->sector);
	if (!dm_target_is_valid(ti)) {
		DMWARN("Target message sector outside device.");
		r = -EINVAL;
	} else if (ti->type->message)
		r = ti->type->message(ti, argc, argv);
	else {
		DMWARN("Target type does not support messages");
		r = -EINVAL;
	}

out_table:
	dm_put_live_table(md, srcu_idx);
out_argv:
	kfree(argv);
out:
	if (r >= 0)
		__dev_status(md, param);

	if (r == 1) {
		param->flags |= DM_DATA_OUT_FLAG;
		if (dm_message_test_buffer_overflow(result, maxlen))
			param->flags |= DM_BUFFER_FULL_FLAG;
		else
			param->data_size = param->data_start + strlen(result) + 1;
		r = 0;
	}

	dm_put(md);
	return r;
}

/*
 * The ioctl parameter block consists of two parts, a dm_ioctl struct
 * followed by a data buffer. This flag is set if the second part,
 * which has a variable size, is not used by the function processing
 * the ioctl.
 */
#define IOCTL_FLAGS_NO_PARAMS 1

/*-----------------------------------------------------------------
 * Implementation of open/close/ioctl on the special char
 * device.
 *---------------------------------------------------------------*/
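
/*
 * The _ioctls[] table in lookup_ioctl() below is indexed directly by
 * the command number, so it relies on the DM_*_CMD values in
 * linux/dm-ioctl.h being dense and starting at 0 (DM_VERSION_CMD) in
 * exactly the order listed here.  ctl_ioctl() extracts that number
 * from the full ioctl command with _IOC_NR() before looking it up.
 */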
static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
{
	static struct {
		int cmd;
		int flags;
		ioctl_fn fn;
	} _ioctls[] = {
		{DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
		{DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all},
		{DM_LIST_DEVICES_CMD, 0, list_devices},

		{DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create},
		{DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove},
		{DM_DEV_RENAME_CMD, 0, dev_rename},
		{DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
		{DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
		{DM_DEV_WAIT_CMD, 0, dev_wait},

		{DM_TABLE_LOAD_CMD, 0, table_load},
		{DM_TABLE_CLEAR_CMD, IOCTL_FLAGS_NO_PARAMS, table_clear},
		{DM_TABLE_DEPS_CMD, 0, table_deps},
		{DM_TABLE_STATUS_CMD, 0, table_status},

		{DM_LIST_VERSIONS_CMD, 0, list_versions},

		{DM_TARGET_MSG_CMD, 0, target_message},
		{DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry}
	};

	if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))
		return NULL;

	*ioctl_flags = _ioctls[cmd].flags;
	return _ioctls[cmd].fn;
}

/*
 * As well as checking the version compatibility this always
 * copies the kernel interface version out.
 */
static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
{
	uint32_t version[3];
	int r = 0;

	if (copy_from_user(version, user->version, sizeof(version)))
		return -EFAULT;

	if ((DM_VERSION_MAJOR != version[0]) ||
	    (DM_VERSION_MINOR < version[1])) {
		DMWARN("ioctl interface mismatch: "
		       "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
		       DM_VERSION_MAJOR, DM_VERSION_MINOR,
		       DM_VERSION_PATCHLEVEL,
		       version[0], version[1], version[2], cmd);
		r = -EINVAL;
	}

	/*
	 * Fill in the kernel version.
	 */
	version[0] = DM_VERSION_MAJOR;
	version[1] = DM_VERSION_MINOR;
	version[2] = DM_VERSION_PATCHLEVEL;
	if (copy_to_user(user->version, version, sizeof(version)))
		return -EFAULT;

	return r;
}

#define DM_PARAMS_MALLOC	0x0001	/* Params allocated with kvmalloc() */
#define DM_WIPE_BUFFER		0x0010	/* Wipe input buffer before returning from ioctl */

static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags)
{
	if (param_flags & DM_WIPE_BUFFER)
		memset(param, 0, param_size);

	if (param_flags & DM_PARAMS_MALLOC)
		kvfree(param);
}
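
/*
 * copy_params() below makes the kernel-private copy of the parameter
 * block.  Two details are worth calling out: data_size is read once
 * into param_kernel and then re-checked against the full copy, so a
 * concurrent userspace writer cannot change the buffer size between
 * the two copy_from_user() calls; and when DM_SECURE_DATA_FLAG is set
 * the userspace buffer is cleared and the kernel copy is wiped again
 * in free_params() via DM_WIPE_BUFFER, so sensitive payloads (e.g.
 * dm-crypt keys) do not linger in either place.
 */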
static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
		       int ioctl_flags,
		       struct dm_ioctl **param, int *param_flags)
{
	struct dm_ioctl *dmi;
	int secure_data;
	const size_t minimum_data_size = offsetof(struct dm_ioctl, data);

	if (copy_from_user(param_kernel, user, minimum_data_size))
		return -EFAULT;

	if (param_kernel->data_size < minimum_data_size)
		return -EINVAL;

	secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG;

	*param_flags = secure_data ? DM_WIPE_BUFFER : 0;

	if (ioctl_flags & IOCTL_FLAGS_NO_PARAMS) {
		dmi = param_kernel;
		dmi->data_size = minimum_data_size;
		goto data_copied;
	}

	/*
	 * Try to avoid low memory issues when a device is suspended.
	 * Use kmalloc() rather than vmalloc() when we can.
	 */
	dmi = NULL;
	if (param_kernel->data_size <= KMALLOC_MAX_SIZE)
		dmi = kmalloc(param_kernel->data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);

	if (!dmi) {
		unsigned noio_flag;
		noio_flag = memalloc_noio_save();
		dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM, PAGE_KERNEL);
		memalloc_noio_restore(noio_flag);
	}

	if (!dmi) {
		if (secure_data && clear_user(user, param_kernel->data_size))
			return -EFAULT;
		return -ENOMEM;
	}

	*param_flags |= DM_PARAMS_MALLOC;

	if (copy_from_user(dmi, user, param_kernel->data_size))
		goto bad;

data_copied:
	/*
	 * Abort if something changed the ioctl data while it was being copied.
	 */
	if (dmi->data_size != param_kernel->data_size) {
		DMERR("rejecting ioctl: data size modified while processing parameters");
		goto bad;
	}

	/* Wipe the user buffer so we do not return it to userspace */
	if (secure_data && clear_user(user, param_kernel->data_size))
		goto bad;

	*param = dmi;
	return 0;

bad:
	free_params(dmi, param_kernel->data_size, *param_flags);

	return -EFAULT;
}

static int validate_params(uint cmd, struct dm_ioctl *param)
{
	/* Always clear this flag */
	param->flags &= ~DM_BUFFER_FULL_FLAG;
	param->flags &= ~DM_UEVENT_GENERATED_FLAG;
	param->flags &= ~DM_SECURE_DATA_FLAG;
	param->flags &= ~DM_DATA_OUT_FLAG;

	/* Ignores parameters */
	if (cmd == DM_REMOVE_ALL_CMD ||
	    cmd == DM_LIST_DEVICES_CMD ||
	    cmd == DM_LIST_VERSIONS_CMD)
		return 0;

	if (cmd == DM_DEV_CREATE_CMD) {
		if (!*param->name) {
			DMWARN("name not supplied when creating device");
			return -EINVAL;
		}
	} else if (*param->uuid && *param->name) {
		DMWARN("only supply one of name or uuid, cmd(%u)", cmd);
		return -EINVAL;
	}

	/* Ensure strings are terminated */
	param->name[DM_NAME_LEN - 1] = '\0';
	param->uuid[DM_UUID_LEN - 1] = '\0';

	return 0;
}
1840 */ 1841 r = copy_params(user, ¶m_kernel, ioctl_flags, ¶m, ¶m_flags); 1842 1843 if (r) 1844 return r; 1845 1846 input_param_size = param->data_size; 1847 r = validate_params(cmd, param); 1848 if (r) 1849 goto out; 1850 1851 param->data_size = sizeof(*param); 1852 r = fn(param, input_param_size); 1853 1854 if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) && 1855 unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS)) 1856 DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd); 1857 1858 /* 1859 * Copy the results back to userland. 1860 */ 1861 if (!r && copy_to_user(user, param, param->data_size)) 1862 r = -EFAULT; 1863 1864 out: 1865 free_params(param, input_param_size, param_flags); 1866 return r; 1867 } 1868 1869 static long dm_ctl_ioctl(struct file *file, uint command, ulong u) 1870 { 1871 return (long)ctl_ioctl(command, (struct dm_ioctl __user *)u); 1872 } 1873 1874 #ifdef CONFIG_COMPAT 1875 static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u) 1876 { 1877 return (long)dm_ctl_ioctl(file, command, (ulong) compat_ptr(u)); 1878 } 1879 #else 1880 #define dm_compat_ctl_ioctl NULL 1881 #endif 1882 1883 static const struct file_operations _ctl_fops = { 1884 .open = nonseekable_open, 1885 .unlocked_ioctl = dm_ctl_ioctl, 1886 .compat_ioctl = dm_compat_ctl_ioctl, 1887 .owner = THIS_MODULE, 1888 .llseek = noop_llseek, 1889 }; 1890 1891 static struct miscdevice _dm_misc = { 1892 .minor = MAPPER_CTRL_MINOR, 1893 .name = DM_NAME, 1894 .nodename = DM_DIR "/" DM_CONTROL_NODE, 1895 .fops = &_ctl_fops 1896 }; 1897 1898 MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR); 1899 MODULE_ALIAS("devname:" DM_DIR "/" DM_CONTROL_NODE); 1900 1901 /* 1902 * Create misc character device and link to DM_DIR/control. 1903 */ 1904 int __init dm_interface_init(void) 1905 { 1906 int r; 1907 1908 r = dm_hash_init(); 1909 if (r) 1910 return r; 1911 1912 r = misc_register(&_dm_misc); 1913 if (r) { 1914 DMERR("misc_register failed for control device"); 1915 dm_hash_exit(); 1916 return r; 1917 } 1918 1919 DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR, 1920 DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA, 1921 DM_DRIVER_EMAIL); 1922 return 0; 1923 } 1924 1925 void dm_interface_exit(void) 1926 { 1927 misc_deregister(&_dm_misc); 1928 dm_hash_exit(); 1929 } 1930 1931 /** 1932 * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers 1933 * @md: Pointer to mapped_device 1934 * @name: Buffer (size DM_NAME_LEN) for name 1935 * @uuid: Buffer (size DM_UUID_LEN) for uuid or empty string if uuid not defined 1936 */ 1937 int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) 1938 { 1939 int r = 0; 1940 struct hash_cell *hc; 1941 1942 if (!md) 1943 return -ENXIO; 1944 1945 mutex_lock(&dm_hash_cells_mutex); 1946 hc = dm_get_mdptr(md); 1947 if (!hc || hc->md != md) { 1948 r = -ENXIO; 1949 goto out; 1950 } 1951 1952 if (name) 1953 strcpy(name, hc->name); 1954 if (uuid) 1955 strcpy(uuid, hc->uuid ? : ""); 1956 1957 out: 1958 mutex_unlock(&dm_hash_cells_mutex); 1959 1960 return r; 1961 } 1962