/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/dm-ioctl.h>
#include <linux/hdreg.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#define DM_MSG_PREFIX "ioctl"
#define DM_DRIVER_EMAIL "dm-devel@redhat.com"

struct dm_file {
	/*
	 * poll will wait until the global event number is greater than
	 * this value.
	 */
	volatile unsigned global_event_nr;
};

/*-----------------------------------------------------------------
 * The ioctl interface needs to be able to look up devices by
 * name or uuid.
 *---------------------------------------------------------------*/
struct hash_cell {
	struct list_head name_list;
	struct list_head uuid_list;

	char *name;
	char *uuid;
	struct mapped_device *md;
	struct dm_table *new_map;
};

struct vers_iter {
	size_t param_size;
	struct dm_target_versions *vers, *old_vers;
	char *end;
	uint32_t flags;
};


#define NUM_BUCKETS 64
#define MASK_BUCKETS (NUM_BUCKETS - 1)
static struct list_head _name_buckets[NUM_BUCKETS];
static struct list_head _uuid_buckets[NUM_BUCKETS];

static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred);

/*
 * Guards access to both hash tables.
 */
static DECLARE_RWSEM(_hash_lock);

/*
 * Protects use of mdptr to obtain hash cell name and uuid from mapped device.
 */
static DEFINE_MUTEX(dm_hash_cells_mutex);

static void init_buckets(struct list_head *buckets)
{
	unsigned int i;

	for (i = 0; i < NUM_BUCKETS; i++)
		INIT_LIST_HEAD(buckets + i);
}

static int dm_hash_init(void)
{
	init_buckets(_name_buckets);
	init_buckets(_uuid_buckets);
	return 0;
}

static void dm_hash_exit(void)
{
	dm_hash_remove_all(false, false, false);
}

/*-----------------------------------------------------------------
 * Hash function:
 * We're not really concerned with the str hash function being
 * fast since it's only used by the ioctl interface.
 *---------------------------------------------------------------*/
static unsigned int hash_str(const char *str)
{
	const unsigned int hash_mult = 2654435387U;
	unsigned int h = 0;

	while (*str)
		h = (h + (unsigned int) *str++) * hash_mult;

	return h & MASK_BUCKETS;
}
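/*
 * Illustrative sketch (added commentary, not compiled logic): because
 * NUM_BUCKETS is a power of two, masking with MASK_BUCKETS folds the
 * multiplicative hash into a bucket index in [0, 63].  For the
 * single-character name "a":
 *
 *	h = (0 + 'a') * 2654435387U;	(arithmetic is mod 2^32)
 *	bucket = h & MASK_BUCKETS;	(some value in 0..63)
 *
 * The same function indexes both _name_buckets and _uuid_buckets.
 */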
/*-----------------------------------------------------------------
 * Code for looking up a device by name
 *---------------------------------------------------------------*/
static struct hash_cell *__get_name_cell(const char *str)
{
	struct hash_cell *hc;
	unsigned int h = hash_str(str);

	list_for_each_entry (hc, _name_buckets + h, name_list)
		if (!strcmp(hc->name, str)) {
			dm_get(hc->md);
			return hc;
		}

	return NULL;
}

static struct hash_cell *__get_uuid_cell(const char *str)
{
	struct hash_cell *hc;
	unsigned int h = hash_str(str);

	list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
		if (!strcmp(hc->uuid, str)) {
			dm_get(hc->md);
			return hc;
		}

	return NULL;
}

static struct hash_cell *__get_dev_cell(uint64_t dev)
{
	struct mapped_device *md;
	struct hash_cell *hc;

	md = dm_get_md(huge_decode_dev(dev));
	if (!md)
		return NULL;

	hc = dm_get_mdptr(md);
	if (!hc) {
		dm_put(md);
		return NULL;
	}

	return hc;
}
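/*
 * Note (added commentary): all three lookup helpers above return with a
 * reference taken on the underlying mapped_device (via dm_get() or
 * dm_get_md()), so every successful caller owes a matching dm_put().
 * The double-underscore prefix follows the convention here that the
 * caller holds _hash_lock.
 */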
/*-----------------------------------------------------------------
 * Inserting, removing and renaming a device.
 *---------------------------------------------------------------*/
static struct hash_cell *alloc_cell(const char *name, const char *uuid,
				    struct mapped_device *md)
{
	struct hash_cell *hc;

	hc = kmalloc(sizeof(*hc), GFP_KERNEL);
	if (!hc)
		return NULL;

	hc->name = kstrdup(name, GFP_KERNEL);
	if (!hc->name) {
		kfree(hc);
		return NULL;
	}

	if (!uuid)
		hc->uuid = NULL;

	else {
		hc->uuid = kstrdup(uuid, GFP_KERNEL);
		if (!hc->uuid) {
			kfree(hc->name);
			kfree(hc);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&hc->name_list);
	INIT_LIST_HEAD(&hc->uuid_list);
	hc->md = md;
	hc->new_map = NULL;
	return hc;
}

static void free_cell(struct hash_cell *hc)
{
	if (hc) {
		kfree(hc->name);
		kfree(hc->uuid);
		kfree(hc);
	}
}

/*
 * The kdev_t and uuid of a device can never change once it is
 * initially inserted.
 */
static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md)
{
	struct hash_cell *cell, *hc;

	/*
	 * Allocate the new cells.
	 */
	cell = alloc_cell(name, uuid, md);
	if (!cell)
		return -ENOMEM;

	/*
	 * Insert the cell into both hash tables.
	 */
	down_write(&_hash_lock);
	hc = __get_name_cell(name);
	if (hc) {
		dm_put(hc->md);
		goto bad;
	}

	list_add(&cell->name_list, _name_buckets + hash_str(name));

	if (uuid) {
		hc = __get_uuid_cell(uuid);
		if (hc) {
			list_del(&cell->name_list);
			dm_put(hc->md);
			goto bad;
		}
		list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
	}
	dm_get(md);
	mutex_lock(&dm_hash_cells_mutex);
	dm_set_mdptr(md, cell);
	mutex_unlock(&dm_hash_cells_mutex);
	up_write(&_hash_lock);

	return 0;

bad:
	up_write(&_hash_lock);
	free_cell(cell);
	return -EBUSY;
}

static struct dm_table *__hash_remove(struct hash_cell *hc)
{
	struct dm_table *table;
	int srcu_idx;

	/* remove from the dev hash */
	list_del(&hc->uuid_list);
	list_del(&hc->name_list);
	mutex_lock(&dm_hash_cells_mutex);
	dm_set_mdptr(hc->md, NULL);
	mutex_unlock(&dm_hash_cells_mutex);

	table = dm_get_live_table(hc->md, &srcu_idx);
	if (table)
		dm_table_event(table);
	dm_put_live_table(hc->md, srcu_idx);

	table = NULL;
	if (hc->new_map)
		table = hc->new_map;
	dm_put(hc->md);
	free_cell(hc);

	return table;
}

static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred)
{
	int i, dev_skipped;
	struct hash_cell *hc;
	struct mapped_device *md;
	struct dm_table *t;

retry:
	dev_skipped = 0;

	down_write(&_hash_lock);

	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry(hc, _name_buckets + i, name_list) {
			md = hc->md;
			dm_get(md);

			if (keep_open_devices &&
			    dm_lock_for_deletion(md, mark_deferred, only_deferred)) {
				dm_put(md);
				dev_skipped++;
				continue;
			}

			t = __hash_remove(hc);

			up_write(&_hash_lock);

			if (t) {
				dm_sync_table(md);
				dm_table_destroy(t);
			}
			dm_put(md);
			if (likely(keep_open_devices))
				dm_destroy(md);
			else
				dm_destroy_immediate(md);

			/*
			 * Some mapped devices may be using other mapped
			 * devices, so repeat until we make no further
			 * progress.  If a new mapped device is created
			 * here it will also get removed.
			 */
			goto retry;
		}
	}

	up_write(&_hash_lock);

	if (dev_skipped)
		DMWARN("remove_all left %d open device(s)", dev_skipped);
}
/*
 * Set the uuid of a hash_cell that isn't already set.
 */
static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid)
{
	mutex_lock(&dm_hash_cells_mutex);
	hc->uuid = new_uuid;
	mutex_unlock(&dm_hash_cells_mutex);

	list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid));
}

/*
 * Changes the name of a hash_cell and returns the old name for
 * the caller to free.
 */
static char *__change_cell_name(struct hash_cell *hc, char *new_name)
{
	char *old_name;

	/*
	 * Rename and move the name cell.
	 */
	list_del(&hc->name_list);
	old_name = hc->name;

	mutex_lock(&dm_hash_cells_mutex);
	hc->name = new_name;
	mutex_unlock(&dm_hash_cells_mutex);

	list_add(&hc->name_list, _name_buckets + hash_str(new_name));

	return old_name;
}

static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
					    const char *new)
{
	char *new_data, *old_name = NULL;
	struct hash_cell *hc;
	struct dm_table *table;
	struct mapped_device *md;
	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
	int srcu_idx;

	/*
	 * duplicate new.
	 */
	new_data = kstrdup(new, GFP_KERNEL);
	if (!new_data)
		return ERR_PTR(-ENOMEM);

	down_write(&_hash_lock);

	/*
	 * Is new free ?
	 */
	if (change_uuid)
		hc = __get_uuid_cell(new);
	else
		hc = __get_name_cell(new);

	if (hc) {
		DMWARN("Unable to change %s on mapped device %s to one that "
		       "already exists: %s",
		       change_uuid ? "uuid" : "name",
		       param->name, new);
		dm_put(hc->md);
		up_write(&_hash_lock);
		kfree(new_data);
		return ERR_PTR(-EBUSY);
	}

	/*
	 * Is there such a device as 'old' ?
	 */
	hc = __get_name_cell(param->name);
	if (!hc) {
		DMWARN("Unable to rename non-existent device, %s to %s%s",
		       param->name, change_uuid ? "uuid " : "", new);
		up_write(&_hash_lock);
		kfree(new_data);
		return ERR_PTR(-ENXIO);
	}

	/*
	 * Does this device already have a uuid?
	 */
	if (change_uuid && hc->uuid) {
		DMWARN("Unable to change uuid of mapped device %s to %s "
		       "because uuid is already set to %s",
		       param->name, new, hc->uuid);
		dm_put(hc->md);
		up_write(&_hash_lock);
		kfree(new_data);
		return ERR_PTR(-EINVAL);
	}

	if (change_uuid)
		__set_cell_uuid(hc, new_data);
	else
		old_name = __change_cell_name(hc, new_data);

	/*
	 * Wake up any dm event waiters.
	 */
	table = dm_get_live_table(hc->md, &srcu_idx);
	if (table)
		dm_table_event(table);
	dm_put_live_table(hc->md, srcu_idx);

	if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr))
		param->flags |= DM_UEVENT_GENERATED_FLAG;

	md = hc->md;
	up_write(&_hash_lock);
	kfree(old_name);

	return md;
}

void dm_deferred_remove(void)
{
	dm_hash_remove_all(true, false, true);
}

/*-----------------------------------------------------------------
 * Implementation of the ioctl commands
 *---------------------------------------------------------------*/
/*
 * All the ioctl commands get dispatched to functions with this
 * prototype.
 */
typedef int (*ioctl_fn)(struct file *filp, struct dm_ioctl *param, size_t param_size);

static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	dm_hash_remove_all(true, !!(param->flags & DM_DEFERRED_REMOVE), false);
	param->data_size = 0;
	return 0;
}

/*
 * Round up the ptr to an 8-byte boundary.
 */
#define ALIGN_MASK 7
static inline void *align_ptr(void *ptr)
{
	return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK);
}
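/*
 * Illustrative sketch (added commentary): with ALIGN_MASK == 7,
 * align_ptr() rounds a pointer up to the next 8-byte boundary, e.g.
 *
 *	align_ptr((void *) 0x1001) == (void *) 0x1008
 *	align_ptr((void *) 0x1008) == (void *) 0x1008
 *
 * The result buffers built below rely on this so that each embedded
 * struct starts at a properly aligned offset.
 */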
/*
 * Retrieves the data payload buffer from an already allocated
 * struct dm_ioctl.
 */
static void *get_result_buffer(struct dm_ioctl *param, size_t param_size,
			       size_t *len)
{
	param->data_start = align_ptr(param + 1) - (void *) param;

	if (param->data_start < param_size)
		*len = param_size - param->data_start;
	else
		*len = 0;

	return ((void *) param) + param->data_start;
}

static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	unsigned int i;
	struct hash_cell *hc;
	size_t len, needed = 0;
	struct gendisk *disk;
	struct dm_name_list *nl, *old_nl = NULL;
	uint32_t *event_nr;

	down_write(&_hash_lock);

	/*
	 * Loop through all the devices working out how much
	 * space we need.
	 */
	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry (hc, _name_buckets + i, name_list) {
			needed += sizeof(struct dm_name_list);
			needed += strlen(hc->name) + 1;
			needed += ALIGN_MASK;
			needed += (sizeof(uint32_t) + ALIGN_MASK) & ~ALIGN_MASK;
		}
	}

	/*
	 * Grab our output buffer.
	 */
	nl = get_result_buffer(param, param_size, &len);
	if (len < needed) {
		param->flags |= DM_BUFFER_FULL_FLAG;
		goto out;
	}
	param->data_size = param->data_start + needed;

	nl->dev = 0;	/* Flags no data */

	/*
	 * Now loop through filling out the names.
	 */
	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry (hc, _name_buckets + i, name_list) {
			if (old_nl)
				old_nl->next = (uint32_t) ((void *) nl -
							   (void *) old_nl);
			disk = dm_disk(hc->md);
			nl->dev = huge_encode_dev(disk_devt(disk));
			nl->next = 0;
			strcpy(nl->name, hc->name);

			old_nl = nl;
			event_nr = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1);
			*event_nr = dm_get_event_nr(hc->md);
			nl = align_ptr(event_nr + 1);
		}
	}

out:
	up_write(&_hash_lock);
	return 0;
}
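/*
 * Layout note (added commentary): each entry emitted above is a
 * struct dm_name_list (dev plus a 'next' offset) followed by the
 * NUL-terminated name, then an 8-byte-aligned uint32_t event counter.
 * 'next' holds the byte offset from one entry to the next, and 0
 * terminates the list.  Userspace walks the buffer with the same
 * alignment rules to decode it.
 */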
static void list_version_get_needed(struct target_type *tt, void *needed_param)
{
	size_t *needed = needed_param;

	*needed += sizeof(struct dm_target_versions);
	*needed += strlen(tt->name);
	*needed += ALIGN_MASK;
}

static void list_version_get_info(struct target_type *tt, void *param)
{
	struct vers_iter *info = param;

	/* Check space - it might have changed since the first iteration */
	if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 >
	    info->end) {

		info->flags = DM_BUFFER_FULL_FLAG;
		return;
	}

	if (info->old_vers)
		info->old_vers->next = (uint32_t) ((void *)info->vers -
						   (void *)info->old_vers);
	info->vers->version[0] = tt->version[0];
	info->vers->version[1] = tt->version[1];
	info->vers->version[2] = tt->version[2];
	info->vers->next = 0;
	strcpy(info->vers->name, tt->name);

	info->old_vers = info->vers;
	info->vers = align_ptr(((void *) ++info->vers) + strlen(tt->name) + 1);
}

static int list_versions(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	size_t len, needed = 0;
	struct dm_target_versions *vers;
	struct vers_iter iter_info;

	/*
	 * Loop through all the devices working out how much
	 * space we need.
	 */
	dm_target_iterate(list_version_get_needed, &needed);

	/*
	 * Grab our output buffer.
	 */
	vers = get_result_buffer(param, param_size, &len);
	if (len < needed) {
		param->flags |= DM_BUFFER_FULL_FLAG;
		goto out;
	}
	param->data_size = param->data_start + needed;

	iter_info.param_size = param_size;
	iter_info.old_vers = NULL;
	iter_info.vers = vers;
	iter_info.flags = 0;
	iter_info.end = (char *)vers + len;

	/*
	 * Now loop through filling out the names & versions.
	 */
	dm_target_iterate(list_version_get_info, &iter_info);
	param->flags |= iter_info.flags;

out:
	return 0;
}

static int check_name(const char *name)
{
	if (strchr(name, '/')) {
		DMWARN("invalid device name");
		return -EINVAL;
	}

	return 0;
}

/*
 * On successful return, the caller must not attempt to acquire
 * _hash_lock without first calling dm_put_live_table, because dm_table_destroy
 * waits for this dm_put_live_table and could be called under this lock.
 */
static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *srcu_idx)
{
	struct hash_cell *hc;
	struct dm_table *table = NULL;

	/* increment rcu count, we don't care about the table pointer */
	dm_get_live_table(md, srcu_idx);

	down_read(&_hash_lock);
	hc = dm_get_mdptr(md);
	if (!hc || hc->md != md) {
		DMWARN("device has been removed from the dev hash table.");
		goto out;
	}

	table = hc->new_map;

out:
	up_read(&_hash_lock);

	return table;
}

static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md,
						      struct dm_ioctl *param,
						      int *srcu_idx)
{
	return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ?
		dm_get_inactive_table(md, srcu_idx) : dm_get_live_table(md, srcu_idx);
}
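/*
 * Note (added commentary): both branches above enter an SRCU read-side
 * critical section, so a caller of dm_get_live_or_inactive_table() must
 * always pair it with dm_put_live_table(md, srcu_idx), whether or not a
 * table pointer was returned.
 */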
/*
 * Fills in a dm_ioctl structure, ready for sending back to
 * userland.
 */
static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
{
	struct gendisk *disk = dm_disk(md);
	struct dm_table *table;
	int srcu_idx;

	param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
			  DM_ACTIVE_PRESENT_FLAG | DM_INTERNAL_SUSPEND_FLAG);

	if (dm_suspended_md(md))
		param->flags |= DM_SUSPEND_FLAG;

	if (dm_suspended_internally_md(md))
		param->flags |= DM_INTERNAL_SUSPEND_FLAG;

	if (dm_test_deferred_remove_flag(md))
		param->flags |= DM_DEFERRED_REMOVE;

	param->dev = huge_encode_dev(disk_devt(disk));

	/*
	 * Yes, this will be out of date by the time it gets back
	 * to userland, but it is still very useful for
	 * debugging.
	 */
	param->open_count = dm_open_count(md);

	param->event_nr = dm_get_event_nr(md);
	param->target_count = 0;

	table = dm_get_live_table(md, &srcu_idx);
	if (table) {
		if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) {
			if (get_disk_ro(disk))
				param->flags |= DM_READONLY_FLAG;
			param->target_count = dm_table_get_num_targets(table);
		}

		param->flags |= DM_ACTIVE_PRESENT_FLAG;
	}
	dm_put_live_table(md, srcu_idx);

	if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) {
		int srcu_idx;
		table = dm_get_inactive_table(md, &srcu_idx);
		if (table) {
			if (!(dm_table_get_mode(table) & FMODE_WRITE))
				param->flags |= DM_READONLY_FLAG;
			param->target_count = dm_table_get_num_targets(table);
		}
		dm_put_live_table(md, srcu_idx);
	}
}

static int dev_create(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	int r, m = DM_ANY_MINOR;
	struct mapped_device *md;

	r = check_name(param->name);
	if (r)
		return r;

	if (param->flags & DM_PERSISTENT_DEV_FLAG)
		m = MINOR(huge_decode_dev(param->dev));

	r = dm_create(m, &md);
	if (r)
		return r;

	r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
	if (r) {
		dm_put(md);
		dm_destroy(md);
		return r;
	}

	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	__dev_status(md, param);

	dm_put(md);

	return 0;
}

/*
 * Always use UUID for lookups if it's present, otherwise use name or dev.
 */
static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
{
	struct hash_cell *hc = NULL;

	if (*param->uuid) {
		if (*param->name || param->dev)
			return NULL;

		hc = __get_uuid_cell(param->uuid);
		if (!hc)
			return NULL;
	} else if (*param->name) {
		if (param->dev)
			return NULL;

		hc = __get_name_cell(param->name);
		if (!hc)
			return NULL;
	} else if (param->dev) {
		hc = __get_dev_cell(param->dev);
		if (!hc)
			return NULL;
	} else
		return NULL;

	/*
	 * Sneakily write in both the name and the uuid
	 * while we have the cell.
	 */
	strlcpy(param->name, hc->name, sizeof(param->name));
	if (hc->uuid)
		strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
	else
		param->uuid[0] = '\0';

	if (hc->new_map)
		param->flags |= DM_INACTIVE_PRESENT_FLAG;
	else
		param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	return hc;
}

static struct mapped_device *find_device(struct dm_ioctl *param)
{
	struct hash_cell *hc;
	struct mapped_device *md = NULL;

	down_read(&_hash_lock);
	hc = __find_device_hash_cell(param);
	if (hc)
		md = hc->md;
	up_read(&_hash_lock);

	return md;
}
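/*
 * Illustrative sketch (added commentary): __find_device_hash_cell()
 * accepts exactly one identifier per request, trying them in the order
 * uuid, then name, then dev, e.g.
 *
 *	uuid = "LVM-abc...", name = "", dev = 0	-> uuid lookup
 *	uuid = "", name = "vg0-lv0", dev = 0	-> name lookup
 *	uuid = "", name = "", dev = encoded	-> dev lookup
 *
 * Supplying more than one identifier (or none) makes the lookup fail
 * with NULL, which the ioctl handlers report as -ENXIO.
 */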
static int dev_remove(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	struct hash_cell *hc;
	struct mapped_device *md;
	int r;
	struct dm_table *t;

	down_write(&_hash_lock);
	hc = __find_device_hash_cell(param);

	if (!hc) {
		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);
		return -ENXIO;
	}

	md = hc->md;

	/*
	 * Ensure the device is not open and nothing further can open it.
	 */
	r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false);
	if (r) {
		if (r == -EBUSY && param->flags & DM_DEFERRED_REMOVE) {
			up_write(&_hash_lock);
			dm_put(md);
			return 0;
		}
		DMDEBUG_LIMIT("unable to remove open device %s", hc->name);
		up_write(&_hash_lock);
		dm_put(md);
		return r;
	}

	t = __hash_remove(hc);
	up_write(&_hash_lock);

	if (t) {
		dm_sync_table(md);
		dm_table_destroy(t);
	}

	param->flags &= ~DM_DEFERRED_REMOVE;

	if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
		param->flags |= DM_UEVENT_GENERATED_FLAG;

	dm_put(md);
	dm_destroy(md);
	return 0;
}

/*
 * Check a string doesn't overrun the chunk of
 * memory we copied from userland.
 */
static int invalid_str(char *str, void *end)
{
	while ((void *) str < end)
		if (!*str++)
			return 0;

	return -EINVAL;
}

static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	int r;
	char *new_data = (char *) param + param->data_start;
	struct mapped_device *md;
	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;

	if (new_data < param->data ||
	    invalid_str(new_data, (void *) param + param_size) || !*new_data ||
	    strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
		DMWARN("Invalid new mapped device name or uuid string supplied.");
		return -EINVAL;
	}

	if (!change_uuid) {
		r = check_name(new_data);
		if (r)
			return r;
	}

	md = dm_hash_rename(param, new_data);
	if (IS_ERR(md))
		return PTR_ERR(md);

	__dev_status(md, param);
	dm_put(md);

	return 0;
}

static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	int r = -EINVAL, x;
	struct mapped_device *md;
	struct hd_geometry geometry;
	unsigned long indata[4];
	char *geostr = (char *) param + param->data_start;
	char dummy;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	if (geostr < param->data ||
	    invalid_str(geostr, (void *) param + param_size)) {
		DMWARN("Invalid geometry supplied.");
		goto out;
	}

	x = sscanf(geostr, "%lu %lu %lu %lu%c", indata,
		   indata + 1, indata + 2, indata + 3, &dummy);

	if (x != 4) {
		DMWARN("Unable to interpret geometry settings.");
		goto out;
	}

	if (indata[0] > 65535 || indata[1] > 255 ||
	    indata[2] > 255 || indata[3] > ULONG_MAX) {
		DMWARN("Geometry exceeds range limits.");
		goto out;
	}

	geometry.cylinders = indata[0];
	geometry.heads = indata[1];
	geometry.sectors = indata[2];
	geometry.start = indata[3];

	r = dm_set_geometry(md, &geometry);

	param->data_size = 0;

out:
	dm_put(md);
	return r;
}

static int do_suspend(struct dm_ioctl *param)
{
	int r = 0;
	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
	struct mapped_device *md;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	if (param->flags & DM_SKIP_LOCKFS_FLAG)
		suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
	if (param->flags & DM_NOFLUSH_FLAG)
		suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;

	if (!dm_suspended_md(md)) {
		r = dm_suspend(md, suspend_flags);
		if (r)
			goto out;
	}

	__dev_status(md, param);

out:
	dm_put(md);

	return r;
}
static int do_resume(struct dm_ioctl *param)
{
	int r = 0;
	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
	struct hash_cell *hc;
	struct mapped_device *md;
	struct dm_table *new_map, *old_map = NULL;

	down_write(&_hash_lock);

	hc = __find_device_hash_cell(param);
	if (!hc) {
		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);
		return -ENXIO;
	}

	md = hc->md;

	new_map = hc->new_map;
	hc->new_map = NULL;
	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	up_write(&_hash_lock);

	/* Do we need to load a new map ? */
	if (new_map) {
		/* Suspend if it isn't already suspended */
		if (param->flags & DM_SKIP_LOCKFS_FLAG)
			suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
		if (param->flags & DM_NOFLUSH_FLAG)
			suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
		if (!dm_suspended_md(md))
			dm_suspend(md, suspend_flags);

		old_map = dm_swap_table(md, new_map);
		if (IS_ERR(old_map)) {
			dm_sync_table(md);
			dm_table_destroy(new_map);
			dm_put(md);
			return PTR_ERR(old_map);
		}

		if (dm_table_get_mode(new_map) & FMODE_WRITE)
			set_disk_ro(dm_disk(md), 0);
		else
			set_disk_ro(dm_disk(md), 1);
	}

	if (dm_suspended_md(md)) {
		r = dm_resume(md);
		if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
			param->flags |= DM_UEVENT_GENERATED_FLAG;
	}

	/*
	 * Since dm_swap_table synchronizes RCU, nobody should be in
	 * read-side critical section already.
	 */
	if (old_map)
		dm_table_destroy(old_map);

	if (!r)
		__dev_status(md, param);

	dm_put(md);
	return r;
}

/*
 * Set or unset the suspension state of a device.
 * If the device already is in the requested state we just return its status.
 */
static int dev_suspend(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	if (param->flags & DM_SUSPEND_FLAG)
		return do_suspend(param);

	return do_resume(param);
}
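/*
 * Illustrative userspace sketch (added commentary, hypothetical code;
 * "vg0-lv0" is an example name): the same DM_DEV_SUSPEND ioctl drives
 * both transitions.  Setting DM_SUSPEND_FLAG suspends the device;
 * clearing it resumes and, if an inactive table was staged, swaps it in:
 *
 *	struct dm_ioctl dmi = { .version = {DM_VERSION_MAJOR, 0, 0},
 *				.data_size = sizeof(dmi) };
 *	strncpy(dmi.name, "vg0-lv0", DM_NAME_LEN - 1);
 *	dmi.flags = DM_SUSPEND_FLAG;
 *	ioctl(ctl_fd, DM_DEV_SUSPEND, &dmi);	// suspend
 *	dmi.flags = 0;
 *	ioctl(ctl_fd, DM_DEV_SUSPEND, &dmi);	// resume / swap table
 */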
/*
 * Copies device info back to user space, used by
 * the create and info ioctls.
 */
static int dev_status(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	struct mapped_device *md;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	__dev_status(md, param);
	dm_put(md);

	return 0;
}

/*
 * Build up the status struct for each target
 */
static void retrieve_status(struct dm_table *table,
			    struct dm_ioctl *param, size_t param_size)
{
	unsigned int i, num_targets;
	struct dm_target_spec *spec;
	char *outbuf, *outptr;
	status_type_t type;
	size_t remaining, len, used = 0;
	unsigned status_flags = 0;

	outptr = outbuf = get_result_buffer(param, param_size, &len);

	if (param->flags & DM_STATUS_TABLE_FLAG)
		type = STATUSTYPE_TABLE;
	else
		type = STATUSTYPE_INFO;

	/* Get all the target info */
	num_targets = dm_table_get_num_targets(table);
	for (i = 0; i < num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(table, i);
		size_t l;

		remaining = len - (outptr - outbuf);
		if (remaining <= sizeof(struct dm_target_spec)) {
			param->flags |= DM_BUFFER_FULL_FLAG;
			break;
		}

		spec = (struct dm_target_spec *) outptr;

		spec->status = 0;
		spec->sector_start = ti->begin;
		spec->length = ti->len;
		strncpy(spec->target_type, ti->type->name,
			sizeof(spec->target_type));

		outptr += sizeof(struct dm_target_spec);
		remaining = len - (outptr - outbuf);
		if (remaining <= 0) {
			param->flags |= DM_BUFFER_FULL_FLAG;
			break;
		}

		/* Get the status/table string from the target driver */
		if (ti->type->status) {
			if (param->flags & DM_NOFLUSH_FLAG)
				status_flags |= DM_STATUS_NOFLUSH_FLAG;
			ti->type->status(ti, type, status_flags, outptr, remaining);
		} else
			outptr[0] = '\0';

		l = strlen(outptr) + 1;
		if (l == remaining) {
			param->flags |= DM_BUFFER_FULL_FLAG;
			break;
		}

		outptr += l;
		used = param->data_start + (outptr - outbuf);

		outptr = align_ptr(outptr);
		spec->next = outptr - outbuf;
	}

	if (used)
		param->data_size = used;

	param->target_count = num_targets;
}

/*
 * Wait for a device to report an event
 */
static int dev_wait(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	int r = 0;
	struct mapped_device *md;
	struct dm_table *table;
	int srcu_idx;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	/*
	 * Wait for a notification event
	 */
	if (dm_wait_event(md, param->event_nr)) {
		r = -ERESTARTSYS;
		goto out;
	}

	/*
	 * The userland program is going to want to know what
	 * changed to trigger the event, so we may as well tell
	 * him and save an ioctl.
	 */
	__dev_status(md, param);

	table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
	if (table)
		retrieve_status(table, param, param_size);
	dm_put_live_table(md, srcu_idx);

out:
	dm_put(md);

	return r;
}
/*
 * Remember the global event number and make it possible to poll
 * for further events.
 */
static int dev_arm_poll(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	struct dm_file *priv = filp->private_data;

	priv->global_event_nr = atomic_read(&dm_global_event_nr);

	return 0;
}

static inline fmode_t get_mode(struct dm_ioctl *param)
{
	fmode_t mode = FMODE_READ | FMODE_WRITE;

	if (param->flags & DM_READONLY_FLAG)
		mode = FMODE_READ;

	return mode;
}

static int next_target(struct dm_target_spec *last, uint32_t next, void *end,
		       struct dm_target_spec **spec, char **target_params)
{
	*spec = (struct dm_target_spec *) ((unsigned char *) last + next);
	*target_params = (char *) (*spec + 1);

	if (*spec < (last + 1))
		return -EINVAL;

	return invalid_str(*target_params, end);
}

static int populate_table(struct dm_table *table,
			  struct dm_ioctl *param, size_t param_size)
{
	int r;
	unsigned int i = 0;
	struct dm_target_spec *spec = (struct dm_target_spec *) param;
	uint32_t next = param->data_start;
	void *end = (void *) param + param_size;
	char *target_params;

	if (!param->target_count) {
		DMWARN("populate_table: no targets specified");
		return -EINVAL;
	}

	for (i = 0; i < param->target_count; i++) {

		r = next_target(spec, next, end, &spec, &target_params);
		if (r) {
			DMWARN("unable to find target");
			return r;
		}

		r = dm_table_add_target(table, spec->target_type,
					(sector_t) spec->sector_start,
					(sector_t) spec->length,
					target_params);
		if (r) {
			DMWARN("error adding target to table");
			return r;
		}

		next = spec->next;
	}

	return dm_table_complete(table);
}
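/*
 * Layout note (added commentary): a DM_TABLE_LOAD payload is a chain of
 * struct dm_target_spec records starting at param->data_start.  Each
 * spec is immediately followed by its NUL-terminated parameter string,
 * and spec->next gives the byte offset from the current spec to the
 * next one.  A one-target linear mapping would look roughly like:
 *
 *	[dm_ioctl header][dm_target_spec: sector_start=0, length=N,
 *	 target_type="linear", next=...]["/dev/sdX 0\0"]
 *
 * next_target()/invalid_str() above check that each spec and its string
 * stay inside the copied-in buffer.
 */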
static bool is_valid_type(enum dm_queue_mode cur, enum dm_queue_mode new)
{
	if (cur == new ||
	    (cur == DM_TYPE_BIO_BASED && new == DM_TYPE_DAX_BIO_BASED))
		return true;

	return false;
}

static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	int r;
	struct hash_cell *hc;
	struct dm_table *t, *old_map = NULL;
	struct mapped_device *md;
	struct target_type *immutable_target_type;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	r = dm_table_create(&t, get_mode(param), param->target_count, md);
	if (r)
		goto err;

	/* Protect md->type and md->queue against concurrent table loads. */
	dm_lock_md_type(md);
	r = populate_table(t, param, param_size);
	if (r)
		goto err_unlock_md_type;

	immutable_target_type = dm_get_immutable_target_type(md);
	if (immutable_target_type &&
	    (immutable_target_type != dm_table_get_immutable_target_type(t)) &&
	    !dm_table_get_wildcard_target(t)) {
		DMWARN("can't replace immutable target type %s",
		       immutable_target_type->name);
		r = -EINVAL;
		goto err_unlock_md_type;
	}

	if (dm_get_md_type(md) == DM_TYPE_NONE) {
		/* Initial table load: acquire type of table. */
		dm_set_md_type(md, dm_table_get_type(t));

		/* setup md->queue to reflect md's type (may block) */
		r = dm_setup_md_queue(md, t);
		if (r) {
			DMWARN("unable to set up device queue for new table.");
			goto err_unlock_md_type;
		}
	} else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) {
		DMWARN("can't change device type after initial table load.");
		r = -EINVAL;
		goto err_unlock_md_type;
	}

	dm_unlock_md_type(md);

	/* stage inactive table */
	down_write(&_hash_lock);
	hc = dm_get_mdptr(md);
	if (!hc || hc->md != md) {
		DMWARN("device has been removed from the dev hash table.");
		up_write(&_hash_lock);
		r = -ENXIO;
		goto err_destroy_table;
	}

	if (hc->new_map)
		old_map = hc->new_map;
	hc->new_map = t;
	up_write(&_hash_lock);

	param->flags |= DM_INACTIVE_PRESENT_FLAG;
	__dev_status(md, param);

	if (old_map) {
		dm_sync_table(md);
		dm_table_destroy(old_map);
	}

	dm_put(md);

	return 0;

err_unlock_md_type:
	dm_unlock_md_type(md);
err_destroy_table:
	dm_table_destroy(t);
err:
	dm_put(md);

	return r;
}

static int table_clear(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	struct hash_cell *hc;
	struct mapped_device *md;
	struct dm_table *old_map = NULL;

	down_write(&_hash_lock);

	hc = __find_device_hash_cell(param);
	if (!hc) {
		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);
		return -ENXIO;
	}

	if (hc->new_map) {
		old_map = hc->new_map;
		hc->new_map = NULL;
	}

	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	__dev_status(hc->md, param);
	md = hc->md;
	up_write(&_hash_lock);
	if (old_map) {
		dm_sync_table(md);
		dm_table_destroy(old_map);
	}
	dm_put(md);

	return 0;
}
/*
 * Retrieves a list of devices used by a particular dm device.
 */
static void retrieve_deps(struct dm_table *table,
			  struct dm_ioctl *param, size_t param_size)
{
	unsigned int count = 0;
	struct list_head *tmp;
	size_t len, needed;
	struct dm_dev_internal *dd;
	struct dm_target_deps *deps;

	deps = get_result_buffer(param, param_size, &len);

	/*
	 * Count the devices.
	 */
	list_for_each (tmp, dm_table_get_devices(table))
		count++;

	/*
	 * Check we have enough space.
	 */
	needed = sizeof(*deps) + (sizeof(*deps->dev) * count);
	if (len < needed) {
		param->flags |= DM_BUFFER_FULL_FLAG;
		return;
	}

	/*
	 * Fill in the devices.
	 */
	deps->count = count;
	count = 0;
	list_for_each_entry (dd, dm_table_get_devices(table), list)
		deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);

	param->data_size = param->data_start + needed;
}

static int table_deps(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	struct mapped_device *md;
	struct dm_table *table;
	int srcu_idx;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	__dev_status(md, param);

	table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
	if (table)
		retrieve_deps(table, param, param_size);
	dm_put_live_table(md, srcu_idx);

	dm_put(md);

	return 0;
}

/*
 * Return the status of a device as a text string for each
 * target.
 */
static int table_status(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	struct mapped_device *md;
	struct dm_table *table;
	int srcu_idx;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	__dev_status(md, param);

	table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
	if (table)
		retrieve_status(table, param, param_size);
	dm_put_live_table(md, srcu_idx);

	dm_put(md);

	return 0;
}

/*
 * Process device-mapper dependent messages.  Messages prefixed with '@'
 * are processed by the DM core.  All others are delivered to the target.
 * Returns a number <= 1 if message was processed by device mapper.
 * Returns 2 if message should be delivered to the target.
 */
static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
			  char *result, unsigned maxlen)
{
	int r;

	if (**argv != '@')
		return 2; /* no '@' prefix, deliver to target */

	if (!strcasecmp(argv[0], "@cancel_deferred_remove")) {
		if (argc != 1) {
			DMERR("Invalid arguments for @cancel_deferred_remove");
			return -EINVAL;
		}
		return dm_cancel_deferred_remove(md);
	}

	r = dm_stats_message(md, argc, argv, result, maxlen);
	if (r < 2)
		return r;

	DMERR("Unsupported message sent to DM core: %s", argv[0]);
	return -EINVAL;
}
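/*
 * Illustrative sketch (added commentary): given the return convention
 * above, target_message() below interprets the result as
 *
 *	r <= 0	-> handled by the core; r is the error code (or 0)
 *	r == 1	-> handled by the core and produced output in 'result'
 *	r == 2	-> not a core message; route it to the target driver
 *
 * e.g. "@stats_list" is consumed by dm_stats_message(), while a plain
 * "some_target_command" (hypothetical message) falls through to
 * ti->type->message().
 */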
/*
 * Pass a message to the target that's at the supplied device offset.
 */
static int target_message(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	int r, argc;
	char **argv;
	struct mapped_device *md;
	struct dm_table *table;
	struct dm_target *ti;
	struct dm_target_msg *tmsg = (void *) param + param->data_start;
	size_t maxlen;
	char *result = get_result_buffer(param, param_size, &maxlen);
	int srcu_idx;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	if (tmsg < (struct dm_target_msg *) param->data ||
	    invalid_str(tmsg->message, (void *) param + param_size)) {
		DMWARN("Invalid target message parameters.");
		r = -EINVAL;
		goto out;
	}

	r = dm_split_args(&argc, &argv, tmsg->message);
	if (r) {
		DMWARN("Failed to split target message parameters");
		goto out;
	}

	if (!argc) {
		DMWARN("Empty message received.");
		goto out_argv;
	}

	r = message_for_md(md, argc, argv, result, maxlen);
	if (r <= 1)
		goto out_argv;

	table = dm_get_live_table(md, &srcu_idx);
	if (!table)
		goto out_table;

	if (dm_deleting_md(md)) {
		r = -ENXIO;
		goto out_table;
	}

	ti = dm_table_find_target(table, tmsg->sector);
	if (!dm_target_is_valid(ti)) {
		DMWARN("Target message sector outside device.");
		r = -EINVAL;
	} else if (ti->type->message)
		r = ti->type->message(ti, argc, argv);
	else {
		DMWARN("Target type does not support messages");
		r = -EINVAL;
	}

out_table:
	dm_put_live_table(md, srcu_idx);
out_argv:
	kfree(argv);
out:
	if (r >= 0)
		__dev_status(md, param);

	if (r == 1) {
		param->flags |= DM_DATA_OUT_FLAG;
		if (dm_message_test_buffer_overflow(result, maxlen))
			param->flags |= DM_BUFFER_FULL_FLAG;
		else
			param->data_size = param->data_start + strlen(result) + 1;
		r = 0;
	}

	dm_put(md);
	return r;
}

/*
 * The ioctl parameter block consists of two parts, a dm_ioctl struct
 * followed by a data buffer.  This flag is set if the second part,
 * which has a variable size, is not used by the function processing
 * the ioctl.
 */
#define IOCTL_FLAGS_NO_PARAMS 1
1629 *---------------------------------------------------------------*/ 1630 static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags) 1631 { 1632 static struct { 1633 int cmd; 1634 int flags; 1635 ioctl_fn fn; 1636 } _ioctls[] = { 1637 {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */ 1638 {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all}, 1639 {DM_LIST_DEVICES_CMD, 0, list_devices}, 1640 1641 {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create}, 1642 {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove}, 1643 {DM_DEV_RENAME_CMD, 0, dev_rename}, 1644 {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend}, 1645 {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status}, 1646 {DM_DEV_WAIT_CMD, 0, dev_wait}, 1647 1648 {DM_TABLE_LOAD_CMD, 0, table_load}, 1649 {DM_TABLE_CLEAR_CMD, IOCTL_FLAGS_NO_PARAMS, table_clear}, 1650 {DM_TABLE_DEPS_CMD, 0, table_deps}, 1651 {DM_TABLE_STATUS_CMD, 0, table_status}, 1652 1653 {DM_LIST_VERSIONS_CMD, 0, list_versions}, 1654 1655 {DM_TARGET_MSG_CMD, 0, target_message}, 1656 {DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry}, 1657 {DM_DEV_ARM_POLL, IOCTL_FLAGS_NO_PARAMS, dev_arm_poll}, 1658 }; 1659 1660 if (unlikely(cmd >= ARRAY_SIZE(_ioctls))) 1661 return NULL; 1662 1663 *ioctl_flags = _ioctls[cmd].flags; 1664 return _ioctls[cmd].fn; 1665 } 1666 1667 /* 1668 * As well as checking the version compatibility this always 1669 * copies the kernel interface version out. 1670 */ 1671 static int check_version(unsigned int cmd, struct dm_ioctl __user *user) 1672 { 1673 uint32_t version[3]; 1674 int r = 0; 1675 1676 if (copy_from_user(version, user->version, sizeof(version))) 1677 return -EFAULT; 1678 1679 if ((DM_VERSION_MAJOR != version[0]) || 1680 (DM_VERSION_MINOR < version[1])) { 1681 DMWARN("ioctl interface mismatch: " 1682 "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", 1683 DM_VERSION_MAJOR, DM_VERSION_MINOR, 1684 DM_VERSION_PATCHLEVEL, 1685 version[0], version[1], version[2], cmd); 1686 r = -EINVAL; 1687 } 1688 1689 /* 1690 * Fill in the kernel version. 1691 */ 1692 version[0] = DM_VERSION_MAJOR; 1693 version[1] = DM_VERSION_MINOR; 1694 version[2] = DM_VERSION_PATCHLEVEL; 1695 if (copy_to_user(user->version, version, sizeof(version))) 1696 return -EFAULT; 1697 1698 return r; 1699 } 1700 1701 #define DM_PARAMS_MALLOC 0x0001 /* Params allocated with kvmalloc() */ 1702 #define DM_WIPE_BUFFER 0x0010 /* Wipe input buffer before returning from ioctl */ 1703 1704 static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags) 1705 { 1706 if (param_flags & DM_WIPE_BUFFER) 1707 memset(param, 0, param_size); 1708 1709 if (param_flags & DM_PARAMS_MALLOC) 1710 kvfree(param); 1711 } 1712 1713 static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel, 1714 int ioctl_flags, 1715 struct dm_ioctl **param, int *param_flags) 1716 { 1717 struct dm_ioctl *dmi; 1718 int secure_data; 1719 const size_t minimum_data_size = offsetof(struct dm_ioctl, data); 1720 unsigned noio_flag; 1721 1722 if (copy_from_user(param_kernel, user, minimum_data_size)) 1723 return -EFAULT; 1724 1725 if (param_kernel->data_size < minimum_data_size) 1726 return -EINVAL; 1727 1728 secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG; 1729 1730 *param_flags = secure_data ? 
#define DM_PARAMS_MALLOC	0x0001	/* Params allocated with kvmalloc() */
#define DM_WIPE_BUFFER		0x0010	/* Wipe input buffer before returning from ioctl */

static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags)
{
	if (param_flags & DM_WIPE_BUFFER)
		memset(param, 0, param_size);

	if (param_flags & DM_PARAMS_MALLOC)
		kvfree(param);
}

static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
		       int ioctl_flags,
		       struct dm_ioctl **param, int *param_flags)
{
	struct dm_ioctl *dmi;
	int secure_data;
	const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
	unsigned noio_flag;

	if (copy_from_user(param_kernel, user, minimum_data_size))
		return -EFAULT;

	if (param_kernel->data_size < minimum_data_size)
		return -EINVAL;

	secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG;

	*param_flags = secure_data ? DM_WIPE_BUFFER : 0;

	if (ioctl_flags & IOCTL_FLAGS_NO_PARAMS) {
		dmi = param_kernel;
		dmi->data_size = minimum_data_size;
		goto data_copied;
	}

	/*
	 * Use __GFP_HIGH to avoid low memory issues when a device is
	 * suspended and the ioctl is needed to resume it.
	 * Use kmalloc() rather than vmalloc() when we can.
	 */
	dmi = NULL;
	noio_flag = memalloc_noio_save();
	dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH);
	memalloc_noio_restore(noio_flag);

	if (!dmi) {
		if (secure_data && clear_user(user, param_kernel->data_size))
			return -EFAULT;
		return -ENOMEM;
	}

	*param_flags |= DM_PARAMS_MALLOC;

	if (copy_from_user(dmi, user, param_kernel->data_size))
		goto bad;

data_copied:
	/*
	 * Abort if something changed the ioctl data while it was being copied.
	 */
	if (dmi->data_size != param_kernel->data_size) {
		DMERR("rejecting ioctl: data size modified while processing parameters");
		goto bad;
	}

	/* Wipe the user buffer so we do not return it to userspace */
	if (secure_data && clear_user(user, param_kernel->data_size))
		goto bad;

	*param = dmi;
	return 0;

bad:
	free_params(dmi, param_kernel->data_size, *param_flags);

	return -EFAULT;
}

static int validate_params(uint cmd, struct dm_ioctl *param)
{
	/* Always clear this flag */
	param->flags &= ~DM_BUFFER_FULL_FLAG;
	param->flags &= ~DM_UEVENT_GENERATED_FLAG;
	param->flags &= ~DM_SECURE_DATA_FLAG;
	param->flags &= ~DM_DATA_OUT_FLAG;

	/* Ignores parameters */
	if (cmd == DM_REMOVE_ALL_CMD ||
	    cmd == DM_LIST_DEVICES_CMD ||
	    cmd == DM_LIST_VERSIONS_CMD)
		return 0;

	if (cmd == DM_DEV_CREATE_CMD) {
		if (!*param->name) {
			DMWARN("name not supplied when creating device");
			return -EINVAL;
		}
	} else if (*param->uuid && *param->name) {
		DMWARN("only supply one of name or uuid, cmd(%u)", cmd);
		return -EINVAL;
	}

	/* Ensure strings are terminated */
	param->name[DM_NAME_LEN - 1] = '\0';
	param->uuid[DM_UUID_LEN - 1] = '\0';

	return 0;
}
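/*
 * Note (added commentary): when userspace sets DM_SECURE_DATA_FLAG
 * (e.g. for a table line containing a dm-crypt key), copy_params()
 * clears the caller's buffer with clear_user() once the data has been
 * copied in, and free_params() memsets the kernel copy before freeing
 * it, so the sensitive parameters do not linger in either address
 * space.
 */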
static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *user)
{
	int r = 0;
	int ioctl_flags;
	int param_flags;
	unsigned int cmd;
	struct dm_ioctl *uninitialized_var(param);
	ioctl_fn fn = NULL;
	size_t input_param_size;
	struct dm_ioctl param_kernel;

	/* only root can play with this */
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (_IOC_TYPE(command) != DM_IOCTL)
		return -ENOTTY;

	cmd = _IOC_NR(command);

	/*
	 * Check the interface version passed in.  This also
	 * writes out the kernel's interface version.
	 */
	r = check_version(cmd, user);
	if (r)
		return r;

	/*
	 * Nothing more to do for the version command.
	 */
	if (cmd == DM_VERSION_CMD)
		return 0;

	fn = lookup_ioctl(cmd, &ioctl_flags);
	if (!fn) {
		DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
		return -ENOTTY;
	}

	/*
	 * Copy the parameters into kernel space.
	 */
	r = copy_params(user, &param_kernel, ioctl_flags, &param, &param_flags);

	if (r)
		return r;

	input_param_size = param->data_size;
	r = validate_params(cmd, param);
	if (r)
		goto out;

	param->data_size = offsetof(struct dm_ioctl, data);
	r = fn(file, param, input_param_size);

	if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
	    unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
		DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);

	/*
	 * Copy the results back to userland.
	 */
	if (!r && copy_to_user(user, param, param->data_size))
		r = -EFAULT;

out:
	free_params(param, input_param_size, param_flags);
	return r;
}

static long dm_ctl_ioctl(struct file *file, uint command, ulong u)
{
	return (long)ctl_ioctl(file, command, (struct dm_ioctl __user *)u);
}

#ifdef CONFIG_COMPAT
static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u)
{
	return (long)dm_ctl_ioctl(file, command, (ulong) compat_ptr(u));
}
#else
#define dm_compat_ctl_ioctl NULL
#endif

static int dm_open(struct inode *inode, struct file *filp)
{
	int r;
	struct dm_file *priv;

	r = nonseekable_open(inode, filp);
	if (unlikely(r))
		return r;

	priv = filp->private_data = kmalloc(sizeof(struct dm_file), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->global_event_nr = atomic_read(&dm_global_event_nr);

	return 0;
}

static int dm_release(struct inode *inode, struct file *filp)
{
	kfree(filp->private_data);
	return 0;
}

static unsigned dm_poll(struct file *filp, poll_table *wait)
{
	struct dm_file *priv = filp->private_data;
	unsigned mask = 0;

	poll_wait(filp, &dm_global_eventq, wait);

	if ((int)(atomic_read(&dm_global_event_nr) - priv->global_event_nr) > 0)
		mask |= POLLIN;

	return mask;
}

static const struct file_operations _ctl_fops = {
	.open		= dm_open,
	.release	= dm_release,
	.poll		= dm_poll,
	.unlocked_ioctl	= dm_ctl_ioctl,
	.compat_ioctl	= dm_compat_ctl_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice _dm_misc = {
	.minor		= MAPPER_CTRL_MINOR,
	.name		= DM_NAME,
	.nodename	= DM_DIR "/" DM_CONTROL_NODE,
	.fops		= &_ctl_fops
};

MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR);
MODULE_ALIAS("devname:" DM_DIR "/" DM_CONTROL_NODE);
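/*
 * Illustrative userspace sketch (added commentary, hypothetical code):
 * event polling pairs DM_DEV_ARM_POLL with poll(2) on the control node.
 * Arming records the current global event number in the per-file
 * struct dm_file; dm_poll() then reports POLLIN once any device raises
 * a newer event:
 *
 *	int ctl_fd = open("/dev/mapper/control", O_RDWR);
 *	struct dm_ioctl dmi = { .version = {DM_VERSION_MAJOR, 0, 0},
 *				.data_size = sizeof(dmi) };
 *	ioctl(ctl_fd, DM_DEV_ARM_POLL, &dmi);
 *	struct pollfd pfd = { .fd = ctl_fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// wakes on the next dm event
 *
 * The unsigned-wraparound comparison in dm_poll() keeps this correct
 * even when the event counter overflows.
 */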
/*
 * Create misc character device and link to DM_DIR/control.
 */
int __init dm_interface_init(void)
{
	int r;

	r = dm_hash_init();
	if (r)
		return r;

	r = misc_register(&_dm_misc);
	if (r) {
		DMERR("misc_register failed for control device");
		dm_hash_exit();
		return r;
	}

	DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR,
	       DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA,
	       DM_DRIVER_EMAIL);
	return 0;
}

void dm_interface_exit(void)
{
	misc_deregister(&_dm_misc);
	dm_hash_exit();
}

/**
 * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers
 * @md: Pointer to mapped_device
 * @name: Buffer (size DM_NAME_LEN) for name
 * @uuid: Buffer (size DM_UUID_LEN) for uuid or empty string if uuid not defined
 */
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
{
	int r = 0;
	struct hash_cell *hc;

	if (!md)
		return -ENXIO;

	mutex_lock(&dm_hash_cells_mutex);
	hc = dm_get_mdptr(md);
	if (!hc || hc->md != md) {
		r = -ENXIO;
		goto out;
	}

	if (name)
		strcpy(name, hc->name);
	if (uuid)
		strcpy(uuid, hc->uuid ? : "");

out:
	mutex_unlock(&dm_hash_cells_mutex);

	return r;
}