1 /* 2 * Copyright (c) International Business Machines Corp., 2006 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See 12 * the GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write to the Free Software 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * 18 * Author: Artem Bityutskiy (Битюцкий Артём) 19 */ 20 21 /* 22 * This file includes implementation of UBI character device operations. 23 * 24 * There are two kinds of character devices in UBI: UBI character devices and 25 * UBI volume character devices. UBI character devices allow users to 26 * manipulate whole volumes: create, remove, and re-size them. Volume character 27 * devices provide volume I/O capabilities. 28 * 29 * Major and minor numbers are assigned dynamically to both UBI and volume 30 * character devices. 31 * 32 * Well, there is the third kind of character devices - the UBI control 33 * character device, which allows to manipulate by UBI devices - create and 34 * delete them. In other words, it is used for attaching and detaching MTD 35 * devices. 36 */ 37 38 #include <linux/module.h> 39 #include <linux/stat.h> 40 #include <linux/ioctl.h> 41 #include <linux/capability.h> 42 #include <linux/uaccess.h> 43 #include <linux/compat.h> 44 #include <linux/math64.h> 45 #include <mtd/ubi-user.h> 46 #include "ubi.h" 47 48 /** 49 * get_exclusive - get exclusive access to an UBI volume. 50 * @desc: volume descriptor 51 * 52 * This function changes UBI volume open mode to "exclusive". Returns previous 53 * mode value (positive integer) in case of success and a negative error code 54 * in case of failure. 55 */ 56 static int get_exclusive(struct ubi_volume_desc *desc) 57 { 58 int users, err; 59 struct ubi_volume *vol = desc->vol; 60 61 spin_lock(&vol->ubi->volumes_lock); 62 users = vol->readers + vol->writers + vol->exclusive; 63 ubi_assert(users > 0); 64 if (users > 1) { 65 dbg_err("%d users for volume %d", users, vol->vol_id); 66 err = -EBUSY; 67 } else { 68 vol->readers = vol->writers = 0; 69 vol->exclusive = 1; 70 err = desc->mode; 71 desc->mode = UBI_EXCLUSIVE; 72 } 73 spin_unlock(&vol->ubi->volumes_lock); 74 75 return err; 76 } 77 78 /** 79 * revoke_exclusive - revoke exclusive mode. 80 * @desc: volume descriptor 81 * @mode: new mode to switch to 82 */ 83 static void revoke_exclusive(struct ubi_volume_desc *desc, int mode) 84 { 85 struct ubi_volume *vol = desc->vol; 86 87 spin_lock(&vol->ubi->volumes_lock); 88 ubi_assert(vol->readers == 0 && vol->writers == 0); 89 ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE); 90 vol->exclusive = 0; 91 if (mode == UBI_READONLY) 92 vol->readers = 1; 93 else if (mode == UBI_READWRITE) 94 vol->writers = 1; 95 else 96 vol->exclusive = 1; 97 spin_unlock(&vol->ubi->volumes_lock); 98 99 desc->mode = mode; 100 } 101 102 static int vol_cdev_open(struct inode *inode, struct file *file) 103 { 104 struct ubi_volume_desc *desc; 105 int vol_id = iminor(inode) - 1, mode, ubi_num; 106 107 ubi_num = ubi_major2num(imajor(inode)); 108 if (ubi_num < 0) 109 return ubi_num; 110 111 if (file->f_mode & FMODE_WRITE) 112 mode = UBI_READWRITE; 113 else 114 mode = UBI_READONLY; 115 116 dbg_gen("open device %d, volume %d, mode %d", 117 ubi_num, vol_id, mode); 118 119 desc = ubi_open_volume(ubi_num, vol_id, mode); 120 if (IS_ERR(desc)) 121 return PTR_ERR(desc); 122 123 file->private_data = desc; 124 return 0; 125 } 126 127 static int vol_cdev_release(struct inode *inode, struct file *file) 128 { 129 struct ubi_volume_desc *desc = file->private_data; 130 struct ubi_volume *vol = desc->vol; 131 132 dbg_gen("release device %d, volume %d, mode %d", 133 vol->ubi->ubi_num, vol->vol_id, desc->mode); 134 135 if (vol->updating) { 136 ubi_warn("update of volume %d not finished, volume is damaged", 137 vol->vol_id); 138 ubi_assert(!vol->changing_leb); 139 vol->updating = 0; 140 vfree(vol->upd_buf); 141 } else if (vol->changing_leb) { 142 dbg_gen("only %lld of %lld bytes received for atomic LEB change" 143 " for volume %d:%d, cancel", vol->upd_received, 144 vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id); 145 vol->changing_leb = 0; 146 vfree(vol->upd_buf); 147 } 148 149 ubi_close_volume(desc); 150 return 0; 151 } 152 153 static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin) 154 { 155 struct ubi_volume_desc *desc = file->private_data; 156 struct ubi_volume *vol = desc->vol; 157 loff_t new_offset; 158 159 if (vol->updating) { 160 /* Update is in progress, seeking is prohibited */ 161 dbg_err("updating"); 162 return -EBUSY; 163 } 164 165 switch (origin) { 166 case 0: /* SEEK_SET */ 167 new_offset = offset; 168 break; 169 case 1: /* SEEK_CUR */ 170 new_offset = file->f_pos + offset; 171 break; 172 case 2: /* SEEK_END */ 173 new_offset = vol->used_bytes + offset; 174 break; 175 default: 176 return -EINVAL; 177 } 178 179 if (new_offset < 0 || new_offset > vol->used_bytes) { 180 dbg_err("bad seek %lld", new_offset); 181 return -EINVAL; 182 } 183 184 dbg_gen("seek volume %d, offset %lld, origin %d, new offset %lld", 185 vol->vol_id, offset, origin, new_offset); 186 187 file->f_pos = new_offset; 188 return new_offset; 189 } 190 191 static int vol_cdev_fsync(struct file *file, struct dentry *dentry, 192 int datasync) 193 { 194 struct ubi_volume_desc *desc = file->private_data; 195 struct ubi_device *ubi = desc->vol->ubi; 196 197 return ubi_sync(ubi->ubi_num); 198 } 199 200 201 static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count, 202 loff_t *offp) 203 { 204 struct ubi_volume_desc *desc = file->private_data; 205 struct ubi_volume *vol = desc->vol; 206 struct ubi_device *ubi = vol->ubi; 207 int err, lnum, off, len, tbuf_size; 208 size_t count_save = count; 209 void *tbuf; 210 211 dbg_gen("read %zd bytes from offset %lld of volume %d", 212 count, *offp, vol->vol_id); 213 214 if (vol->updating) { 215 dbg_err("updating"); 216 return -EBUSY; 217 } 218 if (vol->upd_marker) { 219 dbg_err("damaged volume, update marker is set"); 220 return -EBADF; 221 } 222 if (*offp == vol->used_bytes || count == 0) 223 return 0; 224 225 if (vol->corrupted) 226 dbg_gen("read from corrupted volume %d", vol->vol_id); 227 228 if (*offp + count > vol->used_bytes) 229 count_save = count = vol->used_bytes - *offp; 230 231 tbuf_size = vol->usable_leb_size; 232 if (count < tbuf_size) 233 tbuf_size = ALIGN(count, ubi->min_io_size); 234 tbuf = vmalloc(tbuf_size); 235 if (!tbuf) 236 return -ENOMEM; 237 238 len = count > tbuf_size ? tbuf_size : count; 239 lnum = div_u64_rem(*offp, vol->usable_leb_size, &off); 240 241 do { 242 cond_resched(); 243 244 if (off + len >= vol->usable_leb_size) 245 len = vol->usable_leb_size - off; 246 247 err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0); 248 if (err) 249 break; 250 251 off += len; 252 if (off == vol->usable_leb_size) { 253 lnum += 1; 254 off -= vol->usable_leb_size; 255 } 256 257 count -= len; 258 *offp += len; 259 260 err = copy_to_user(buf, tbuf, len); 261 if (err) { 262 err = -EFAULT; 263 break; 264 } 265 266 buf += len; 267 len = count > tbuf_size ? tbuf_size : count; 268 } while (count); 269 270 vfree(tbuf); 271 return err ? err : count_save - count; 272 } 273 274 /* 275 * This function allows to directly write to dynamic UBI volumes, without 276 * issuing the volume update operation. 277 */ 278 static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, 279 size_t count, loff_t *offp) 280 { 281 struct ubi_volume_desc *desc = file->private_data; 282 struct ubi_volume *vol = desc->vol; 283 struct ubi_device *ubi = vol->ubi; 284 int lnum, off, len, tbuf_size, err = 0; 285 size_t count_save = count; 286 char *tbuf; 287 288 if (!vol->direct_writes) 289 return -EPERM; 290 291 dbg_gen("requested: write %zd bytes to offset %lld of volume %u", 292 count, *offp, vol->vol_id); 293 294 if (vol->vol_type == UBI_STATIC_VOLUME) 295 return -EROFS; 296 297 lnum = div_u64_rem(*offp, vol->usable_leb_size, &off); 298 if (off & (ubi->min_io_size - 1)) { 299 dbg_err("unaligned position"); 300 return -EINVAL; 301 } 302 303 if (*offp + count > vol->used_bytes) 304 count_save = count = vol->used_bytes - *offp; 305 306 /* We can write only in fractions of the minimum I/O unit */ 307 if (count & (ubi->min_io_size - 1)) { 308 dbg_err("unaligned write length"); 309 return -EINVAL; 310 } 311 312 tbuf_size = vol->usable_leb_size; 313 if (count < tbuf_size) 314 tbuf_size = ALIGN(count, ubi->min_io_size); 315 tbuf = vmalloc(tbuf_size); 316 if (!tbuf) 317 return -ENOMEM; 318 319 len = count > tbuf_size ? tbuf_size : count; 320 321 while (count) { 322 cond_resched(); 323 324 if (off + len >= vol->usable_leb_size) 325 len = vol->usable_leb_size - off; 326 327 err = copy_from_user(tbuf, buf, len); 328 if (err) { 329 err = -EFAULT; 330 break; 331 } 332 333 err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len, 334 UBI_UNKNOWN); 335 if (err) 336 break; 337 338 off += len; 339 if (off == vol->usable_leb_size) { 340 lnum += 1; 341 off -= vol->usable_leb_size; 342 } 343 344 count -= len; 345 *offp += len; 346 buf += len; 347 len = count > tbuf_size ? tbuf_size : count; 348 } 349 350 vfree(tbuf); 351 return err ? err : count_save - count; 352 } 353 354 static ssize_t vol_cdev_write(struct file *file, const char __user *buf, 355 size_t count, loff_t *offp) 356 { 357 int err = 0; 358 struct ubi_volume_desc *desc = file->private_data; 359 struct ubi_volume *vol = desc->vol; 360 struct ubi_device *ubi = vol->ubi; 361 362 if (!vol->updating && !vol->changing_leb) 363 return vol_cdev_direct_write(file, buf, count, offp); 364 365 if (vol->updating) 366 err = ubi_more_update_data(ubi, vol, buf, count); 367 else 368 err = ubi_more_leb_change_data(ubi, vol, buf, count); 369 370 if (err < 0) { 371 ubi_err("cannot accept more %zd bytes of data, error %d", 372 count, err); 373 return err; 374 } 375 376 if (err) { 377 /* 378 * The operation is finished, @err contains number of actually 379 * written bytes. 380 */ 381 count = err; 382 383 if (vol->changing_leb) { 384 revoke_exclusive(desc, UBI_READWRITE); 385 return count; 386 } 387 388 err = ubi_check_volume(ubi, vol->vol_id); 389 if (err < 0) 390 return err; 391 392 if (err) { 393 ubi_warn("volume %d on UBI device %d is corrupted", 394 vol->vol_id, ubi->ubi_num); 395 vol->corrupted = 1; 396 } 397 vol->checked = 1; 398 ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED); 399 revoke_exclusive(desc, UBI_READWRITE); 400 } 401 402 return count; 403 } 404 405 static long vol_cdev_ioctl(struct file *file, unsigned int cmd, 406 unsigned long arg) 407 { 408 int err = 0; 409 struct ubi_volume_desc *desc = file->private_data; 410 struct ubi_volume *vol = desc->vol; 411 struct ubi_device *ubi = vol->ubi; 412 void __user *argp = (void __user *)arg; 413 414 switch (cmd) { 415 /* Volume update command */ 416 case UBI_IOCVOLUP: 417 { 418 int64_t bytes, rsvd_bytes; 419 420 if (!capable(CAP_SYS_RESOURCE)) { 421 err = -EPERM; 422 break; 423 } 424 425 err = copy_from_user(&bytes, argp, sizeof(int64_t)); 426 if (err) { 427 err = -EFAULT; 428 break; 429 } 430 431 if (desc->mode == UBI_READONLY) { 432 err = -EROFS; 433 break; 434 } 435 436 rsvd_bytes = (long long)vol->reserved_pebs * 437 ubi->leb_size-vol->data_pad; 438 if (bytes < 0 || bytes > rsvd_bytes) { 439 err = -EINVAL; 440 break; 441 } 442 443 err = get_exclusive(desc); 444 if (err < 0) 445 break; 446 447 err = ubi_start_update(ubi, vol, bytes); 448 if (bytes == 0) 449 revoke_exclusive(desc, UBI_READWRITE); 450 break; 451 } 452 453 /* Atomic logical eraseblock change command */ 454 case UBI_IOCEBCH: 455 { 456 struct ubi_leb_change_req req; 457 458 err = copy_from_user(&req, argp, 459 sizeof(struct ubi_leb_change_req)); 460 if (err) { 461 err = -EFAULT; 462 break; 463 } 464 465 if (desc->mode == UBI_READONLY || 466 vol->vol_type == UBI_STATIC_VOLUME) { 467 err = -EROFS; 468 break; 469 } 470 471 /* Validate the request */ 472 err = -EINVAL; 473 if (req.lnum < 0 || req.lnum >= vol->reserved_pebs || 474 req.bytes < 0 || req.lnum >= vol->usable_leb_size) 475 break; 476 if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM && 477 req.dtype != UBI_UNKNOWN) 478 break; 479 480 err = get_exclusive(desc); 481 if (err < 0) 482 break; 483 484 err = ubi_start_leb_change(ubi, vol, &req); 485 if (req.bytes == 0) 486 revoke_exclusive(desc, UBI_READWRITE); 487 break; 488 } 489 490 /* Logical eraseblock erasure command */ 491 case UBI_IOCEBER: 492 { 493 int32_t lnum; 494 495 err = get_user(lnum, (__user int32_t *)argp); 496 if (err) { 497 err = -EFAULT; 498 break; 499 } 500 501 if (desc->mode == UBI_READONLY || 502 vol->vol_type == UBI_STATIC_VOLUME) { 503 err = -EROFS; 504 break; 505 } 506 507 if (lnum < 0 || lnum >= vol->reserved_pebs) { 508 err = -EINVAL; 509 break; 510 } 511 512 dbg_gen("erase LEB %d:%d", vol->vol_id, lnum); 513 err = ubi_eba_unmap_leb(ubi, vol, lnum); 514 if (err) 515 break; 516 517 err = ubi_wl_flush(ubi); 518 break; 519 } 520 521 /* Logical eraseblock map command */ 522 case UBI_IOCEBMAP: 523 { 524 struct ubi_map_req req; 525 526 err = copy_from_user(&req, argp, sizeof(struct ubi_map_req)); 527 if (err) { 528 err = -EFAULT; 529 break; 530 } 531 err = ubi_leb_map(desc, req.lnum, req.dtype); 532 break; 533 } 534 535 /* Logical eraseblock un-map command */ 536 case UBI_IOCEBUNMAP: 537 { 538 int32_t lnum; 539 540 err = get_user(lnum, (__user int32_t *)argp); 541 if (err) { 542 err = -EFAULT; 543 break; 544 } 545 err = ubi_leb_unmap(desc, lnum); 546 break; 547 } 548 549 /* Check if logical eraseblock is mapped command */ 550 case UBI_IOCEBISMAP: 551 { 552 int32_t lnum; 553 554 err = get_user(lnum, (__user int32_t *)argp); 555 if (err) { 556 err = -EFAULT; 557 break; 558 } 559 err = ubi_is_mapped(desc, lnum); 560 break; 561 } 562 563 /* Set volume property command */ 564 case UBI_IOCSETPROP: 565 { 566 struct ubi_set_prop_req req; 567 568 err = copy_from_user(&req, argp, 569 sizeof(struct ubi_set_prop_req)); 570 if (err) { 571 err = -EFAULT; 572 break; 573 } 574 switch (req.property) { 575 case UBI_PROP_DIRECT_WRITE: 576 mutex_lock(&ubi->device_mutex); 577 desc->vol->direct_writes = !!req.value; 578 mutex_unlock(&ubi->device_mutex); 579 break; 580 default: 581 err = -EINVAL; 582 break; 583 } 584 break; 585 } 586 587 default: 588 err = -ENOTTY; 589 break; 590 } 591 return err; 592 } 593 594 /** 595 * verify_mkvol_req - verify volume creation request. 596 * @ubi: UBI device description object 597 * @req: the request to check 598 * 599 * This function zero if the request is correct, and %-EINVAL if not. 600 */ 601 static int verify_mkvol_req(const struct ubi_device *ubi, 602 const struct ubi_mkvol_req *req) 603 { 604 int n, err = -EINVAL; 605 606 if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 || 607 req->name_len < 0) 608 goto bad; 609 610 if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) && 611 req->vol_id != UBI_VOL_NUM_AUTO) 612 goto bad; 613 614 if (req->alignment == 0) 615 goto bad; 616 617 if (req->bytes == 0) 618 goto bad; 619 620 if (req->vol_type != UBI_DYNAMIC_VOLUME && 621 req->vol_type != UBI_STATIC_VOLUME) 622 goto bad; 623 624 if (req->alignment > ubi->leb_size) 625 goto bad; 626 627 n = req->alignment & (ubi->min_io_size - 1); 628 if (req->alignment != 1 && n) 629 goto bad; 630 631 if (req->name_len > UBI_VOL_NAME_MAX) { 632 err = -ENAMETOOLONG; 633 goto bad; 634 } 635 636 n = strnlen(req->name, req->name_len + 1); 637 if (n != req->name_len) 638 goto bad; 639 640 return 0; 641 642 bad: 643 dbg_err("bad volume creation request"); 644 ubi_dbg_dump_mkvol_req(req); 645 return err; 646 } 647 648 /** 649 * verify_rsvol_req - verify volume re-size request. 650 * @ubi: UBI device description object 651 * @req: the request to check 652 * 653 * This function returns zero if the request is correct, and %-EINVAL if not. 654 */ 655 static int verify_rsvol_req(const struct ubi_device *ubi, 656 const struct ubi_rsvol_req *req) 657 { 658 if (req->bytes <= 0) 659 return -EINVAL; 660 661 if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) 662 return -EINVAL; 663 664 return 0; 665 } 666 667 /** 668 * rename_volumes - rename UBI volumes. 669 * @ubi: UBI device description object 670 * @req: volumes re-name request 671 * 672 * This is a helper function for the volume re-name IOCTL which validates the 673 * the request, opens the volume and calls corresponding volumes management 674 * function. Returns zero in case of success and a negative error code in case 675 * of failure. 676 */ 677 static int rename_volumes(struct ubi_device *ubi, 678 struct ubi_rnvol_req *req) 679 { 680 int i, n, err; 681 struct list_head rename_list; 682 struct ubi_rename_entry *re, *re1; 683 684 if (req->count < 0 || req->count > UBI_MAX_RNVOL) 685 return -EINVAL; 686 687 if (req->count == 0) 688 return 0; 689 690 /* Validate volume IDs and names in the request */ 691 for (i = 0; i < req->count; i++) { 692 if (req->ents[i].vol_id < 0 || 693 req->ents[i].vol_id >= ubi->vtbl_slots) 694 return -EINVAL; 695 if (req->ents[i].name_len < 0) 696 return -EINVAL; 697 if (req->ents[i].name_len > UBI_VOL_NAME_MAX) 698 return -ENAMETOOLONG; 699 req->ents[i].name[req->ents[i].name_len] = '\0'; 700 n = strlen(req->ents[i].name); 701 if (n != req->ents[i].name_len) 702 err = -EINVAL; 703 } 704 705 /* Make sure volume IDs and names are unique */ 706 for (i = 0; i < req->count - 1; i++) { 707 for (n = i + 1; n < req->count; n++) { 708 if (req->ents[i].vol_id == req->ents[n].vol_id) { 709 dbg_err("duplicated volume id %d", 710 req->ents[i].vol_id); 711 return -EINVAL; 712 } 713 if (!strcmp(req->ents[i].name, req->ents[n].name)) { 714 dbg_err("duplicated volume name \"%s\"", 715 req->ents[i].name); 716 return -EINVAL; 717 } 718 } 719 } 720 721 /* Create the re-name list */ 722 INIT_LIST_HEAD(&rename_list); 723 for (i = 0; i < req->count; i++) { 724 int vol_id = req->ents[i].vol_id; 725 int name_len = req->ents[i].name_len; 726 const char *name = req->ents[i].name; 727 728 re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); 729 if (!re) { 730 err = -ENOMEM; 731 goto out_free; 732 } 733 734 re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE); 735 if (IS_ERR(re->desc)) { 736 err = PTR_ERR(re->desc); 737 dbg_err("cannot open volume %d, error %d", vol_id, err); 738 kfree(re); 739 goto out_free; 740 } 741 742 /* Skip this re-naming if the name does not really change */ 743 if (re->desc->vol->name_len == name_len && 744 !memcmp(re->desc->vol->name, name, name_len)) { 745 ubi_close_volume(re->desc); 746 kfree(re); 747 continue; 748 } 749 750 re->new_name_len = name_len; 751 memcpy(re->new_name, name, name_len); 752 list_add_tail(&re->list, &rename_list); 753 dbg_msg("will rename volume %d from \"%s\" to \"%s\"", 754 vol_id, re->desc->vol->name, name); 755 } 756 757 if (list_empty(&rename_list)) 758 return 0; 759 760 /* Find out the volumes which have to be removed */ 761 list_for_each_entry(re, &rename_list, list) { 762 struct ubi_volume_desc *desc; 763 int no_remove_needed = 0; 764 765 /* 766 * Volume @re->vol_id is going to be re-named to 767 * @re->new_name, while its current name is @name. If a volume 768 * with name @re->new_name currently exists, it has to be 769 * removed, unless it is also re-named in the request (@req). 770 */ 771 list_for_each_entry(re1, &rename_list, list) { 772 if (re->new_name_len == re1->desc->vol->name_len && 773 !memcmp(re->new_name, re1->desc->vol->name, 774 re1->desc->vol->name_len)) { 775 no_remove_needed = 1; 776 break; 777 } 778 } 779 780 if (no_remove_needed) 781 continue; 782 783 /* 784 * It seems we need to remove volume with name @re->new_name, 785 * if it exists. 786 */ 787 desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, 788 UBI_EXCLUSIVE); 789 if (IS_ERR(desc)) { 790 err = PTR_ERR(desc); 791 if (err == -ENODEV) 792 /* Re-naming into a non-existing volume name */ 793 continue; 794 795 /* The volume exists but busy, or an error occurred */ 796 dbg_err("cannot open volume \"%s\", error %d", 797 re->new_name, err); 798 goto out_free; 799 } 800 801 re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); 802 if (!re) { 803 err = -ENOMEM; 804 ubi_close_volume(desc); 805 goto out_free; 806 } 807 808 re->remove = 1; 809 re->desc = desc; 810 list_add(&re->list, &rename_list); 811 dbg_msg("will remove volume %d, name \"%s\"", 812 re->desc->vol->vol_id, re->desc->vol->name); 813 } 814 815 mutex_lock(&ubi->device_mutex); 816 err = ubi_rename_volumes(ubi, &rename_list); 817 mutex_unlock(&ubi->device_mutex); 818 819 out_free: 820 list_for_each_entry_safe(re, re1, &rename_list, list) { 821 ubi_close_volume(re->desc); 822 list_del(&re->list); 823 kfree(re); 824 } 825 return err; 826 } 827 828 static long ubi_cdev_ioctl(struct file *file, unsigned int cmd, 829 unsigned long arg) 830 { 831 int err = 0; 832 struct ubi_device *ubi; 833 struct ubi_volume_desc *desc; 834 void __user *argp = (void __user *)arg; 835 836 if (!capable(CAP_SYS_RESOURCE)) 837 return -EPERM; 838 839 ubi = ubi_get_by_major(imajor(file->f_mapping->host)); 840 if (!ubi) 841 return -ENODEV; 842 843 switch (cmd) { 844 /* Create volume command */ 845 case UBI_IOCMKVOL: 846 { 847 struct ubi_mkvol_req req; 848 849 dbg_gen("create volume"); 850 err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req)); 851 if (err) { 852 err = -EFAULT; 853 break; 854 } 855 856 req.name[req.name_len] = '\0'; 857 err = verify_mkvol_req(ubi, &req); 858 if (err) 859 break; 860 861 mutex_lock(&ubi->device_mutex); 862 err = ubi_create_volume(ubi, &req); 863 mutex_unlock(&ubi->device_mutex); 864 if (err) 865 break; 866 867 err = put_user(req.vol_id, (__user int32_t *)argp); 868 if (err) 869 err = -EFAULT; 870 871 break; 872 } 873 874 /* Remove volume command */ 875 case UBI_IOCRMVOL: 876 { 877 int vol_id; 878 879 dbg_gen("remove volume"); 880 err = get_user(vol_id, (__user int32_t *)argp); 881 if (err) { 882 err = -EFAULT; 883 break; 884 } 885 886 desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE); 887 if (IS_ERR(desc)) { 888 err = PTR_ERR(desc); 889 break; 890 } 891 892 mutex_lock(&ubi->device_mutex); 893 err = ubi_remove_volume(desc, 0); 894 mutex_unlock(&ubi->device_mutex); 895 896 /* 897 * The volume is deleted (unless an error occurred), and the 898 * 'struct ubi_volume' object will be freed when 899 * 'ubi_close_volume()' will call 'put_device()'. 900 */ 901 ubi_close_volume(desc); 902 break; 903 } 904 905 /* Re-size volume command */ 906 case UBI_IOCRSVOL: 907 { 908 int pebs; 909 struct ubi_rsvol_req req; 910 911 dbg_gen("re-size volume"); 912 err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req)); 913 if (err) { 914 err = -EFAULT; 915 break; 916 } 917 918 err = verify_rsvol_req(ubi, &req); 919 if (err) 920 break; 921 922 desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE); 923 if (IS_ERR(desc)) { 924 err = PTR_ERR(desc); 925 break; 926 } 927 928 pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1, 929 desc->vol->usable_leb_size); 930 931 mutex_lock(&ubi->device_mutex); 932 err = ubi_resize_volume(desc, pebs); 933 mutex_unlock(&ubi->device_mutex); 934 ubi_close_volume(desc); 935 break; 936 } 937 938 /* Re-name volumes command */ 939 case UBI_IOCRNVOL: 940 { 941 struct ubi_rnvol_req *req; 942 943 dbg_msg("re-name volumes"); 944 req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL); 945 if (!req) { 946 err = -ENOMEM; 947 break; 948 }; 949 950 err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req)); 951 if (err) { 952 err = -EFAULT; 953 kfree(req); 954 break; 955 } 956 957 err = rename_volumes(ubi, req); 958 kfree(req); 959 break; 960 } 961 962 default: 963 err = -ENOTTY; 964 break; 965 } 966 967 ubi_put_device(ubi); 968 return err; 969 } 970 971 static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd, 972 unsigned long arg) 973 { 974 int err = 0; 975 void __user *argp = (void __user *)arg; 976 977 if (!capable(CAP_SYS_RESOURCE)) 978 return -EPERM; 979 980 switch (cmd) { 981 /* Attach an MTD device command */ 982 case UBI_IOCATT: 983 { 984 struct ubi_attach_req req; 985 struct mtd_info *mtd; 986 987 dbg_gen("attach MTD device"); 988 err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req)); 989 if (err) { 990 err = -EFAULT; 991 break; 992 } 993 994 if (req.mtd_num < 0 || 995 (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) { 996 err = -EINVAL; 997 break; 998 } 999 1000 mtd = get_mtd_device(NULL, req.mtd_num); 1001 if (IS_ERR(mtd)) { 1002 err = PTR_ERR(mtd); 1003 break; 1004 } 1005 1006 /* 1007 * Note, further request verification is done by 1008 * 'ubi_attach_mtd_dev()'. 1009 */ 1010 mutex_lock(&ubi_devices_mutex); 1011 err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset); 1012 mutex_unlock(&ubi_devices_mutex); 1013 if (err < 0) 1014 put_mtd_device(mtd); 1015 else 1016 /* @err contains UBI device number */ 1017 err = put_user(err, (__user int32_t *)argp); 1018 1019 break; 1020 } 1021 1022 /* Detach an MTD device command */ 1023 case UBI_IOCDET: 1024 { 1025 int ubi_num; 1026 1027 dbg_gen("dettach MTD device"); 1028 err = get_user(ubi_num, (__user int32_t *)argp); 1029 if (err) { 1030 err = -EFAULT; 1031 break; 1032 } 1033 1034 mutex_lock(&ubi_devices_mutex); 1035 err = ubi_detach_mtd_dev(ubi_num, 0); 1036 mutex_unlock(&ubi_devices_mutex); 1037 break; 1038 } 1039 1040 default: 1041 err = -ENOTTY; 1042 break; 1043 } 1044 1045 return err; 1046 } 1047 1048 #ifdef CONFIG_COMPAT 1049 static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd, 1050 unsigned long arg) 1051 { 1052 unsigned long translated_arg = (unsigned long)compat_ptr(arg); 1053 1054 return vol_cdev_ioctl(file, cmd, translated_arg); 1055 } 1056 1057 static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd, 1058 unsigned long arg) 1059 { 1060 unsigned long translated_arg = (unsigned long)compat_ptr(arg); 1061 1062 return ubi_cdev_ioctl(file, cmd, translated_arg); 1063 } 1064 1065 static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd, 1066 unsigned long arg) 1067 { 1068 unsigned long translated_arg = (unsigned long)compat_ptr(arg); 1069 1070 return ctrl_cdev_ioctl(file, cmd, translated_arg); 1071 } 1072 #else 1073 #define vol_cdev_compat_ioctl NULL 1074 #define ubi_cdev_compat_ioctl NULL 1075 #define ctrl_cdev_compat_ioctl NULL 1076 #endif 1077 1078 /* UBI volume character device operations */ 1079 const struct file_operations ubi_vol_cdev_operations = { 1080 .owner = THIS_MODULE, 1081 .open = vol_cdev_open, 1082 .release = vol_cdev_release, 1083 .llseek = vol_cdev_llseek, 1084 .read = vol_cdev_read, 1085 .write = vol_cdev_write, 1086 .fsync = vol_cdev_fsync, 1087 .unlocked_ioctl = vol_cdev_ioctl, 1088 .compat_ioctl = vol_cdev_compat_ioctl, 1089 }; 1090 1091 /* UBI character device operations */ 1092 const struct file_operations ubi_cdev_operations = { 1093 .owner = THIS_MODULE, 1094 .llseek = no_llseek, 1095 .unlocked_ioctl = ubi_cdev_ioctl, 1096 .compat_ioctl = ubi_cdev_compat_ioctl, 1097 }; 1098 1099 /* UBI control character device operations */ 1100 const struct file_operations ubi_ctrl_cdev_operations = { 1101 .owner = THIS_MODULE, 1102 .unlocked_ioctl = ctrl_cdev_ioctl, 1103 .compat_ioctl = ctrl_cdev_compat_ioctl, 1104 }; 1105