/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file includes implementation of UBI character device operations.
 *
 * There are two kinds of character devices in UBI: UBI character devices and
 * UBI volume character devices. UBI character devices allow users to
 * manipulate whole volumes: create, remove, and re-size them. Volume character
 * devices provide volume I/O capabilities.
 *
 * Major and minor numbers are assigned dynamically to both UBI and volume
 * character devices.
 *
 * There is also a third kind of character device - the UBI control character
 * device, which allows manipulating UBI devices themselves - creating and
 * deleting them. In other words, it is used for attaching and detaching MTD
 * devices.
 */
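
/*
 * For illustration only: a minimal user-space sketch (not part of this
 * driver) of how the UBI control character device may be used to attach an
 * MTD device; the actual handling is in ctrl_cdev_ioctl() below. The device
 * node path and error handling are assumptions; the request layout and the
 * ioctl come from <mtd/ubi-user.h>.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/ubi-user.h>
 *
 *	// Returns the assigned UBI device number or -1 on failure
 *	int attach_mtd(int mtd_num)
 *	{
 *		struct ubi_attach_req req;
 *		int fd, err;
 *
 *		fd = open("/dev/ubi_ctrl", O_RDONLY);
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&req, 0, sizeof(req));
 *		req.ubi_num = UBI_DEV_NUM_AUTO; // let UBI pick the device number
 *		req.mtd_num = mtd_num;
 *		req.vid_hdr_offset = 0;         // use the default VID header offset
 *
 *		err = ioctl(fd, UBI_IOCATT, &req);
 *		close(fd);
 *		// On success the kernel writes the assigned device number back
 *		// into the first 32-bit word of the request (req.ubi_num)
 *		return err ? -1 : req.ubi_num;
 *	}
 */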
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/ioctl.h>
#include <linux/capability.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/math64.h>
#include <mtd/ubi-user.h>
#include "ubi.h"

/**
 * get_exclusive - get exclusive access to an UBI volume.
 * @desc: volume descriptor
 *
 * This function changes UBI volume open mode to "exclusive". Returns previous
 * mode value (positive integer) in case of success and a negative error code
 * in case of failure.
 */
static int get_exclusive(struct ubi_volume_desc *desc)
{
	int users, err;
	struct ubi_volume *vol = desc->vol;

	spin_lock(&vol->ubi->volumes_lock);
	users = vol->readers + vol->writers + vol->exclusive;
	ubi_assert(users > 0);
	if (users > 1) {
		dbg_err("%d users for volume %d", users, vol->vol_id);
		err = -EBUSY;
	} else {
		vol->readers = vol->writers = 0;
		vol->exclusive = 1;
		err = desc->mode;
		desc->mode = UBI_EXCLUSIVE;
	}
	spin_unlock(&vol->ubi->volumes_lock);

	return err;
}

/**
 * revoke_exclusive - revoke exclusive mode.
 * @desc: volume descriptor
 * @mode: new mode to switch to
 */
static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
{
	struct ubi_volume *vol = desc->vol;

	spin_lock(&vol->ubi->volumes_lock);
	ubi_assert(vol->readers == 0 && vol->writers == 0);
	ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);
	vol->exclusive = 0;
	if (mode == UBI_READONLY)
		vol->readers = 1;
	else if (mode == UBI_READWRITE)
		vol->writers = 1;
	else
		vol->exclusive = 1;
	spin_unlock(&vol->ubi->volumes_lock);

	desc->mode = mode;
}

static int vol_cdev_open(struct inode *inode, struct file *file)
{
	struct ubi_volume_desc *desc;
	int vol_id = iminor(inode) - 1, mode, ubi_num;

	ubi_num = ubi_major2num(imajor(inode));
	if (ubi_num < 0)
		return ubi_num;

	if (file->f_mode & FMODE_WRITE)
		mode = UBI_READWRITE;
	else
		mode = UBI_READONLY;

	dbg_gen("open volume %d, mode %d", vol_id, mode);

	desc = ubi_open_volume(ubi_num, vol_id, mode);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	file->private_data = desc;
	return 0;
}

static int vol_cdev_release(struct inode *inode, struct file *file)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;

	dbg_gen("release volume %d, mode %d", vol->vol_id, desc->mode);

	if (vol->updating) {
		ubi_warn("update of volume %d not finished, volume is damaged",
			 vol->vol_id);
		ubi_assert(!vol->changing_leb);
		vol->updating = 0;
		vfree(vol->upd_buf);
	} else if (vol->changing_leb) {
		dbg_gen("only %lld of %lld bytes received for atomic LEB change"
			" for volume %d:%d, cancel", vol->upd_received,
			vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
		vol->changing_leb = 0;
		vfree(vol->upd_buf);
	}

	ubi_close_volume(desc);
	return 0;
}

static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	loff_t new_offset;

	if (vol->updating) {
		/* Update is in progress, seeking is prohibited */
		dbg_err("updating");
		return -EBUSY;
	}

	switch (origin) {
	case 0: /* SEEK_SET */
		new_offset = offset;
		break;
	case 1: /* SEEK_CUR */
		new_offset = file->f_pos + offset;
		break;
	case 2: /* SEEK_END */
		new_offset = vol->used_bytes + offset;
		break;
	default:
		return -EINVAL;
	}

	if (new_offset < 0 || new_offset > vol->used_bytes) {
		dbg_err("bad seek %lld", new_offset);
		return -EINVAL;
	}

	dbg_gen("seek volume %d, offset %lld, origin %d, new offset %lld",
		vol->vol_id, offset, origin, new_offset);

	file->f_pos = new_offset;
	return new_offset;
}

static int vol_cdev_fsync(struct file *file, struct dentry *dentry,
			  int datasync)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_device *ubi = desc->vol->ubi;

	return ubi_sync(ubi->ubi_num);
}

static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
			     loff_t *offp)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	int err, lnum, off, len, tbuf_size;
	size_t count_save = count;
	void *tbuf;

	dbg_gen("read %zd bytes from offset %lld of volume %d",
		count, *offp, vol->vol_id);

	if (vol->updating) {
		dbg_err("updating");
		return -EBUSY;
	}
	if (vol->upd_marker) {
		dbg_err("damaged volume, update marker is set");
		return -EBADF;
	}
	if (*offp == vol->used_bytes || count == 0)
		return 0;

	if (vol->corrupted)
		dbg_gen("read from corrupted volume %d", vol->vol_id);

	if (*offp + count > vol->used_bytes)
		count_save = count = vol->used_bytes - *offp;

	tbuf_size = vol->usable_leb_size;
	if (count < tbuf_size)
		tbuf_size = ALIGN(count, ubi->min_io_size);
	tbuf = vmalloc(tbuf_size);
	if (!tbuf)
		return -ENOMEM;

	len = count > tbuf_size ? tbuf_size : count;
	lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);

	do {
		cond_resched();

		if (off + len >= vol->usable_leb_size)
			len = vol->usable_leb_size - off;

		err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
		if (err)
			break;

		off += len;
		if (off == vol->usable_leb_size) {
			lnum += 1;
			off -= vol->usable_leb_size;
		}

		count -= len;
		*offp += len;

		err = copy_to_user(buf, tbuf, len);
		if (err) {
			err = -EFAULT;
			break;
		}

		buf += len;
		len = count > tbuf_size ? tbuf_size : count;
	} while (count);

	vfree(tbuf);
	return err ? err : count_save - count;
}
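
/*
 * For illustration only: the llseek and read handlers above let a volume be
 * read like an ordinary file, up to its current 'used_bytes' size. A minimal
 * user-space sketch (the device node path is an assumption):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	// Read up to 'len' bytes from offset 'off' of volume /dev/ubi0_0
 *	ssize_t read_volume(char *buf, size_t len, off_t off)
 *	{
 *		int fd = open("/dev/ubi0_0", O_RDONLY);
 *		ssize_t ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		if (lseek(fd, off, SEEK_SET) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		ret = read(fd, buf, len);   // a short read means end of data
 *		close(fd);
 *		return ret;
 *	}
 */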
/*
 * This function allows direct writes to dynamic UBI volumes, without issuing
 * the volume update operation.
 */
static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
				     size_t count, loff_t *offp)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	int lnum, off, len, tbuf_size, err = 0;
	size_t count_save = count;
	char *tbuf;

	if (!vol->direct_writes)
		return -EPERM;

	dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
		count, *offp, vol->vol_id);

	if (vol->vol_type == UBI_STATIC_VOLUME)
		return -EROFS;

	lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
	if (off & (ubi->min_io_size - 1)) {
		dbg_err("unaligned position");
		return -EINVAL;
	}

	if (*offp + count > vol->used_bytes)
		count_save = count = vol->used_bytes - *offp;

	/* We can write only in fractions of the minimum I/O unit */
	if (count & (ubi->min_io_size - 1)) {
		dbg_err("unaligned write length");
		return -EINVAL;
	}

	tbuf_size = vol->usable_leb_size;
	if (count < tbuf_size)
		tbuf_size = ALIGN(count, ubi->min_io_size);
	tbuf = vmalloc(tbuf_size);
	if (!tbuf)
		return -ENOMEM;

	len = count > tbuf_size ? tbuf_size : count;

	while (count) {
		cond_resched();

		if (off + len >= vol->usable_leb_size)
			len = vol->usable_leb_size - off;

		err = copy_from_user(tbuf, buf, len);
		if (err) {
			err = -EFAULT;
			break;
		}

		err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
					UBI_UNKNOWN);
		if (err)
			break;

		off += len;
		if (off == vol->usable_leb_size) {
			lnum += 1;
			off -= vol->usable_leb_size;
		}

		count -= len;
		*offp += len;
		buf += len;
		len = count > tbuf_size ? tbuf_size : count;
	}

	vfree(tbuf);
	return err ? err : count_save - count;
}

static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *offp)
{
	int err = 0;
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;

	if (!vol->updating && !vol->changing_leb)
		return vol_cdev_direct_write(file, buf, count, offp);

	if (vol->updating)
		err = ubi_more_update_data(ubi, vol, buf, count);
	else
		err = ubi_more_leb_change_data(ubi, vol, buf, count);

	if (err < 0) {
		ubi_err("cannot accept more %zd bytes of data, error %d",
			count, err);
		return err;
	}

	if (err) {
		/*
		 * The operation is finished, @err contains number of actually
		 * written bytes.
		 */
		count = err;

		if (vol->changing_leb) {
			revoke_exclusive(desc, UBI_READWRITE);
			return count;
		}

		err = ubi_check_volume(ubi, vol->vol_id);
		if (err < 0)
			return err;

		if (err) {
			ubi_warn("volume %d on UBI device %d is corrupted",
				 vol->vol_id, ubi->ubi_num);
			vol->corrupted = 1;
		}
		vol->checked = 1;
		ubi_gluebi_updated(vol);
		revoke_exclusive(desc, UBI_READWRITE);
	}

	return count;
}
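
/*
 * For illustration only: a minimal user-space sketch of the volume update
 * sequence handled by vol_cdev_write() above and the UBI_IOCVOLUP case
 * below - the total image size is announced via the ioctl first, then
 * exactly that many bytes are written to the volume character device. The
 * device node path and error handling are assumptions.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/ubi-user.h>
 *
 *	// Replace the contents of /dev/ubi0_0 with 'len' bytes from 'image'
 *	int update_volume(const char *image, int64_t len)
 *	{
 *		int fd = open("/dev/ubi0_0", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (ioctl(fd, UBI_IOCVOLUP, &len)) {	// announce the update size
 *			close(fd);
 *			return -1;
 *		}
 *		if (write(fd, image, len) != len) {	// feed the update data
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 */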
static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = 0;
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	/* Volume update command */
	case UBI_IOCVOLUP:
	{
		int64_t bytes, rsvd_bytes;

		if (!capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			break;
		}

		err = copy_from_user(&bytes, argp, sizeof(int64_t));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY) {
			err = -EROFS;
			break;
		}

		rsvd_bytes = (long long)vol->reserved_pebs *
			     (ubi->leb_size - vol->data_pad);
		if (bytes < 0 || bytes > rsvd_bytes) {
			err = -EINVAL;
			break;
		}

		err = get_exclusive(desc);
		if (err < 0)
			break;

		err = ubi_start_update(ubi, vol, bytes);
		if (bytes == 0)
			revoke_exclusive(desc, UBI_READWRITE);
		break;
	}

	/* Atomic logical eraseblock change command */
	case UBI_IOCEBCH:
	{
		struct ubi_leb_change_req req;

		err = copy_from_user(&req, argp,
				     sizeof(struct ubi_leb_change_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY ||
		    vol->vol_type == UBI_STATIC_VOLUME) {
			err = -EROFS;
			break;
		}

		/* Validate the request */
		err = -EINVAL;
		if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
		    req.bytes < 0 || req.bytes > vol->usable_leb_size)
			break;
		if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
		    req.dtype != UBI_UNKNOWN)
			break;

		err = get_exclusive(desc);
		if (err < 0)
			break;

		err = ubi_start_leb_change(ubi, vol, &req);
		if (req.bytes == 0)
			revoke_exclusive(desc, UBI_READWRITE);
		break;
	}

	/* Logical eraseblock erasure command */
	case UBI_IOCEBER:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY ||
		    vol->vol_type == UBI_STATIC_VOLUME) {
			err = -EROFS;
			break;
		}

		if (lnum < 0 || lnum >= vol->reserved_pebs) {
			err = -EINVAL;
			break;
		}

		dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
		err = ubi_eba_unmap_leb(ubi, vol, lnum);
		if (err)
			break;

		err = ubi_wl_flush(ubi);
		break;
	}

	/* Logical eraseblock map command */
	case UBI_IOCEBMAP:
	{
		struct ubi_map_req req;

		err = copy_from_user(&req, argp, sizeof(struct ubi_map_req));
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_leb_map(desc, req.lnum, req.dtype);
		break;
	}

	/* Logical eraseblock un-map command */
	case UBI_IOCEBUNMAP:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_leb_unmap(desc, lnum);
		break;
	}

	/* Check if logical eraseblock is mapped command */
	case UBI_IOCEBISMAP:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_is_mapped(desc, lnum);
		break;
	}

	/* Set volume property command */
	case UBI_IOCSETPROP:
	{
		struct ubi_set_prop_req req;

		err = copy_from_user(&req, argp,
				     sizeof(struct ubi_set_prop_req));
		if (err) {
			err = -EFAULT;
			break;
		}
		switch (req.property) {
		case UBI_PROP_DIRECT_WRITE:
			mutex_lock(&ubi->volumes_mutex);
			desc->vol->direct_writes = !!req.value;
			mutex_unlock(&ubi->volumes_mutex);
			break;
		default:
			err = -EINVAL;
			break;
		}
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}
	return err;
}
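
/*
 * For illustration only: the LEB-level ioctls handled above take a plain
 * logical eraseblock number. A minimal user-space sketch (the device node
 * path is an assumption):
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/ubi-user.h>
 *
 *	// Unmap (erase) LEB 'lnum' of /dev/ubi0_0 if it is currently mapped
 *	int erase_leb(int32_t lnum)
 *	{
 *		int fd = open("/dev/ubi0_0", O_RDWR);
 *		int err;
 *
 *		if (fd < 0)
 *			return -1;
 *		// UBI_IOCEBISMAP returns 1 if the LEB is mapped, 0 if not
 *		if (ioctl(fd, UBI_IOCEBISMAP, &lnum) == 1)
 *			err = ioctl(fd, UBI_IOCEBER, &lnum);
 *		else
 *			err = 0;
 *		close(fd);
 *		return err;
 *	}
 */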
break; 508 } 509 510 dbg_gen("erase LEB %d:%d", vol->vol_id, lnum); 511 err = ubi_eba_unmap_leb(ubi, vol, lnum); 512 if (err) 513 break; 514 515 err = ubi_wl_flush(ubi); 516 break; 517 } 518 519 /* Logical eraseblock map command */ 520 case UBI_IOCEBMAP: 521 { 522 struct ubi_map_req req; 523 524 err = copy_from_user(&req, argp, sizeof(struct ubi_map_req)); 525 if (err) { 526 err = -EFAULT; 527 break; 528 } 529 err = ubi_leb_map(desc, req.lnum, req.dtype); 530 break; 531 } 532 533 /* Logical eraseblock un-map command */ 534 case UBI_IOCEBUNMAP: 535 { 536 int32_t lnum; 537 538 err = get_user(lnum, (__user int32_t *)argp); 539 if (err) { 540 err = -EFAULT; 541 break; 542 } 543 err = ubi_leb_unmap(desc, lnum); 544 break; 545 } 546 547 /* Check if logical eraseblock is mapped command */ 548 case UBI_IOCEBISMAP: 549 { 550 int32_t lnum; 551 552 err = get_user(lnum, (__user int32_t *)argp); 553 if (err) { 554 err = -EFAULT; 555 break; 556 } 557 err = ubi_is_mapped(desc, lnum); 558 break; 559 } 560 561 /* Set volume property command*/ 562 case UBI_IOCSETPROP: 563 { 564 struct ubi_set_prop_req req; 565 566 err = copy_from_user(&req, argp, 567 sizeof(struct ubi_set_prop_req)); 568 if (err) { 569 err = -EFAULT; 570 break; 571 } 572 switch (req.property) { 573 case UBI_PROP_DIRECT_WRITE: 574 mutex_lock(&ubi->volumes_mutex); 575 desc->vol->direct_writes = !!req.value; 576 mutex_unlock(&ubi->volumes_mutex); 577 break; 578 default: 579 err = -EINVAL; 580 break; 581 } 582 break; 583 } 584 585 default: 586 err = -ENOTTY; 587 break; 588 } 589 return err; 590 } 591 592 /** 593 * verify_mkvol_req - verify volume creation request. 594 * @ubi: UBI device description object 595 * @req: the request to check 596 * 597 * This function zero if the request is correct, and %-EINVAL if not. 598 */ 599 static int verify_mkvol_req(const struct ubi_device *ubi, 600 const struct ubi_mkvol_req *req) 601 { 602 int n, err = -EINVAL; 603 604 if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 || 605 req->name_len < 0) 606 goto bad; 607 608 if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) && 609 req->vol_id != UBI_VOL_NUM_AUTO) 610 goto bad; 611 612 if (req->alignment == 0) 613 goto bad; 614 615 if (req->bytes == 0) 616 goto bad; 617 618 if (req->vol_type != UBI_DYNAMIC_VOLUME && 619 req->vol_type != UBI_STATIC_VOLUME) 620 goto bad; 621 622 if (req->alignment > ubi->leb_size) 623 goto bad; 624 625 n = req->alignment & (ubi->min_io_size - 1); 626 if (req->alignment != 1 && n) 627 goto bad; 628 629 if (req->name_len > UBI_VOL_NAME_MAX) { 630 err = -ENAMETOOLONG; 631 goto bad; 632 } 633 634 n = strnlen(req->name, req->name_len + 1); 635 if (n != req->name_len) 636 goto bad; 637 638 return 0; 639 640 bad: 641 dbg_err("bad volume creation request"); 642 ubi_dbg_dump_mkvol_req(req); 643 return err; 644 } 645 646 /** 647 * verify_rsvol_req - verify volume re-size request. 648 * @ubi: UBI device description object 649 * @req: the request to check 650 * 651 * This function returns zero if the request is correct, and %-EINVAL if not. 652 */ 653 static int verify_rsvol_req(const struct ubi_device *ubi, 654 const struct ubi_rsvol_req *req) 655 { 656 if (req->bytes <= 0) 657 return -EINVAL; 658 659 if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) 660 return -EINVAL; 661 662 return 0; 663 } 664 665 /** 666 * rename_volumes - rename UBI volumes. 
/**
 * rename_volumes - rename UBI volumes.
 * @ubi: UBI device description object
 * @req: volumes re-name request
 *
 * This is a helper function for the volume re-name IOCTL which validates the
 * request, opens the volumes and calls the corresponding volumes management
 * function. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int rename_volumes(struct ubi_device *ubi,
			  struct ubi_rnvol_req *req)
{
	int i, n, err;
	struct list_head rename_list;
	struct ubi_rename_entry *re, *re1;

	if (req->count < 0 || req->count > UBI_MAX_RNVOL)
		return -EINVAL;

	if (req->count == 0)
		return 0;

	/* Validate volume IDs and names in the request */
	for (i = 0; i < req->count; i++) {
		if (req->ents[i].vol_id < 0 ||
		    req->ents[i].vol_id >= ubi->vtbl_slots)
			return -EINVAL;
		if (req->ents[i].name_len < 0)
			return -EINVAL;
		if (req->ents[i].name_len > UBI_VOL_NAME_MAX)
			return -ENAMETOOLONG;
		req->ents[i].name[req->ents[i].name_len] = '\0';
		n = strlen(req->ents[i].name);
		if (n != req->ents[i].name_len)
			return -EINVAL;
	}

	/* Make sure volume IDs and names are unique */
	for (i = 0; i < req->count - 1; i++) {
		for (n = i + 1; n < req->count; n++) {
			if (req->ents[i].vol_id == req->ents[n].vol_id) {
				dbg_err("duplicated volume id %d",
					req->ents[i].vol_id);
				return -EINVAL;
			}
			if (!strcmp(req->ents[i].name, req->ents[n].name)) {
				dbg_err("duplicated volume name \"%s\"",
					req->ents[i].name);
				return -EINVAL;
			}
		}
	}

	/* Create the re-name list */
	INIT_LIST_HEAD(&rename_list);
	for (i = 0; i < req->count; i++) {
		int vol_id = req->ents[i].vol_id;
		int name_len = req->ents[i].name_len;
		const char *name = req->ents[i].name;

		re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
		if (!re) {
			err = -ENOMEM;
			goto out_free;
		}

		re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(re->desc)) {
			err = PTR_ERR(re->desc);
			dbg_err("cannot open volume %d, error %d", vol_id, err);
			kfree(re);
			goto out_free;
		}

		/* Skip this re-naming if the name does not really change */
		if (re->desc->vol->name_len == name_len &&
		    !memcmp(re->desc->vol->name, name, name_len)) {
			ubi_close_volume(re->desc);
			kfree(re);
			continue;
		}

		re->new_name_len = name_len;
		memcpy(re->new_name, name, name_len);
		list_add_tail(&re->list, &rename_list);
		dbg_msg("will rename volume %d from \"%s\" to \"%s\"",
			vol_id, re->desc->vol->name, name);
	}

	if (list_empty(&rename_list))
		return 0;

	/* Find out the volumes which have to be removed */
	list_for_each_entry(re, &rename_list, list) {
		struct ubi_volume_desc *desc;
		int no_remove_needed = 0;

		/*
		 * Volume @re->vol_id is going to be re-named to
		 * @re->new_name, while its current name is @name. If a volume
		 * with name @re->new_name currently exists, it has to be
		 * removed, unless it is also re-named in the request (@req).
		 */
		list_for_each_entry(re1, &rename_list, list) {
			if (re->new_name_len == re1->desc->vol->name_len &&
			    !memcmp(re->new_name, re1->desc->vol->name,
				    re1->desc->vol->name_len)) {
				no_remove_needed = 1;
				break;
			}
		}

		if (no_remove_needed)
			continue;

		/*
		 * It seems we need to remove volume with name @re->new_name,
		 * if it exists.
		 */
		desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
					  UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			if (err == -ENODEV)
				/* Re-naming into a non-existing volume name */
				continue;

			/* The volume exists but busy, or an error occurred */
			dbg_err("cannot open volume \"%s\", error %d",
				re->new_name, err);
			goto out_free;
		}

		re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
		if (!re) {
			err = -ENOMEM;
			ubi_close_volume(desc);
			goto out_free;
		}

		re->remove = 1;
		re->desc = desc;
		list_add(&re->list, &rename_list);
		dbg_msg("will remove volume %d, name \"%s\"",
			re->desc->vol->vol_id, re->desc->vol->name);
	}

	mutex_lock(&ubi->volumes_mutex);
	err = ubi_rename_volumes(ubi, &rename_list);
	mutex_unlock(&ubi->volumes_mutex);

out_free:
	list_for_each_entry_safe(re, re1, &rename_list, list) {
		ubi_close_volume(re->desc);
		list_del(&re->list);
		kfree(re);
	}
	return err;
}

static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = 0;
	struct ubi_device *ubi;
	struct ubi_volume_desc *desc;
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	ubi = ubi_get_by_major(imajor(file->f_mapping->host));
	if (!ubi)
		return -ENODEV;

	switch (cmd) {
	/* Create volume command */
	case UBI_IOCMKVOL:
	{
		struct ubi_mkvol_req req;

		dbg_gen("create volume");
		err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		req.name[req.name_len] = '\0';
		err = verify_mkvol_req(ubi, &req);
		if (err)
			break;

		mutex_lock(&ubi->volumes_mutex);
		err = ubi_create_volume(ubi, &req);
		mutex_unlock(&ubi->volumes_mutex);
		if (err)
			break;

		err = put_user(req.vol_id, (__user int32_t *)argp);
		if (err)
			err = -EFAULT;

		break;
	}

	/* Remove volume command */
	case UBI_IOCRMVOL:
	{
		int vol_id;

		dbg_gen("remove volume");
		err = get_user(vol_id, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			break;
		}

		mutex_lock(&ubi->volumes_mutex);
		err = ubi_remove_volume(desc, 0);
		mutex_unlock(&ubi->volumes_mutex);

		/*
		 * The volume is deleted (unless an error occurred), and the
		 * 'struct ubi_volume' object will be freed when
		 * 'ubi_close_volume()' calls 'put_device()'.
		 */
		ubi_close_volume(desc);
		break;
	}

	/* Re-size volume command */
	case UBI_IOCRSVOL:
	{
		int pebs;
		struct ubi_rsvol_req req;

		dbg_gen("re-size volume");
		err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		err = verify_rsvol_req(ubi, &req);
		if (err)
			break;

		desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			break;
		}

		pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
			       desc->vol->usable_leb_size);

		mutex_lock(&ubi->volumes_mutex);
		err = ubi_resize_volume(desc, pebs);
		mutex_unlock(&ubi->volumes_mutex);
		ubi_close_volume(desc);
		break;
	}

	/* Re-name volumes command */
	case UBI_IOCRNVOL:
	{
		struct ubi_rnvol_req *req;

		dbg_msg("re-name volumes");
		req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			break;
		}

		err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req));
		if (err) {
			err = -EFAULT;
			kfree(req);
			break;
		}

		mutex_lock(&ubi->mult_mutex);
		err = rename_volumes(ubi, req);
		mutex_unlock(&ubi->mult_mutex);
		kfree(req);
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}

	ubi_put_device(ubi);
	return err;
}

static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	switch (cmd) {
	/* Attach an MTD device command */
	case UBI_IOCATT:
	{
		struct ubi_attach_req req;
		struct mtd_info *mtd;

		dbg_gen("attach MTD device");
		err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (req.mtd_num < 0 ||
		    (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
			err = -EINVAL;
			break;
		}

		mtd = get_mtd_device(NULL, req.mtd_num);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			break;
		}

		/*
		 * Note, further request verification is done by
		 * 'ubi_attach_mtd_dev()'.
		 */
		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0)
			put_mtd_device(mtd);
		else
			/* @err contains UBI device number */
			err = put_user(err, (__user int32_t *)argp);

		break;
	}

	/* Detach an MTD device command */
	case UBI_IOCDET:
	{
		int ubi_num;

		dbg_gen("detach MTD device");
		err = get_user(ubi_num, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_detach_mtd_dev(ubi_num, 0);
		mutex_unlock(&ubi_devices_mutex);
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}

	return err;
}

#ifdef CONFIG_COMPAT
static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return vol_cdev_ioctl(file, cmd, translated_arg);
}

static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return ubi_cdev_ioctl(file, cmd, translated_arg);
}

static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return ctrl_cdev_ioctl(file, cmd, translated_arg);
}
#else
#define vol_cdev_compat_ioctl NULL
#define ubi_cdev_compat_ioctl NULL
#define ctrl_cdev_compat_ioctl NULL
#endif

/* UBI volume character device operations */
const struct file_operations ubi_vol_cdev_operations = {
	.owner          = THIS_MODULE,
	.open           = vol_cdev_open,
	.release        = vol_cdev_release,
	.llseek         = vol_cdev_llseek,
	.read           = vol_cdev_read,
	.write          = vol_cdev_write,
	.fsync          = vol_cdev_fsync,
	.unlocked_ioctl = vol_cdev_ioctl,
	.compat_ioctl   = vol_cdev_compat_ioctl,
};

/* UBI character device operations */
const struct file_operations ubi_cdev_operations = {
	.owner          = THIS_MODULE,
	.llseek         = no_llseek,
	.unlocked_ioctl = ubi_cdev_ioctl,
	.compat_ioctl   = ubi_cdev_compat_ioctl,
};

/* UBI control character device operations */
const struct file_operations ubi_ctrl_cdev_operations = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = ctrl_cdev_ioctl,
	.compat_ioctl   = ctrl_cdev_compat_ioctl,
};