/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file includes the implementation of UBI character device operations.
 *
 * There are two kinds of character devices in UBI: UBI character devices and
 * UBI volume character devices. UBI character devices allow users to
 * manipulate whole volumes: create, remove, and re-size them. Volume character
 * devices provide volume I/O capabilities.
 *
 * Major and minor numbers are assigned dynamically to both UBI and volume
 * character devices.
 *
 * There is also a third kind of character device: the UBI control character
 * device, which allows one to manipulate UBI devices themselves, i.e. create
 * and delete them. In other words, it is used for attaching and detaching MTD
 * devices.
 */
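/*
 * For orientation, a minimal user-space sketch of how the three kinds of
 * character device described above are typically driven. The device node
 * names are assumptions (they depend on how udev names the dynamically
 * allocated major/minor numbers) and error handling is omitted; this is
 * illustrative only and not part of the driver:
 *
 *	int ctrl;   // UBI control device: attach/detach MTD devices
 *	int ubi;    // UBI device: create, remove and re-size volumes
 *	int vol;    // volume device: volume I/O and updates
 *
 *	ctrl = open("/dev/ubi_ctrl", O_RDONLY);
 *	ubi  = open("/dev/ubi0", O_RDWR);
 *	vol  = open("/dev/ubi0_1", O_RDWR);
 */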
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/ioctl.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <mtd/ubi-user.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include "ubi.h"

/**
 * get_exclusive - get exclusive access to an UBI volume.
 * @desc: volume descriptor
 *
 * This function changes UBI volume open mode to "exclusive". Returns previous
 * mode value (positive integer) in case of success and a negative error code
 * in case of failure.
 */
static int get_exclusive(struct ubi_volume_desc *desc)
{
        int users, err;
        struct ubi_volume *vol = desc->vol;

        spin_lock(&vol->ubi->volumes_lock);
        users = vol->readers + vol->writers + vol->exclusive;
        ubi_assert(users > 0);
        if (users > 1) {
                dbg_err("%d users for volume %d", users, vol->vol_id);
                err = -EBUSY;
        } else {
                vol->readers = vol->writers = 0;
                vol->exclusive = 1;
                err = desc->mode;
                desc->mode = UBI_EXCLUSIVE;
        }
        spin_unlock(&vol->ubi->volumes_lock);

        return err;
}

/**
 * revoke_exclusive - revoke exclusive mode.
 * @desc: volume descriptor
 * @mode: new mode to switch to
 */
static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
{
        struct ubi_volume *vol = desc->vol;

        spin_lock(&vol->ubi->volumes_lock);
        ubi_assert(vol->readers == 0 && vol->writers == 0);
        ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);
        vol->exclusive = 0;
        if (mode == UBI_READONLY)
                vol->readers = 1;
        else if (mode == UBI_READWRITE)
                vol->writers = 1;
        else
                vol->exclusive = 1;
        spin_unlock(&vol->ubi->volumes_lock);

        desc->mode = mode;
}
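/*
 * Illustrative sketch (not a new code path) of how the two helpers above are
 * paired by the UBI_IOCVOLUP and UBI_IOCEBCH handlers later in this file:
 * exclusive access is taken before an update or atomic LEB change is started,
 * and dropped back to read/write either immediately (zero-length request) or
 * once the data has been received in vol_cdev_write():
 *
 *	err = get_exclusive(desc);
 *	if (err < 0)
 *		return err;
 *	err = ubi_start_update(ubi, vol, bytes);
 *	...
 *	revoke_exclusive(desc, UBI_READWRITE);
 */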
static int vol_cdev_open(struct inode *inode, struct file *file)
{
        struct ubi_volume_desc *desc;
        int vol_id = iminor(inode) - 1, mode, ubi_num;

        lock_kernel();
        ubi_num = ubi_major2num(imajor(inode));
        if (ubi_num < 0) {
                unlock_kernel();
                return ubi_num;
        }

        if (file->f_mode & FMODE_WRITE)
                mode = UBI_READWRITE;
        else
                mode = UBI_READONLY;

        dbg_msg("open volume %d, mode %d", vol_id, mode);

        desc = ubi_open_volume(ubi_num, vol_id, mode);
        unlock_kernel();
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        file->private_data = desc;
        return 0;
}

static int vol_cdev_release(struct inode *inode, struct file *file)
{
        struct ubi_volume_desc *desc = file->private_data;
        struct ubi_volume *vol = desc->vol;

        dbg_msg("release volume %d, mode %d", vol->vol_id, desc->mode);

        if (vol->updating) {
                ubi_warn("update of volume %d not finished, volume is damaged",
                         vol->vol_id);
                ubi_assert(!vol->changing_leb);
                vol->updating = 0;
                vfree(vol->upd_buf);
        } else if (vol->changing_leb) {
                dbg_msg("only %lld of %lld bytes received for atomic LEB change"
                        " for volume %d:%d, cancel", vol->upd_received,
                        vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
                vol->changing_leb = 0;
                vfree(vol->upd_buf);
        }

        ubi_close_volume(desc);
        return 0;
}

static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
{
        struct ubi_volume_desc *desc = file->private_data;
        struct ubi_volume *vol = desc->vol;
        loff_t new_offset;

        if (vol->updating) {
                /* Update is in progress, seeking is prohibited */
                dbg_err("updating");
                return -EBUSY;
        }

        switch (origin) {
        case 0: /* SEEK_SET */
                new_offset = offset;
                break;
        case 1: /* SEEK_CUR */
                new_offset = file->f_pos + offset;
                break;
        case 2: /* SEEK_END */
                new_offset = vol->used_bytes + offset;
                break;
        default:
                return -EINVAL;
        }

        if (new_offset < 0 || new_offset > vol->used_bytes) {
                dbg_err("bad seek %lld", new_offset);
                return -EINVAL;
        }

        dbg_msg("seek volume %d, offset %lld, origin %d, new offset %lld",
                vol->vol_id, offset, origin, new_offset);

        file->f_pos = new_offset;
        return new_offset;
}

static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
                             loff_t *offp)
{
        struct ubi_volume_desc *desc = file->private_data;
        struct ubi_volume *vol = desc->vol;
        struct ubi_device *ubi = vol->ubi;
        int err, lnum, off, len, tbuf_size;
        size_t count_save = count;
        void *tbuf;
        uint64_t tmp;

        dbg_msg("read %zd bytes from offset %lld of volume %d",
                count, *offp, vol->vol_id);

        if (vol->updating) {
                dbg_err("updating");
                return -EBUSY;
        }
        if (vol->upd_marker) {
                dbg_err("damaged volume, update marker is set");
                return -EBADF;
        }
        if (*offp == vol->used_bytes || count == 0)
                return 0;

        if (vol->corrupted)
                dbg_msg("read from corrupted volume %d", vol->vol_id);

        if (*offp + count > vol->used_bytes)
                count_save = count = vol->used_bytes - *offp;

        tbuf_size = vol->usable_leb_size;
        if (count < tbuf_size)
                tbuf_size = ALIGN(count, ubi->min_io_size);
        tbuf = vmalloc(tbuf_size);
        if (!tbuf)
                return -ENOMEM;

        len = count > tbuf_size ? tbuf_size : count;

        tmp = *offp;
        off = do_div(tmp, vol->usable_leb_size);
        lnum = tmp;

        do {
                cond_resched();

                if (off + len >= vol->usable_leb_size)
                        len = vol->usable_leb_size - off;

                err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
                if (err)
                        break;

                off += len;
                if (off == vol->usable_leb_size) {
                        lnum += 1;
                        off -= vol->usable_leb_size;
                }

                count -= len;
                *offp += len;

                err = copy_to_user(buf, tbuf, len);
                if (err) {
                        err = -EFAULT;
                        break;
                }

                buf += len;
                len = count > tbuf_size ? tbuf_size : count;
        } while (count);

        vfree(tbuf);
        return err ? err : count_save - count;
}
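/*
 * A minimal user-space sketch of reading from a volume character device via
 * the llseek and read handlers above. The device node name is an assumption
 * and error handling is omitted:
 *
 *	char buf[4096];
 *	int fd = open("/dev/ubi0_1", O_RDONLY);
 *	lseek(fd, 0, SEEK_SET);
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	close(fd);
 *
 * Reads beyond vol->used_bytes are truncated, and reading a volume whose
 * update marker is set fails with -EBADF, as implemented in vol_cdev_read().
 */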
#ifdef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO

/*
 * This function allows one to write directly to dynamic UBI volumes, without
 * issuing the volume update operation. It is available only as a debugging
 * feature and is very useful for testing UBI.
 */
static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
                                     size_t count, loff_t *offp)
{
        struct ubi_volume_desc *desc = file->private_data;
        struct ubi_volume *vol = desc->vol;
        struct ubi_device *ubi = vol->ubi;
        int lnum, off, len, tbuf_size, err = 0;
        size_t count_save = count;
        char *tbuf;
        uint64_t tmp;

        dbg_msg("requested: write %zd bytes to offset %lld of volume %u",
                count, *offp, vol->vol_id);

        if (vol->vol_type == UBI_STATIC_VOLUME)
                return -EROFS;

        tmp = *offp;
        off = do_div(tmp, vol->usable_leb_size);
        lnum = tmp;

        if (off % ubi->min_io_size) {
                dbg_err("unaligned position");
                return -EINVAL;
        }

        if (*offp + count > vol->used_bytes)
                count_save = count = vol->used_bytes - *offp;

        /* We can write only in fractions of the minimum I/O unit */
        if (count % ubi->min_io_size) {
                dbg_err("unaligned write length");
                return -EINVAL;
        }

        tbuf_size = vol->usable_leb_size;
        if (count < tbuf_size)
                tbuf_size = ALIGN(count, ubi->min_io_size);
        tbuf = vmalloc(tbuf_size);
        if (!tbuf)
                return -ENOMEM;

        len = count > tbuf_size ? tbuf_size : count;

        while (count) {
                cond_resched();

                if (off + len >= vol->usable_leb_size)
                        len = vol->usable_leb_size - off;

                err = copy_from_user(tbuf, buf, len);
                if (err) {
                        err = -EFAULT;
                        break;
                }

                err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
                                        UBI_UNKNOWN);
                if (err)
                        break;

                off += len;
                if (off == vol->usable_leb_size) {
                        lnum += 1;
                        off -= vol->usable_leb_size;
                }

                count -= len;
                *offp += len;
                buf += len;
                len = count > tbuf_size ? tbuf_size : count;
        }

        vfree(tbuf);
        return err ? err : count_save - count;
}

#else
#define vol_cdev_direct_write(file, buf, count, offp) -EPERM
#endif /* CONFIG_MTD_UBI_DEBUG_USERSPACE_IO */

static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *offp)
{
        int err = 0;
        struct ubi_volume_desc *desc = file->private_data;
        struct ubi_volume *vol = desc->vol;
        struct ubi_device *ubi = vol->ubi;

        if (!vol->updating && !vol->changing_leb)
                return vol_cdev_direct_write(file, buf, count, offp);

        if (vol->updating)
                err = ubi_more_update_data(ubi, vol, buf, count);
        else
                err = ubi_more_leb_change_data(ubi, vol, buf, count);

        if (err < 0) {
                ubi_err("cannot accept more %zd bytes of data, error %d",
                        count, err);
                return err;
        }

        if (err) {
                /*
                 * The operation is finished, @err contains the number of
                 * bytes actually written.
                 */
                count = err;

                if (vol->changing_leb) {
                        revoke_exclusive(desc, UBI_READWRITE);
                        return count;
                }

                err = ubi_check_volume(ubi, vol->vol_id);
                if (err < 0)
                        return err;

                if (err) {
                        ubi_warn("volume %d on UBI device %d is corrupted",
                                 vol->vol_id, ubi->ubi_num);
                        vol->corrupted = 1;
                }
                vol->checked = 1;
                ubi_gluebi_updated(vol);
                revoke_exclusive(desc, UBI_READWRITE);
        }

        return count;
}
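/*
 * A minimal user-space sketch of the volume update sequence handled by
 * vol_cdev_write() above and the UBI_IOCVOLUP case below. The device node
 * name and image variables are assumptions; error handling is omitted:
 *
 *	int fd = open("/dev/ubi0_1", O_RDWR);
 *	long long bytes = image_size;
 *	ioctl(fd, UBI_IOCVOLUP, &bytes);	<- start the update
 *	write(fd, image, image_size);		<- stream the new contents
 *	close(fd);
 *
 * When bytes is zero, the handler revokes exclusive mode immediately and no
 * data is expected.
 */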
static int vol_cdev_ioctl(struct inode *inode, struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        int err = 0;
        struct ubi_volume_desc *desc = file->private_data;
        struct ubi_volume *vol = desc->vol;
        struct ubi_device *ubi = vol->ubi;
        void __user *argp = (void __user *)arg;

        switch (cmd) {
        /* Volume update command */
        case UBI_IOCVOLUP:
        {
                int64_t bytes, rsvd_bytes;

                if (!capable(CAP_SYS_RESOURCE)) {
                        err = -EPERM;
                        break;
                }

                err = copy_from_user(&bytes, argp, sizeof(int64_t));
                if (err) {
                        err = -EFAULT;
                        break;
                }

                if (desc->mode == UBI_READONLY) {
                        err = -EROFS;
                        break;
                }

                rsvd_bytes = vol->reserved_pebs * (ubi->leb_size - vol->data_pad);
                if (bytes < 0 || bytes > rsvd_bytes) {
                        err = -EINVAL;
                        break;
                }

                err = get_exclusive(desc);
                if (err < 0)
                        break;

                err = ubi_start_update(ubi, vol, bytes);
                if (bytes == 0)
                        revoke_exclusive(desc, UBI_READWRITE);
                break;
        }

        /* Atomic logical eraseblock change command */
        case UBI_IOCEBCH:
        {
                struct ubi_leb_change_req req;

                err = copy_from_user(&req, argp,
                                     sizeof(struct ubi_leb_change_req));
                if (err) {
                        err = -EFAULT;
                        break;
                }

                if (desc->mode == UBI_READONLY ||
                    vol->vol_type == UBI_STATIC_VOLUME) {
                        err = -EROFS;
                        break;
                }

                /* Validate the request */
                err = -EINVAL;
                if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
                    req.bytes < 0 || req.bytes > vol->usable_leb_size)
                        break;
                if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
                    req.dtype != UBI_UNKNOWN)
                        break;

                err = get_exclusive(desc);
                if (err < 0)
                        break;

                err = ubi_start_leb_change(ubi, vol, &req);
                if (req.bytes == 0)
                        revoke_exclusive(desc, UBI_READWRITE);
                break;
        }

#ifdef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
        /* Logical eraseblock erasure command */
        case UBI_IOCEBER:
        {
                int32_t lnum;

                err = get_user(lnum, (__user int32_t *)argp);
                if (err) {
                        err = -EFAULT;
                        break;
                }

                if (desc->mode == UBI_READONLY ||
                    vol->vol_type == UBI_STATIC_VOLUME) {
                        err = -EROFS;
                        break;
                }

                if (lnum < 0 || lnum >= vol->reserved_pebs) {
                        err = -EINVAL;
                        break;
                }

                dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
                err = ubi_eba_unmap_leb(ubi, vol, lnum);
                if (err)
                        break;

                err = ubi_wl_flush(ubi);
                break;
        }
#endif

        default:
                err = -ENOTTY;
                break;
        }

        return err;
}
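/*
 * A minimal user-space sketch of an atomic LEB change through the UBI_IOCEBCH
 * case above. The struct fields mirror the validation performed by the
 * handler; the device node name and data buffer are assumptions:
 *
 *	struct ubi_leb_change_req req = {
 *		.lnum  = 3,		<- LEB to replace
 *		.bytes = data_len,	<- must not exceed the usable LEB size
 *		.dtype = UBI_UNKNOWN,
 *	};
 *	int fd = open("/dev/ubi0_1", O_RDWR);
 *	ioctl(fd, UBI_IOCEBCH, &req);
 *	write(fd, data, data_len);	<- new LEB contents
 *	close(fd);
 */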
/**
 * verify_mkvol_req - verify volume creation request.
 * @ubi: UBI device description object
 * @req: the request to check
 *
 * This function returns zero if the request is correct, and %-EINVAL or
 * %-ENAMETOOLONG if not.
 */
static int verify_mkvol_req(const struct ubi_device *ubi,
                            const struct ubi_mkvol_req *req)
{
        int n, err = -EINVAL;

        if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 ||
            req->name_len < 0)
                goto bad;

        if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) &&
            req->vol_id != UBI_VOL_NUM_AUTO)
                goto bad;

        if (req->alignment == 0)
                goto bad;

        if (req->bytes == 0)
                goto bad;

        if (req->vol_type != UBI_DYNAMIC_VOLUME &&
            req->vol_type != UBI_STATIC_VOLUME)
                goto bad;

        if (req->alignment > ubi->leb_size)
                goto bad;

        n = req->alignment % ubi->min_io_size;
        if (req->alignment != 1 && n)
                goto bad;

        if (req->name_len > UBI_VOL_NAME_MAX) {
                err = -ENAMETOOLONG;
                goto bad;
        }

        return 0;

bad:
        dbg_err("bad volume creation request");
        ubi_dbg_dump_mkvol_req(req);
        return err;
}

/**
 * verify_rsvol_req - verify volume re-size request.
 * @ubi: UBI device description object
 * @req: the request to check
 *
 * This function returns zero if the request is correct, and %-EINVAL if not.
 */
static int verify_rsvol_req(const struct ubi_device *ubi,
                            const struct ubi_rsvol_req *req)
{
        if (req->bytes <= 0)
                return -EINVAL;

        if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots)
                return -EINVAL;

        return 0;
}
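/*
 * A minimal user-space sketch of a volume creation request that satisfies
 * verify_mkvol_req() above, issued through the UBI_IOCMKVOL case below. The
 * caller needs CAP_SYS_RESOURCE (checked in ubi_cdev_ioctl()); the device
 * node name is an assumption and error handling is omitted:
 *
 *	struct ubi_mkvol_req req = {
 *		.vol_id    = UBI_VOL_NUM_AUTO,	<- let UBI pick a volume ID
 *		.alignment = 1,
 *		.bytes     = 1024 * 1024,
 *		.vol_type  = UBI_DYNAMIC_VOLUME,
 *		.name_len  = strlen("test"),
 *		.name      = "test",
 *	};
 *	int fd = open("/dev/ubi0", O_RDWR);
 *	ioctl(fd, UBI_IOCMKVOL, &req);	<- the assigned volume ID is copied
 *	close(fd);			   back through the argument
 */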
static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        int err = 0;
        struct ubi_device *ubi;
        struct ubi_volume_desc *desc;
        void __user *argp = (void __user *)arg;

        if (!capable(CAP_SYS_RESOURCE))
                return -EPERM;

        ubi = ubi_get_by_major(imajor(inode));
        if (!ubi)
                return -ENODEV;

        switch (cmd) {
        /* Create volume command */
        case UBI_IOCMKVOL:
        {
                struct ubi_mkvol_req req;

                dbg_msg("create volume");
                err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
                if (err) {
                        err = -EFAULT;
                        break;
                }

                err = verify_mkvol_req(ubi, &req);
                if (err)
                        break;

                req.name[req.name_len] = '\0';

                mutex_lock(&ubi->volumes_mutex);
                err = ubi_create_volume(ubi, &req);
                mutex_unlock(&ubi->volumes_mutex);
                if (err)
                        break;

                err = put_user(req.vol_id, (__user int32_t *)argp);
                if (err)
                        err = -EFAULT;

                break;
        }

        /* Remove volume command */
        case UBI_IOCRMVOL:
        {
                int vol_id;

                dbg_msg("remove volume");
                err = get_user(vol_id, (__user int32_t *)argp);
                if (err) {
                        err = -EFAULT;
                        break;
                }

                desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
                if (IS_ERR(desc)) {
                        err = PTR_ERR(desc);
                        break;
                }

                mutex_lock(&ubi->volumes_mutex);
                err = ubi_remove_volume(desc);
                mutex_unlock(&ubi->volumes_mutex);

                /*
                 * The volume is deleted (unless an error occurred), and the
                 * 'struct ubi_volume' object will be freed when
                 * 'ubi_close_volume()' calls 'put_device()'.
                 */
                ubi_close_volume(desc);
                break;
        }

        /* Re-size volume command */
        case UBI_IOCRSVOL:
        {
                int pebs;
                uint64_t tmp;
                struct ubi_rsvol_req req;

                dbg_msg("re-size volume");
                err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
                if (err) {
                        err = -EFAULT;
                        break;
                }

                err = verify_rsvol_req(ubi, &req);
                if (err)
                        break;

                desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
                if (IS_ERR(desc)) {
                        err = PTR_ERR(desc);
                        break;
                }

                /* Round the requested size up to a whole number of LEBs */
                tmp = req.bytes;
                pebs = !!do_div(tmp, desc->vol->usable_leb_size);
                pebs += tmp;

                mutex_lock(&ubi->volumes_mutex);
                err = ubi_resize_volume(desc, pebs);
                mutex_unlock(&ubi->volumes_mutex);
                ubi_close_volume(desc);
                break;
        }

        default:
                err = -ENOTTY;
                break;
        }

        ubi_put_device(ubi);
        return err;
}

static int ctrl_cdev_ioctl(struct inode *inode, struct file *file,
                           unsigned int cmd, unsigned long arg)
{
        int err = 0;
        void __user *argp = (void __user *)arg;

        if (!capable(CAP_SYS_RESOURCE))
                return -EPERM;

        switch (cmd) {
        /* Attach an MTD device command */
        case UBI_IOCATT:
        {
                struct ubi_attach_req req;
                struct mtd_info *mtd;

                dbg_msg("attach MTD device");
                err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
                if (err) {
                        err = -EFAULT;
                        break;
                }

                if (req.mtd_num < 0 ||
                    (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
                        err = -EINVAL;
                        break;
                }

                mtd = get_mtd_device(NULL, req.mtd_num);
                if (IS_ERR(mtd)) {
                        err = PTR_ERR(mtd);
                        break;
                }

                /*
                 * Note, further request verification is done by
                 * 'ubi_attach_mtd_dev()'.
                 */
                mutex_lock(&ubi_devices_mutex);
                err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
                mutex_unlock(&ubi_devices_mutex);
                if (err < 0)
                        put_mtd_device(mtd);
                else
                        /* @err contains the UBI device number */
                        err = put_user(err, (__user int32_t *)argp);

                break;
        }

        /* Detach an MTD device command */
        case UBI_IOCDET:
        {
                int ubi_num;

                dbg_msg("detach MTD device");
                err = get_user(ubi_num, (__user int32_t *)argp);
                if (err) {
                        err = -EFAULT;
                        break;
                }

                mutex_lock(&ubi_devices_mutex);
                err = ubi_detach_mtd_dev(ubi_num, 0);
                mutex_unlock(&ubi_devices_mutex);
                break;
        }

        default:
                err = -ENOTTY;
                break;
        }

        return err;
}

/* UBI control character device operations */
struct file_operations ubi_ctrl_cdev_operations = {
        .ioctl = ctrl_cdev_ioctl,
        .owner = THIS_MODULE,
};

/* UBI character device operations */
struct file_operations ubi_cdev_operations = {
        .owner  = THIS_MODULE,
        .ioctl  = ubi_cdev_ioctl,
        .llseek = no_llseek,
};

/* UBI volume character device operations */
struct file_operations ubi_vol_cdev_operations = {
        .owner   = THIS_MODULE,
        .open    = vol_cdev_open,
        .release = vol_cdev_release,
        .llseek  = vol_cdev_llseek,
        .read    = vol_cdev_read,
        .write   = vol_cdev_write,
        .ioctl   = vol_cdev_ioctl,
};
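/*
 * A minimal user-space sketch of attaching an MTD device through
 * ctrl_cdev_ioctl() above. The control device node name is an assumption
 * (it is typically created by udev) and error handling is omitted:
 *
 *	struct ubi_attach_req req = {
 *		.ubi_num        = UBI_DEV_NUM_AUTO,	<- let UBI pick a number
 *		.mtd_num        = 0,			<- MTD device to attach
 *		.vid_hdr_offset = 0,			<- 0: accept the default
 *	};
 *	int fd = open("/dev/ubi_ctrl", O_RDONLY);
 *	ioctl(fd, UBI_IOCATT, &req);	<- the new UBI device number is
 *	close(fd);			   written back through the argument
 */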