/*
 * $Id: mtdchar.c,v 1.76 2005/11/07 11:14:20 gleixner Exp $
 *
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#include <asm/uaccess.h>

static struct class *mtd_class;

static void mtd_notify_add(struct mtd_info *mtd)
{
	if (!mtd)
		return;

	device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
		      "mtd%d", mtd->index);

	device_create(mtd_class, NULL,
		      MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
		      "mtd%dro", mtd->index);
}

static void mtd_notify_remove(struct mtd_info *mtd)
{
	if (!mtd)
		return;

	device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
	device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
}

static struct mtd_notifier notifier = {
	.add	= mtd_notify_add,
	.remove	= mtd_notify_remove,
};

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	switch (orig) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		break;
	case SEEK_END:
		offset += mtd->size;
		break;
	default:
		return -EINVAL;
	}

	if (offset >= 0 && offset <= mtd->size)
		return file->f_pos = offset;

	return -EINVAL;
}

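/*
 * Each MTD device is exposed as two character-device minors: minor
 * index*2 is the read-write node ("mtd%d") and minor index*2+1 is the
 * read-only node ("mtd%dro"), so the device index is minor >> 1 and
 * bit 0 of the minor marks the read-only variant.
 */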
static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	if (devnum >= MAX_MTD_DEVICES)
		return -ENODEV;

	/* You can't open the RO devices RW */
	if ((file->f_mode & 2) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		return -ENODEV;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
		put_mtd_device(mtd);
		return -EACCES;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		put_mtd_device(mtd);
		return -ENOMEM;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;

	return 0;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & 2) && mtd->sync)
		mtd->sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die. In 2.5, we should lock the
   userspace buffer down and use it directly with readv/writev.
*/
#define MAX_KMALLOC_SIZE 0x20000

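/*
 * Reads and writes are staged through a kernel bounce buffer of at
 * most MAX_KMALLOC_SIZE bytes; larger requests are split into chunks
 * that are copied to or from user space one at a time.
 */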
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen = 0;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */

	if (count > MAX_KMALLOC_SIZE)
		kbuf = kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf = kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* NAND returns -EBADMSG on ECC errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND.
		 */
		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */

static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	if (count > MAX_KMALLOC_SIZE)
		kbuf = kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf = kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;

		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/

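/*
 * Erase completion callback handed to mtd->erase() by MEMERASE below;
 * it wakes the ioctl caller sleeping on the wait queue stashed in
 * erase_info->priv.
 */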
static void mtdchar_erase_callback (struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	int ret = 0;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (!mtd->read_user_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_USER;
		break;
	default:
		ret = -EINVAL;
	case MTD_OTP_OFF:
		break;
	}
	return ret;
}
#else
# define otp_select_filemode(f,m)	-EOPNOTSUPP
#endif

static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;
	struct mtd_info_user info;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		struct region_info_user ur;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
			return -EFAULT;

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
				 sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
	}

	case MEMGETINFO:
		info.type	= mtd->type;
		info.flags	= mtd->flags;
		info.size	= mtd->size;
		info.erasesize	= mtd->erasesize;
		info.writesize	= mtd->writesize;
		info.oobsize	= mtd->oobsize;
		/* The below fields are obsolete */
		info.ecctype	= -1;
		info.eccsize	= 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info *erase;

		if (!(file->f_mode & 2))
			return -EPERM;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			if (copy_from_user(&erase->addr, argp,
					   sizeof(struct erase_info_user))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
			}
			kfree(erase);
		}
		break;
	}

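	/*
	 * MEMWRITEOOB/MEMREADOOB bounce at most 4096 bytes of OOB data
	 * through a kernel buffer; buf.start is split into an
	 * oobsize-aligned flash offset plus an offset within the OOB area.
	 */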
	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;
		uint32_t retlen;

		if (!(file->f_mode & 2))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
			kfree(ops.oobbuf);
			return -EFAULT;
		}

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->write_oob(mtd, buf.start, &ops);

		if (ops.oobretlen > 0xFFFFFFFFU)
			ret = -EOVERFLOW;
		retlen = ops.oobretlen;
		if (copy_to_user(&((struct mtd_oob_buf *)argp)->length,
				 &retlen, sizeof(buf.length)))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;

	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;
		if (ret)
			return ret;

		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->read_oob(mtd, buf.start, &ops);

		if (put_user(ops.oobretlen, (uint32_t __user *)argp))
			ret = -EFAULT;
		else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
						       ops.oobretlen))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));
		oi.eccbytes = mtd->ecclayout->eccbytes;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		default:
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info info;

		if (mfi->mode != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
		break;
	}
#endif

	case ECCGETLAYOUT:
	{
		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		if (copy_to_user(argp, mtd->ecclayout,
				 sizeof(struct nand_ecclayout)))
			return -EFAULT;
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch (arg) {
		case MTD_MODE_OTP_FACTORY:
		case MTD_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_MODE_RAW:
			if (!mtd->read_oob || !mtd->write_oob)
				return -EOPNOTSUPP;
			mfi->mode = arg;
			/* fall through */

		case MTD_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* mtd_ioctl */

static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
	.open		= mtd_open,
	.release	= mtd_close,
};

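/*
 * Module init: claim the MTD character major, create the "mtd" class so
 * device nodes can be created for it, and register the add/remove
 * notifier above with the MTD core so nodes appear for every MTD device.
 */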
static int __init init_mtdchar(void)
{
	if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_CHAR_MAJOR);
		return -EAGAIN;
	}

	mtd_class = class_create(THIS_MODULE, "mtd");

	if (IS_ERR(mtd_class)) {
		printk(KERN_ERR "Error creating mtd class.\n");
		unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
		return PTR_ERR(mtd_class);
	}

	register_mtd_user(&notifier);
	return 0;
}

static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&notifier);
	class_destroy(mtd_class);
	unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");