/*
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#include <asm/uaccess.h>

static struct class *mtd_class;

static void mtd_notify_add(struct mtd_info *mtd)
{
	if (!mtd)
		return;

	device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
		      NULL, "mtd%d", mtd->index);

	device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
		      NULL, "mtd%dro", mtd->index);
}

static void mtd_notify_remove(struct mtd_info *mtd)
{
	if (!mtd)
		return;

	device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
	device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
}

static struct mtd_notifier notifier = {
	.add = mtd_notify_add,
	.remove = mtd_notify_remove,
};

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtd_lseek(struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	switch (orig) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		break;
	case SEEK_END:
		offset += mtd->size;
		break;
	default:
		return -EINVAL;
	}

	if (offset >= 0 && offset <= mtd->size)
		return file->f_pos = offset;

	return -EINVAL;
}

static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	if (devnum >= MAX_MTD_DEVICES)
		return -ENODEV;

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	lock_kernel();
	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd)) {
		ret = PTR_ERR(mtd);
		goto out;
	}

	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		ret = -ENODEV;
		goto out;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		put_mtd_device(mtd);
		ret = -EACCES;
		goto out;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		put_mtd_device(mtd);
		ret = -ENOMEM;
		goto out;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;

out:
	unlock_kernel();
	return ret;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE) && mtd->sync)
		mtd->sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die.
   In 2.5, we should lock the userspace buffer down and use it directly
   with readv/writev.
*/
#define MAX_KMALLOC_SIZE 0x20000

static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen = 0;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */

	if (count > MAX_KMALLOC_SIZE)
		kbuf = kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf = kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* NAND returns -EBADMSG on ECC errors, but it still returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND
		 */
		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */

static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	if (count > MAX_KMALLOC_SIZE)
		kbuf = kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf = kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;

		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/
static void mtdchar_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

#ifdef CONFIG_HAVE_MTD_OTP
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	int ret = 0;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (!mtd->read_user_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_USER;
		break;
	default:
		ret = -EINVAL;
		/* fall through */
	case MTD_OTP_OFF:
		break;
	}
	return ret;
}
#else
# define otp_select_filemode(f,m)	-EOPNOTSUPP
#endif

static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;
	struct mtd_info_user info;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user *ur = (struct region_info_user *) argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		/* Reject out-of-range indices before touching the array */
		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		info.type = mtd->type;
		info.flags = mtd->flags;
		info.size = mtd->size;
		info.erasesize = mtd->erasesize;
		info.writesize = mtd->writesize;
		info.oobsize = mtd->oobsize;
		/* The below fields are obsolete */
		info.ecctype = -1;
		info.eccsize = 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info *erase;

		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			struct erase_info_user einfo;

			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			if (copy_from_user(&einfo, argp,
					   sizeof(struct erase_info_user))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->addr = einfo.start;
			erase->len = einfo.length;
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;
		struct mtd_oob_buf __user *user_buf = argp;
		uint32_t retlen;

		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
			kfree(ops.oobbuf);
			return -EFAULT;
		}

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->write_oob(mtd, buf.start, &ops);

		if (ops.oobretlen > 0xFFFFFFFFU)
			ret = -EOVERFLOW;
		retlen = ops.oobretlen;
		if (copy_to_user(&user_buf->length, &retlen, sizeof(buf.length)))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;
		if (ret)
			return ret;

		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->read_oob(mtd, buf.start, &ops);

		if (put_user(ops.oobretlen, (uint32_t __user *)argp))
			ret = -EFAULT;
		else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
						       ops.oobretlen))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));
		oi.eccbytes = mtd->ecclayout->eccbytes;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#ifdef CONFIG_HAVE_MTD_OTP
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		default:
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}
#endif

	case ECCGETLAYOUT:
	{
		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		if (copy_to_user(argp, mtd->ecclayout,
				 sizeof(struct nand_ecclayout)))
			return -EFAULT;
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch (arg) {
		case MTD_MODE_OTP_FACTORY:
		case MTD_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_MODE_RAW:
			if (!mtd->read_oob || !mtd->write_oob)
				return -EOPNOTSUPP;
			mfi->mode = arg;
			/* fall through */

		case MTD_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* mtd_ioctl */

static const struct file_operations mtd_fops = {
	.owner = THIS_MODULE,
	.llseek = mtd_lseek,
	.read = mtd_read,
	.write = mtd_write,
	.ioctl = mtd_ioctl,
	.open = mtd_open,
	.release = mtd_close,
};

static int __init init_mtdchar(void)
{
	if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_CHAR_MAJOR);
		return -EAGAIN;
	}

	mtd_class = class_create(THIS_MODULE, "mtd");

	if (IS_ERR(mtd_class)) {
		printk(KERN_ERR "Error creating mtd class.\n");
		unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
		return PTR_ERR(mtd_class);
	}

	register_mtd_user(&notifier);
	return 0;
}

static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&notifier);
	class_destroy(mtd_class);
	unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");
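
/*
 * Illustrative userspace sketch (not part of this driver): a minimal
 * program that exercises the ioctls above by querying the device
 * geometry with MEMGETINFO and erasing the first eraseblock with
 * MEMERASE. The /dev/mtd0 node and the <mtd/mtd-user.h> header are
 * assumptions for the example; error handling is kept to a minimum.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/mtd-user.h>
 *
 *	int main(void)
 *	{
 *		struct mtd_info_user info;
 *		struct erase_info_user erase;
 *		int fd = open("/dev/mtd0", O_RDWR);
 *
 *		if (fd < 0 || ioctl(fd, MEMGETINFO, &info) < 0)
 *			return 1;
 *
 *		erase.start = 0;			// byte offset of the block
 *		erase.length = info.erasesize;		// one full eraseblock
 *		if (ioctl(fd, MEMERASE, &erase) < 0)
 *			return 1;
 *
 *		close(fd);
 *		return 0;
 *	}
 */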