/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>

#include <asm/uaccess.h>

#define MTD_INODE_FS_MAGIC 0x11307854
static DEFINE_MUTEX(mtd_mutex);
static struct vfsmount *mtd_inode_mnt __read_mostly;

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	struct inode *ino;
	enum mtd_file_modes mode;
};

static loff_t mtd_lseek(struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	switch (orig) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		break;
	case SEEK_END:
		offset += mtd->size;
		break;
	default:
		return -EINVAL;
	}

	if (offset >= 0 && offset <= mtd->size)
		return file->f_pos = offset;

	return -EINVAL;
}

static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;
	struct inode *mtd_ino;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	mutex_lock(&mtd_mutex);
	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd)) {
		ret = PTR_ERR(mtd);
		goto out;
	}

	if (mtd->type == MTD_ABSENT) {
		put_mtd_device(mtd);
		ret = -ENODEV;
		goto out;
	}

	mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
	if (!mtd_ino) {
		put_mtd_device(mtd);
		ret = -ENOMEM;
		goto out;
	}
	if (mtd_ino->i_state & I_NEW) {
		mtd_ino->i_private = mtd;
		mtd_ino->i_mode = S_IFCHR;
		mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
		unlock_new_inode(mtd_ino);
	}
	file->f_mapping = mtd_ino->i_mapping;

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		iput(mtd_ino);
		put_mtd_device(mtd);
		ret = -EACCES;
		goto out;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		iput(mtd_ino);
		put_mtd_device(mtd);
		ret = -ENOMEM;
		goto out;
	}
	mfi->ino = mtd_ino;
	mfi->mtd = mtd;
	file->private_data = mfi;

out:
	mutex_unlock(&mtd_mutex);
	return ret;
} /* mtd_open */
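/*
 * The minor-number scheme in mtd_open() gives each MTD device a pair of
 * minors: an even minor for the read-write node and the following odd
 * minor for the read-only node, so devnum = minor >> 1 and (minor & 1)
 * marks the RO variant.  A minimal userspace sketch (assuming the
 * conventional /dev/mtdN and /dev/mtdNro device nodes):
 *
 *	int fd = open("/dev/mtd0", O_RDWR);	// minor 0: allowed
 *	int ro = open("/dev/mtd0ro", O_RDWR);	// minor 1: fails, EACCES
 *	int r  = open("/dev/mtd0ro", O_RDONLY);	// minor 1: allowed
 */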
/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE) && mtd->sync)
		mtd->sync(mtd);

	iput(mfi->ino);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtd_close */

/* Back in June 2001, dwmw2 wrote:
 *
 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 *   userspace buffer down and use it directly with readv/writev.
 *
 * The implementation below, using mtd_kmalloc_up_to, mitigates
 * allocation failures when the system is under low-memory situations
 * or if memory is highly fragmented at the cost of reducing the
 * performance of the requested transfer due to a smaller buffer size.
 *
 * A more complex but more memory-efficient implementation based on
 * get_user_pages and iovecs to cover extents of those pages is a
 * longer-term goal, as intimated by dwmw2 above. However, for the
 * write case, this requires yet more complex head and tail transfer
 * handling when those head and tail offsets and sizes are such that
 * alignment requirements are not met in the NAND subdriver.
 */
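/*
 * To make the trade-off above concrete: mtd_kmalloc_up_to() may shrink
 * the requested buffer rather than fail, and the read/write loops below
 * then split the transfer into buffer-sized chunks.  A hedged sketch of
 * the pattern (the sizes are illustrative only):
 *
 *	size_t size = count;		// e.g. a 1 MiB request
 *	kbuf = mtd_kmalloc_up_to(mtd, &size);
 *	// under memory pressure, size may come back as e.g. 64 KiB;
 *	// the while (count) loop then issues sixteen 64 KiB transfers
 *	// instead of a single 1 MiB transfer.
 */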
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen = 0;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	size_t size = count;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* NAND returns -EBADMSG on ECC errors, but it still returns
		 * the data. For our userspace tools it is important to dump
		 * areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way must be
		 * aware of the fact that it deals with NAND.
		 */
		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */
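/*
 * A consequence of the error handling above worth spelling out: when the
 * low-level driver reports -EUCLEAN (corrected bitflips) or -EBADMSG
 * (uncorrectable ECC error), the data is still copied out and read(2)
 * returns the byte count as if the read had been clean.  Userspace that
 * needs to distinguish these cases cannot learn of them through this
 * read path and has to consult other interfaces, for example the ECC
 * statistics exposed via the ECCGETSTATS ioctl handled below.
 */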
static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t size = count;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;

		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = mtd->write(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/
static void mtdchar_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

#ifdef CONFIG_HAVE_MTD_OTP
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	int ret = 0;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (!mtd->read_user_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_USER;
		break;
	default:
		ret = -EINVAL;
	case MTD_OTP_OFF:
		break;
	}
	return ret;
}
#else
# define otp_select_filemode(f,m)	-EOPNOTSUPP
#endif

static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
	uint64_t start, uint32_t length, void __user *ptr,
	uint32_t __user *retp)
{
	struct mtd_oob_ops ops;
	uint32_t retlen;
	int ret = 0;

	if (!(file->f_mode & FMODE_WRITE))
		return -EPERM;

	if (length > 4096)
		return -EINVAL;

	if (!mtd->write_oob)
		ret = -EOPNOTSUPP;
	else
		ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;

	if (ret)
		return ret;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->oobsize - 1);
	ops.datbuf = NULL;
	ops.mode = MTD_OOB_PLACE;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

	start &= ~((uint64_t)mtd->oobsize - 1);
	ret = mtd->write_oob(mtd, start, &ops);

	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(retlen)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}
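/*
 * The start/offset split in mtd_do_writeoob() and mtd_do_readoob() is
 * easiest to see with concrete numbers (illustrative values, assuming a
 * power-of-two oobsize of 64 bytes):
 *
 *	start = 0x850
 *	ops.ooboffs = 0x850 & (64 - 1)     = 0x10  (byte 16 in the OOB)
 *	start      &= ~((uint64_t)64 - 1)  = 0x840 (page-aligned address)
 *
 * Note that this masking only works because oobsize is a power of two.
 */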
static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
	uint32_t length, void __user *ptr, uint32_t __user *retp)
{
	struct mtd_oob_ops ops;
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	if (!mtd->read_oob)
		ret = -EOPNOTSUPP;
	else
		ret = access_ok(VERIFY_WRITE, ptr, length) ? 0 : -EFAULT;
	if (ret)
		return ret;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->oobsize - 1);
	ops.datbuf = NULL;
	ops.mode = MTD_OOB_PLACE;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->oobsize - 1);
	ret = mtd->read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}

/*
 * Copies (and truncates, if necessary) data from the larger struct,
 * nand_ecclayout, to the smaller, deprecated layout struct,
 * nand_ecclayout_user. This is necessary only to support the deprecated
 * API ioctl ECCGETLAYOUT while allowing all new functionality to use
 * nand_ecclayout flexibly (i.e. the struct may change size in new
 * releases without requiring major rewrites).
 */
static int shrink_ecclayout(const struct nand_ecclayout *from,
			    struct nand_ecclayout_user *to)
{
	int i;

	if (!from || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
	for (i = 0; i < to->eccbytes; i++)
		to->eccpos[i] = from->eccpos[i];

	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
		if (from->oobfree[i].length == 0 &&
		    from->oobfree[i].offset == 0)
			break;
		to->oobavail += from->oobfree[i].length;
		to->oobfree[i] = from->oobfree[i];
	}

	return 0;
}

static int mtd_blkpg_ioctl(struct mtd_info *mtd,
			   struct blkpg_ioctl_arg __user *arg)
{
	struct blkpg_ioctl_arg a;
	struct blkpg_partition p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
		return -EFAULT;

	if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
		return -EFAULT;

	switch (a.op) {
	case BLKPG_ADD_PARTITION:

		/* Only master mtd device must be used to add partitions */
		if (mtd_is_partition(mtd))
			return -EINVAL;

		return mtd_add_partition(mtd, p.devname, p.start, p.length);

	case BLKPG_DEL_PARTITION:

		if (p.pno < 0)
			return -EINVAL;

		return mtd_del_partition(mtd, p.pno);

	default:
		return -EINVAL;
	}
}
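/*
 * A hedged userspace sketch of driving mtd_blkpg_ioctl() above, adding a
 * partition to a master device (field values and the partition name are
 * illustrative; the structures come from <linux/blkpg.h>):
 *
 *	struct blkpg_partition p = {
 *		.start  = 0,		// byte offset into the master
 *		.length = 0x20000,	// partition size in bytes
 *	};				// .pno is ignored for add
 *	strcpy(p.devname, "example");	// name of the new partition
 *	struct blkpg_ioctl_arg a = {
 *		.op      = BLKPG_ADD_PARTITION,
 *		.datalen = sizeof(p),
 *		.data    = &p,
 *	};
 *	ioctl(fd, BLKPG, &a);		// requires CAP_SYS_ADMIN
 */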
static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;
	struct mtd_info_user info;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		memset(&info, 0, sizeof(info));
		info.type = mtd->type;
		info.flags = mtd->flags;
		info.size = mtd->size;
		info.erasesize = mtd->erasesize;
		info.writesize = mtd->writesize;
		info.oobsize = mtd->oobsize;
		/* The below field is obsolete */
		info.ecctype = -1;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
					    sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
					    sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
			}
			kfree(erase);
		}
		break;
	}
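	/*
	 * A hedged userspace sketch of the erase path handled just above,
	 * erasing one eraseblock at an illustrative offset (the device
	 * must have been opened read-write):
	 *
	 *	struct erase_info_user64 ei = {
	 *		.start  = 0x20000,		// block-aligned offset
	 *		.length = info.erasesize,	// from MEMGETINFO
	 *	};
	 *	if (ioctl(fd, MEMERASE64, &ei) < 0)
	 *		perror("MEMERASE64");
	 *
	 * The ioctl blocks (uninterruptibly, per the FIXME above) until
	 * the erase completes or fails.
	 */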
	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->length);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start, buf.length,
				buf.ptr, &buf_user->start);
		break;
	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->is_locked)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->is_locked(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));
		oi.eccbytes = mtd->ecclayout->eccbytes;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#ifdef CONFIG_HAVE_MTD_OTP
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		default:
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}
#endif

	/* This ioctl is being deprecated - it truncates the ecc layout */
	case ECCGETLAYOUT:
	{
		struct nand_ecclayout_user *usrlay;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
		if (!usrlay)
			return -ENOMEM;

		shrink_ecclayout(mtd->ecclayout, usrlay);

		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
			ret = -EFAULT;
		kfree(usrlay);
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch (arg) {
		case MTD_MODE_OTP_FACTORY:
		case MTD_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_MODE_RAW:
			if (!mtd->read_oob || !mtd->write_oob)
				return -EOPNOTSUPP;
			mfi->mode = arg;
			/* fall through */

		case MTD_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}
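	/*
	 * A hedged userspace sketch of the per-open file modes selected
	 * above: MTDFILEMODE takes the mode as the ioctl argument itself
	 * (not a pointer), changes how subsequent read()/write() calls on
	 * this file descriptor are routed (raw mode bypasses ECC via the
	 * driver's read_oob/write_oob hooks), and resets f_pos:
	 *
	 *	ioctl(fd, MTDFILEMODE, MTD_MODE_RAW);	// raw, no ECC
	 *	read(fd, page_buf, info.writesize);	// raw page data
	 *	ioctl(fd, MTDFILEMODE, MTD_MODE_NORMAL);
	 */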
	case BLKPG:
	{
		ret = mtd_blkpg_ioctl(mtd,
		      (struct blkpg_ioctl_arg __user *)arg);
		break;
	}

	case BLKRRPART:
	{
		/* No reread partition feature. Just return ok */
		ret = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* mtd_ioctl */

static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	int ret;

	mutex_lock(&mtd_mutex);
	ret = mtd_ioctl(file, cmd, arg);
	mutex_unlock(&mtd_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT

struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32		_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32		_IOWR('M', 4, struct mtd_oob_buf32)

static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&mtd_mutex);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}
	default:
		ret = mtd_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&mtd_mutex);

	return ret;
}

#endif /* CONFIG_COMPAT */
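/*
 * Why the 32-bit compat shims above exist: struct mtd_oob_buf ends in a
 * bare pointer, so its size differs between 32-bit userspace (4-byte
 * ptr, 12 bytes total) and a 64-bit kernel (8-byte pointer, padded to
 * 16 bytes).  Because _IOWR() encodes sizeof() into the command number,
 * a 32-bit MEMWRITEOOB/MEMREADOOB arrives as a command value the native
 * switch does not recognize; mtd_compat_ioctl() decodes the 32-bit
 * layout and calls mtd_do_writeoob()/mtd_do_readoob() directly with
 * compat_ptr(buf.ptr).
 */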
/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (the MMU case doesn't copy
 *   private mappings)
 */
#ifndef CONFIG_MMU
static unsigned long mtd_get_unmapped_area(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	if (mtd->get_unmapped_area) {
		unsigned long offset;

		if (addr != 0)
			return (unsigned long) -EINVAL;

		if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
			return (unsigned long) -EINVAL;

		offset = pgoff << PAGE_SHIFT;
		if (offset > mtd->size - len)
			return (unsigned long) -EINVAL;

		return mtd->get_unmapped_area(mtd, len, offset, flags);
	}

	/* can't map directly */
	return (unsigned long) -ENOSYS;
}
#endif

/*
 * set up a mapping for shared memory segments
 */
static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;
	unsigned long start;
	unsigned long off;
	u32 len;

	if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
		off = vma->vm_pgoff << PAGE_SHIFT;
		start = map->phys;
		len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
		start &= PAGE_MASK;
		if ((vma->vm_end - vma->vm_start + off) > len)
			return -EINVAL;

		off += start;
		vma->vm_pgoff = off >> PAGE_SHIFT;
		vma->vm_flags |= VM_IO | VM_RESERVED;

#ifdef pgprot_noncached
		if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;

		return 0;
	}
	return -ENOSYS;
#else
	return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
#endif
}

static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.unlocked_ioctl	= mtd_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtd_compat_ioctl,
#endif
	.open		= mtd_open,
	.release	= mtd_close,
	.mmap		= mtd_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtd_get_unmapped_area,
#endif
};

static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL,
			    MTD_INODE_FS_MAGIC);
}

static struct file_system_type mtd_inodefs_type = {
	.name = "mtd_inodefs",
	.mount = mtd_inodefs_mount,
	.kill_sb = kill_anon_super,
};

static void mtdchar_notify_add(struct mtd_info *mtd)
{
}

static void mtdchar_notify_remove(struct mtd_info *mtd)
{
	struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);

	if (mtd_ino) {
		/* Destroy the inode if it exists */
		mtd_ino->i_nlink = 0;
		iput(mtd_ino);
	}
}

static struct mtd_notifier mtdchar_notifier = {
	.add = mtdchar_notify_add,
	.remove = mtdchar_notify_remove,
};

static int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (ret < 0) {
		pr_notice("Can't allocate major number %d for "
			  "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
		return ret;
	}

	ret = register_filesystem(&mtd_inodefs_type);
	if (ret) {
		pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
		goto err_unregister_chdev;
	}

	mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
	if (IS_ERR(mtd_inode_mnt)) {
		ret = PTR_ERR(mtd_inode_mnt);
		pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
		goto err_unregister_filesystem;
	}
	register_mtd_user(&mtdchar_notifier);

	return ret;

err_unregister_filesystem:
	unregister_filesystem(&mtd_inodefs_type);
err_unregister_chdev:
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
	return ret;
}

static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&mtdchar_notifier);
	mntput(mtd_inode_mnt);
	unregister_filesystem(&mtd_inodefs_type);
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");
MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);