// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>

#include <linux/uaccess.h>

#include "mtdcore.h"

static DEFINE_MUTEX(mtd_mutex);

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information of various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;

	return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
}

static int mtdchar_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	pr_debug("MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	mutex_lock(&mtd_mutex);
	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd)) {
		ret = PTR_ERR(mtd);
		goto out;
	}

	if (mtd->type == MTD_ABSENT) {
		ret = -ENODEV;
		goto out1;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		ret = -EACCES;
		goto out1;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		ret = -ENOMEM;
		goto out1;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;
	mutex_unlock(&mtd_mutex);
	return 0;

out1:
	put_mtd_device(mtd);
out:
	mutex_unlock(&mtd_mutex);
	return ret;
} /* mtdchar_open */

/*====================================================================*/

static int mtdchar_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	pr_debug("MTD_close\n");

	/* Only sync if opened RW */
	if (file->f_mode & FMODE_WRITE)
		mtd_sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtdchar_close */
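/*
 * Illustrative userspace sketch (not part of this driver): each MTD
 * device gets an even, read-write minor and an odd, read-only one, so
 * with the usual udev naming (the node names are an assumption here):
 *
 *	int fd = open("/dev/mtd0", O_RDWR);	// minor 0: read-write
 *	int ro = open("/dev/mtd0ro", O_RDWR);	// minor 1: fails, EACCES
 */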
/* Back in June 2001, dwmw2 wrote:
 *
 * FIXME: This _really_ needs to die. In 2.5, we should lock the
 * userspace buffer down and use it directly with readv/writev.
 *
 * The implementation below, using mtd_kmalloc_up_to, mitigates
 * allocation failures when the system is under low-memory situations
 * or if memory is highly fragmented at the cost of reducing the
 * performance of the requested transfer due to a smaller buffer size.
 *
 * A more complex but more memory-efficient implementation based on
 * get_user_pages and iovecs to cover extents of those pages is a
 * longer-term goal, as intimated by dwmw2 above. However, for the
 * write case, this requires yet more complex head and tail transfer
 * handling when those head and tail offsets and sizes are such that
 * alignment requirements are not met in the NAND subdriver.
 */

static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	size_t size = count;
	char *kbuf;

	pr_debug("MTD_read\n");

	if (*ppos + count > mtd->size) {
		if (*ppos < mtd->size)
			count = mtd->size - *ppos;
		else
			count = 0;
	}

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_read_user_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd_read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
		}
		/*
		 * NAND returns -EBADMSG on ECC errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND.
		 */
		if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		} else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_read */
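/*
 * Illustrative sketch: because mtdchar_read() treats -EBADMSG/-EUCLEAN
 * as success, a plain read(2) still returns data from pages with ECC
 * errors, which is what dump tools rely on:
 *
 *	unsigned char page[2048];	// assumes a 2KiB-page device
 *	ssize_t n = read(fd, page, sizeof(page));
 *	// n == 2048 even if the underlying NAND page was uncorrectable
 */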
static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t size = count;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	pr_debug("MTD_write\n");

	if (*ppos >= mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_write_user_prot_reg(mtd, *ppos, len,
						      &retlen, kbuf);
			break;

		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.ooboffs = 0;
			ops.len = len;

			ret = mtd_write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
		}

		/*
		 * Return -ENOSPC only if no data could be written at all.
		 * Otherwise just return the number of bytes that actually
		 * have been written.
		 */
		if ((ret == -ENOSPC) && (total_retlen))
			break;

		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		} else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_write */
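/*
 * Illustrative sketch: write(2) programs the flash at the current file
 * position; per the -ENOSPC handling above, a write starting at or
 * beyond the device size fails outright, while a partially successful
 * one returns the byte count actually programmed:
 *
 *	lseek(fd, offset, SEEK_SET);	// offset must be on erased flash
 *	ssize_t n = write(fd, buf, len);
 */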
/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/

static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_USER;
		break;
	case MTD_OTP_OFF:
		mfi->mode = MTD_FILE_MODE_NORMAL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
			    uint64_t start, uint32_t length, void __user *ptr,
			    uint32_t __user *retp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	uint32_t retlen;
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_write_oob(mtd, start, &ops);

	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(retlen)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}

static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
			   uint64_t start, uint32_t length, void __user *ptr,
			   uint32_t __user *retp)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);

	/*
	 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
	 * data. For our userspace tools it is important to dump areas
	 * with ECC errors!
	 * For kernel internal usage it also might return -EUCLEAN
	 * to signal the caller that a bitflip has occurred and has
	 * been corrected by the ECC algorithm.
	 *
	 * Note: currently the standard NAND function, nand_read_oob_std,
	 * does not calculate ECC for the OOB area, so do not rely on
	 * this behavior unless you have replaced it with your own.
	 */
	if (mtd_is_bitflip_or_eccerr(ret))
		return 0;

	return ret;
}
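/*
 * Illustrative sketch of the MEMREADOOB ioctl handled above, using
 * struct mtd_oob_buf from <mtd/mtd-user.h>; note the 4096-byte cap on
 * length, and that the byte count actually read is written back into
 * the start field (a historical ABI quirk, see the MEMREADOOB case in
 * mtdchar_ioctl() below):
 *
 *	unsigned char oobbuf[64];	// assumes a 64-byte OOB area
 *	struct mtd_oob_buf oob = {
 *		.start  = 0,		// flash offset of the page
 *		.length = sizeof(oobbuf),
 *		.ptr    = oobbuf,
 *	};
 *	ioctl(fd, MEMREADOOB, &oob);
 */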
/*
 * Copies (and truncates, if necessary) OOB layout information to the
 * deprecated layout struct, nand_ecclayout_user. This is necessary only to
 * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
 * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
 * can describe any kind of OOB layout with almost zero overhead from a
 * memory usage point of view).
 */
static int shrink_ecclayout(struct mtd_info *mtd,
			    struct nand_ecclayout_user *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		eccpos = oobregion.offset;
		for (; i < MTD_MAX_ECCPOS_ENTRIES &&
		       eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i].offset = oobregion.offset;
		to->oobfree[i].length = oobregion.length;
		to->oobavail += to->oobfree[i].length;
	}

	return 0;
}

static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
			return -EINVAL;

		eccpos = oobregion.offset;
		for (; eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < 8; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i][0] = oobregion.offset;
		to->oobfree[i][1] = oobregion.length;
	}

	to->useecc = MTD_NANDECC_AUTOPLACE;

	return 0;
}

static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
			       struct blkpg_ioctl_arg *arg)
{
	struct blkpg_partition p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&p, arg->data, sizeof(p)))
		return -EFAULT;

	switch (arg->op) {
	case BLKPG_ADD_PARTITION:

		/* Only master mtd device must be used to add partitions */
		if (mtd_is_partition(mtd))
			return -EINVAL;

		/* Sanitize user input */
		p.devname[BLKPG_DEVNAMELTH - 1] = '\0';

		return mtd_add_partition(mtd, p.devname, p.start, p.length);

	case BLKPG_DEL_PARTITION:

		if (p.pno < 0)
			return -EINVAL;

		return mtd_del_partition(mtd, p.pno);

	default:
		return -EINVAL;
	}
}
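/*
 * Illustrative sketch: partitions can be added to (and deleted from) a
 * master MTD device at runtime with the generic BLKPG ioctl, reusing
 * struct blkpg_ioctl_arg / struct blkpg_partition from <linux/blkpg.h>;
 * CAP_SYS_ADMIN is required, as enforced above:
 *
 *	struct blkpg_partition p = {
 *		.start  = 0,
 *		.length = 0x100000,	// hypothetical 1 MiB partition
 *	};
 *	struct blkpg_ioctl_arg a = {
 *		.op   = BLKPG_ADD_PARTITION,
 *		.data = &p,
 *	};
 *	strcpy(p.devname, "example");	// hypothetical partition name
 *	ioctl(fd, BLKPG, &a);
 */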
static int mtdchar_write_ioctl(struct mtd_info *mtd,
			       struct mtd_write_req __user *argp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_write_req req;
	struct mtd_oob_ops ops = {};
	const void __user *usr_data, *usr_oob;
	int ret;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	usr_data = (const void __user *)(uintptr_t)req.usr_data;
	usr_oob = (const void __user *)(uintptr_t)req.usr_oob;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	ops.mode = req.mode;
	ops.len = (size_t)req.len;
	ops.ooblen = (size_t)req.ooblen;
	ops.ooboffs = 0;

	if (usr_data) {
		ops.datbuf = memdup_user(usr_data, ops.len);
		if (IS_ERR(ops.datbuf))
			return PTR_ERR(ops.datbuf);
	} else {
		ops.datbuf = NULL;
	}

	if (usr_oob) {
		ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
		if (IS_ERR(ops.oobbuf)) {
			kfree(ops.datbuf);
			return PTR_ERR(ops.oobbuf);
		}
	} else {
		ops.oobbuf = NULL;
	}

	ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);

	kfree(ops.datbuf);
	kfree(ops.oobbuf);

	return ret;
}
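/*
 * Illustrative sketch of the MEMWRITE ioctl handled above: unlike a
 * plain write(2), it programs page data and OOB data in one operation,
 * described by struct mtd_write_req from <mtd/mtd-user.h>:
 *
 *	struct mtd_write_req req = {
 *		.start    = 0,
 *		.len      = 2048,		// assumes 2KiB pages
 *		.ooblen   = 64,			// assumes 64-byte OOB
 *		.usr_data = (uintptr_t)data,
 *		.usr_oob  = (uintptr_t)oob,
 *		.mode     = MTD_OPS_AUTO_OOB,
 *	};
 *	ioctl(fd, MEMWRITE, &req);
 */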
static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = (void __user *)arg;
	int ret = 0;
	struct mtd_info_user info;

	pr_debug("MTD_ioctl\n");

	/*
	 * Check the file mode to require "dangerous" commands to have write
	 * permissions.
	 */
	switch (cmd) {
	/* "safe" commands */
	case MEMGETREGIONCOUNT:
	case MEMGETREGIONINFO:
	case MEMGETINFO:
	case MEMREADOOB:
	case MEMREADOOB64:
	case MEMLOCK:
	case MEMUNLOCK:
	case MEMISLOCKED:
	case MEMGETOOBSEL:
	case MEMGETBADBLOCK:
	case MEMSETBADBLOCK:
	case OTPSELECT:
	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	case OTPLOCK:
	case ECCGETLAYOUT:
	case ECCGETSTATS:
	case MTDFILEMODE:
	case BLKPG:
	case BLKRRPART:
		break;

	/* "dangerous" commands */
	case MEMERASE:
	case MEMERASE64:
	case MEMWRITEOOB:
	case MEMWRITEOOB64:
	case MEMWRITE:
		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;
		break;

	default:
		return -ENOTTY;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		memset(&info, 0, sizeof(info));
		info.type = mtd->type;
		info.flags = mtd->flags;
		info.size = mtd->size;
		info.erasesize = mtd->erasesize;
		info.writesize = mtd->writesize;
		info.oobsize = mtd->oobsize;
		/* The below field is obsolete */
		info.padding = 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
						   sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
						   sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}

			ret = mtd_erase(mtd, erase);
			kfree(erase);
		}
		break;
	}
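	/*
	 * Illustrative sketch: the classic userspace erase loop pairs
	 * MEMGETINFO with MEMERASE (or MEMERASE64 for offsets beyond
	 * 4GiB, since struct erase_info_user is 32-bit):
	 *
	 *	struct mtd_info_user info;
	 *	ioctl(fd, MEMGETINFO, &info);
	 *	for (uint32_t off = 0; off < info.size; off += info.erasesize) {
	 *		struct erase_info_user ei = {
	 *			.start  = off,
	 *			.length = info.erasesize,
	 *		};
	 *		ioctl(fd, MEMERASE, &ei);
	 *	}
	 */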
	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
					       buf.ptr, &buf_user->length);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
					      buf.ptr, &buf_user->start);
		break;
	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
					       (void __user *)(uintptr_t)buf.usr_ptr,
					       &buf_user->length);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
					      (void __user *)(uintptr_t)buf.usr_ptr,
					      &buf_user->length);
		break;
	}

	case MEMWRITE:
	{
		ret = mtdchar_write_ioctl(mtd,
					  (struct mtd_write_req __user *)arg);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_unlock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_is_locked(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		ret = get_oobinfo(mtd, &oi);
		if (ret)
			return ret;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_isbad(mtd, offs);
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_markbad(mtd, offs);
	}

	case OTPSELECT:
	{
		int mode;

		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_FILE_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		size_t retlen;

		if (!buf)
			return -ENOMEM;
		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (!ret) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = retlen / sizeof(struct otp_info);

				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, retlen);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_FILE_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}

	/* This ioctl is being deprecated - it truncates the ECC layout */
	case ECCGETLAYOUT:
	{
		struct nand_ecclayout_user *usrlay;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
		if (!usrlay)
			return -ENOMEM;

		shrink_ecclayout(mtd, usrlay);

		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
			ret = -EFAULT;
		kfree(usrlay);
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch (arg) {
		case MTD_FILE_MODE_OTP_FACTORY:
		case MTD_FILE_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_FILE_MODE_RAW:
			if (!mtd_has_oob(mtd))
				return -EOPNOTSUPP;
			mfi->mode = arg;
			break;

		case MTD_FILE_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}
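	/*
	 * Illustrative sketch: MTDFILEMODE takes the new mode as the
	 * ioctl argument itself rather than through a pointer, so raw
	 * (ECC-free) access is enabled per open file with:
	 *
	 *	ioctl(fd, MTDFILEMODE, MTD_FILE_MODE_RAW);
	 *
	 * after which read()/write() on fd go through the MTD_OPS_RAW
	 * paths above.
	 */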
	case BLKPG:
	{
		struct blkpg_ioctl_arg __user *blk_arg = argp;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&a, blk_arg, sizeof(a)))
			ret = -EFAULT;
		else
			ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	case BLKRRPART:
	{
		/* No reread partition feature. Just return ok */
		ret = 0;
		break;
	}
	}

	return ret;
} /* mtdchar_ioctl */

static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	int ret;

	mutex_lock(&mtd_mutex);
	ret = mtdchar_ioctl(file, cmd, arg);
	mutex_unlock(&mtd_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT

struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32	_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32	_IOWR('M', 4, struct mtd_oob_buf32)

static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&mtd_mutex);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EPERM;
			break;
		}

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start,
					       buf.length, compat_ptr(buf.ptr),
					       &buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start,
					      buf.length, compat_ptr(buf.ptr),
					      &buf_user->start);
		break;
	}

	case BLKPG:
	{
		/* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
		struct blkpg_compat_ioctl_arg __user *uarg = argp;
		struct blkpg_compat_ioctl_arg compat_arg;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
			ret = -EFAULT;
			break;
		}

		memset(&a, 0, sizeof(a));
		a.op = compat_arg.op;
		a.flags = compat_arg.flags;
		a.datalen = compat_arg.datalen;
		a.data = compat_ptr(compat_arg.data);

		ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	default:
		ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&mtd_mutex);

	return ret;
}

#endif /* CONFIG_COMPAT */
/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (MMU can't/doesn't copy private
 *   mappings)
 */
#ifndef CONFIG_MMU
static unsigned long mtdchar_get_unmapped_area(struct file *file,
					       unsigned long addr,
					       unsigned long len,
					       unsigned long pgoff,
					       unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	unsigned long offset;
	int ret;

	if (addr != 0)
		return (unsigned long) -EINVAL;

	if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
		return (unsigned long) -EINVAL;

	offset = pgoff << PAGE_SHIFT;
	if (offset > mtd->size - len)
		return (unsigned long) -EINVAL;

	ret = mtd_get_unmapped_area(mtd, len, offset, flags);
	return ret == -EOPNOTSUPP ? -ENODEV : ret;
}

static unsigned mtdchar_mmap_capabilities(struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;

	return mtd_mmap_capabilities(mfi->mtd);
}
#endif

/*
 * set up a mapping for shared memory segments
 */
static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;

	/*
	 * This is broken because it assumes the MTD device is map-based
	 * and that mtd->priv is a valid struct map_info. It should be
	 * replaced with something that uses the mtd_get_unmapped_area()
	 * operation properly.
	 */
	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
#ifdef pgprot_noncached
		if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		return vm_iomap_memory(vma, map->phys, map->size);
	}
	return -ENODEV;
#else
	return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
#endif
}

static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtdchar_lseek,
	.read		= mtdchar_read,
	.write		= mtdchar_write,
	.unlocked_ioctl	= mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtdchar_compat_ioctl,
#endif
	.open		= mtdchar_open,
	.release	= mtdchar_close,
	.mmap		= mtdchar_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtdchar_get_unmapped_area,
	.mmap_capabilities = mtdchar_mmap_capabilities,
#endif
};

int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (ret < 0) {
		pr_err("Can't allocate major number %d for MTD\n",
		       MTD_CHAR_MAJOR);
		return ret;
	}

	return ret;
}

void __exit cleanup_mtdchar(void)
{
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}

MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);