// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>

#include <linux/uaccess.h>

#include "mtdcore.h"

static DEFINE_MUTEX(mtd_mutex);

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information of various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;

	return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
}

static int mtdchar_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	pr_debug("MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	mutex_lock(&mtd_mutex);
	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd)) {
		ret = PTR_ERR(mtd);
		goto out;
	}

	if (mtd->type == MTD_ABSENT) {
		ret = -ENODEV;
		goto out1;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		ret = -EACCES;
		goto out1;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		ret = -ENOMEM;
		goto out1;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;
	mutex_unlock(&mtd_mutex);
	return 0;

out1:
	put_mtd_device(mtd);
out:
	mutex_unlock(&mtd_mutex);
	return ret;
} /* mtdchar_open */

/*====================================================================*/

static int mtdchar_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	pr_debug("MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE))
		mtd_sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtdchar_close */
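/*
 * Illustrative userspace usage (a sketch, not part of this driver):
 * even minors 2*N are the read/write nodes for mtdN and odd minors
 * 2*N+1 the read-only ones (node names below are the conventional
 * ones created by udev/mdev), which is why mtdchar_open() above
 * rejects FMODE_WRITE when (minor & 1):
 *
 *	int fd = open("/dev/mtd0", O_RDWR);	(minor 0: mtd0, RW)
 *	int ro = open("/dev/mtd0ro", O_RDONLY);	(minor 1: mtd0, RO)
 *
 * Opening /dev/mtd0ro with O_RDWR fails with EACCES.
 */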
/* Back in June 2001, dwmw2 wrote:
 *
 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 *   userspace buffer down and use it directly with readv/writev.
 *
 * The implementation below, using mtd_kmalloc_up_to, mitigates
 * allocation failures when the system is low on memory or memory is
 * highly fragmented, at the cost of reducing the performance of the
 * requested transfer due to a smaller buffer size.
 *
 * A more complex but more memory-efficient implementation based on
 * get_user_pages and iovecs to cover extents of those pages is a
 * longer-term goal, as intimated by dwmw2 above. However, for the
 * write case, this requires yet more complex head and tail transfer
 * handling when those head and tail offsets and sizes are such that
 * alignment requirements are not met in the NAND subdriver.
 */

static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	size_t size = count;
	char *kbuf;

	pr_debug("MTD_read\n");

	if (*ppos + count > mtd->size) {
		if (*ppos < mtd->size)
			count = mtd->size - *ppos;
		else
			count = 0;
	}

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_read_user_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd_read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* NAND returns -EBADMSG on ECC errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND.
		 */
		if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		} else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_read */
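/*
 * Illustrative userspace usage (a sketch, not part of this driver):
 * because the loop above keeps going on -EBADMSG/-EUCLEAN, a plain
 * read() hands back page data even when ECC could not fully correct
 * it, so dump tools can simply loop until EOF (fd is an open mtd
 * char device, out a FILE opened by the caller):
 *
 *	unsigned char page[2048];
 *	ssize_t n;
 *
 *	while ((n = read(fd, page, sizeof(page))) > 0)
 *		fwrite(page, 1, n, out);
 *
 * Tools that must distinguish clean from ECC-corrected or failed
 * reads compare the ECCGETSTATS counters before and after instead.
 */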
static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
			loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t size = count;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	pr_debug("MTD_write\n");

	if (*ppos >= mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_write_user_prot_reg(mtd, *ppos, len,
						      &retlen, kbuf);
			break;

		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.ooboffs = 0;
			ops.len = len;

			ret = mtd_write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
		}

		/*
		 * Return -ENOSPC only if no data could be written at all.
		 * Otherwise just return the number of bytes that actually
		 * have been written.
		 */
		if ((ret == -ENOSPC) && (total_retlen))
			break;

		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		} else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_write */
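/*
 * Illustrative userspace usage (a sketch, not part of this driver,
 * error handling omitted): writes go through mtd_write() and will
 * fail on NAND unless the target eraseblock was erased first, so the
 * canonical sequence is erase-then-write. info is a struct
 * mtd_info_user previously filled in by MEMGETINFO, and the fd must
 * be a writable open:
 *
 *	struct erase_info_user ei = {
 *		.start  = 0,
 *		.length = info.erasesize,
 *	};
 *
 *	ioctl(fd, MEMERASE, &ei);
 *	pwrite(fd, data, info.writesize, 0);
 */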
/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/

static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_USER;
		break;
	case MTD_OTP_OFF:
		mfi->mode = MTD_FILE_MODE_NORMAL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
			uint64_t start, uint32_t length, void __user *ptr,
			uint32_t __user *retp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	uint32_t retlen;
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_write_oob(mtd, start, &ops);

	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(retlen)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}

static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
			uint64_t start, uint32_t length, void __user *ptr,
			uint32_t __user *retp)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);

	/*
	 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
	 * data. For our userspace tools it is important to dump areas
	 * with ECC errors!
	 * For kernel internal usage it also might return -EUCLEAN
	 * to signal the caller that a bitflip has occurred and has
	 * been corrected by the ECC algorithm.
	 *
	 * Note: currently the standard NAND function, nand_read_oob_std,
	 * does not calculate ECC for the OOB area, so do not rely on
	 * this behavior unless you have replaced it with your own.
	 */
	if (mtd_is_bitflip_or_eccerr(ret))
		return 0;

	return ret;
}
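/*
 * Illustrative userspace usage (a sketch, not part of this driver):
 * the legacy OOB ioctls take a struct mtd_oob_buf; the handlers above
 * round start down to a page boundary and keep the in-page offset in
 * ops.ooboffs:
 *
 *	unsigned char oob[64];
 *	struct mtd_oob_buf ob = {
 *		.start  = 0,		(device offset of the page)
 *		.length = sizeof(oob),
 *		.ptr    = oob,
 *	};
 *
 *	ioctl(fd, MEMREADOOB, &ob);
 *
 * Quirk: for MEMREADOOB the byte count is written back into ob.start
 * (see the NOTE at that case below), so ob.start must be reset before
 * the structure is reused.
 */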
/*
 * Copies (and truncates, if necessary) OOB layout information to the
 * deprecated layout struct, nand_ecclayout_user. This is necessary only to
 * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
 * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
 * can describe any kind of OOB layout with almost zero overhead from a
 * memory usage point of view).
 */
static int shrink_ecclayout(struct mtd_info *mtd,
			    struct nand_ecclayout_user *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		eccpos = oobregion.offset;
		for (; i < MTD_MAX_ECCPOS_ENTRIES &&
		       eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i].offset = oobregion.offset;
		to->oobfree[i].length = oobregion.length;
		to->oobavail += to->oobfree[i].length;
	}

	return 0;
}

static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
			return -EINVAL;

		eccpos = oobregion.offset;
		for (; eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < 8; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i][0] = oobregion.offset;
		to->oobfree[i][1] = oobregion.length;
	}

	to->useecc = MTD_NANDECC_AUTOPLACE;

	return 0;
}

static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
			       struct blkpg_ioctl_arg *arg)
{
	struct blkpg_partition p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&p, arg->data, sizeof(p)))
		return -EFAULT;

	switch (arg->op) {
	case BLKPG_ADD_PARTITION:

		/* Only the master mtd device may be used to add partitions */
		if (mtd_is_partition(mtd))
			return -EINVAL;

		/* Sanitize user input */
		p.devname[BLKPG_DEVNAMELTH - 1] = '\0';

		return mtd_add_partition(mtd, p.devname, p.start, p.length);

	case BLKPG_DEL_PARTITION:

		if (p.pno < 0)
			return -EINVAL;

		return mtd_del_partition(mtd, p.pno);

	default:
		return -EINVAL;
	}
}
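/*
 * Illustrative userspace usage (a sketch, not part of this driver):
 * BLKPG reuses the block layer's partitioning structures from
 * <linux/blkpg.h> to carve an MTD partition out of a master device
 * (it is rejected on devices that are themselves partitions), and
 * requires CAP_SYS_ADMIN:
 *
 *	struct blkpg_partition p = {
 *		.start   = 0,
 *		.length  = 0x20000,
 *		.pno     = 0,
 *		.devname = "mypart",
 *	};
 *	struct blkpg_ioctl_arg a = {
 *		.op      = BLKPG_ADD_PARTITION,
 *		.datalen = sizeof(p),
 *		.data    = &p,
 *	};
 *
 *	ioctl(fd, BLKPG, &a);
 *
 * The handler above NUL-terminates p.devname before using it.
 */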
static int mtdchar_write_ioctl(struct mtd_info *mtd,
		struct mtd_write_req __user *argp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_write_req req;
	struct mtd_oob_ops ops = {};
	const void __user *usr_data, *usr_oob;
	int ret;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	usr_data = (const void __user *)(uintptr_t)req.usr_data;
	usr_oob = (const void __user *)(uintptr_t)req.usr_oob;

	if (!master->_write_oob)
		return -EOPNOTSUPP;
	ops.mode = req.mode;
	ops.len = (size_t)req.len;
	ops.ooblen = (size_t)req.ooblen;
	ops.ooboffs = 0;

	if (usr_data) {
		ops.datbuf = memdup_user(usr_data, ops.len);
		if (IS_ERR(ops.datbuf))
			return PTR_ERR(ops.datbuf);
	} else {
		ops.datbuf = NULL;
	}

	if (usr_oob) {
		ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
		if (IS_ERR(ops.oobbuf)) {
			kfree(ops.datbuf);
			return PTR_ERR(ops.oobbuf);
		}
	} else {
		ops.oobbuf = NULL;
	}

	ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);

	kfree(ops.datbuf);
	kfree(ops.oobbuf);

	return ret;
}
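/*
 * Illustrative userspace usage (a sketch, not part of this driver):
 * MEMWRITE writes data and OOB in one operation through the handler
 * above; data and oob are caller-provided buffers, info a struct
 * mtd_info_user filled in by MEMGETINFO:
 *
 *	struct mtd_write_req req = {
 *		.start    = 0,			(device offset)
 *		.len      = info.writesize,
 *		.ooblen   = 16,
 *		.usr_data = (uint64_t)(uintptr_t)data,
 *		.usr_oob  = (uint64_t)(uintptr_t)oob,
 *		.mode     = MTD_OPS_PLACE_OOB,
 *	};
 *
 *	ioctl(fd, MEMWRITE, &req);
 *
 * Either pointer may be zero to write only data or only OOB.
 */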
static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = (void __user *)arg;
	int ret = 0;
	struct mtd_info_user info;

	pr_debug("MTD_ioctl\n");

	/*
	 * Check the file mode to require "dangerous" commands to have write
	 * permissions.
	 */
	switch (cmd) {
	/* "safe" commands */
	case MEMGETREGIONCOUNT:
	case MEMGETREGIONINFO:
	case MEMGETINFO:
	case MEMREADOOB:
	case MEMREADOOB64:
	case MEMLOCK:
	case MEMUNLOCK:
	case MEMISLOCKED:
	case MEMGETOOBSEL:
	case MEMGETBADBLOCK:
	case MEMSETBADBLOCK:
	case OTPSELECT:
	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	case OTPLOCK:
	case ECCGETLAYOUT:
	case ECCGETSTATS:
	case MTDFILEMODE:
	case BLKPG:
	case BLKRRPART:
		break;

	/* "dangerous" commands */
	case MEMERASE:
	case MEMERASE64:
	case MEMWRITEOOB:
	case MEMWRITEOOB64:
	case MEMWRITE:
		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;
		break;

	default:
		return -ENOTTY;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		memset(&info, 0, sizeof(info));
		info.type	= mtd->type;
		info.flags	= mtd->flags;
		info.size	= mtd->size;
		info.erasesize	= mtd->erasesize;
		info.writesize	= mtd->writesize;
		info.oobsize	= mtd->oobsize;
		/* The below field is obsolete */
		info.padding	= 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
					    sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
					    sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}

			ret = mtd_erase(mtd, erase);
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->length);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->start);
		break;
	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMWRITE:
	{
		ret = mtdchar_write_ioctl(mtd,
		      (struct mtd_write_req __user *)arg);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_unlock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_is_locked(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		ret = get_oobinfo(mtd, &oi);
		if (ret)
			return ret;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_isbad(mtd, offs);
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_markbad(mtd, offs);
	}

	case OTPSELECT:
	{
		int mode;

		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_FILE_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}
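	/*
	 * Illustrative userspace usage (a sketch, not part of this
	 * driver): the OTP region queries below only succeed after
	 * OTPSELECT has switched the file into an OTP mode:
	 *
	 *	int mode = MTD_OTP_USER;
	 *	int count;
	 *
	 *	ioctl(fd, OTPSELECT, &mode);
	 *	ioctl(fd, OTPGETREGIONCOUNT, &count);
	 *
	 * While the mode is selected, read()/write() also address the
	 * OTP area rather than the main flash array (see the mode
	 * switches in mtdchar_read() and mtdchar_write() above).
	 */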
	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		size_t retlen;

		if (!buf)
			return -ENOMEM;
		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (!ret) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = retlen / sizeof(struct otp_info);

				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, retlen);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_FILE_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}

	/* This ioctl is being deprecated - it truncates the ECC layout */
	case ECCGETLAYOUT:
	{
		struct nand_ecclayout_user *usrlay;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
		if (!usrlay)
			return -ENOMEM;

		shrink_ecclayout(mtd, usrlay);

		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
			ret = -EFAULT;
		kfree(usrlay);
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch (arg) {
		case MTD_FILE_MODE_OTP_FACTORY:
		case MTD_FILE_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_FILE_MODE_RAW:
			if (!mtd_has_oob(mtd))
				return -EOPNOTSUPP;
			mfi->mode = arg;
			break;

		case MTD_FILE_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	case BLKPG:
	{
		struct blkpg_ioctl_arg __user *blk_arg = argp;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&a, blk_arg, sizeof(a)))
			ret = -EFAULT;
		else
			ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	case BLKRRPART:
	{
		/* No reread partition feature. Just return ok */
		ret = 0;
		break;
	}
	}

	return ret;
} /* mtdchar_ioctl */
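/*
 * Illustrative userspace usage (a sketch, not part of this driver):
 * MEMGETBADBLOCK returns its result as the ioctl return value rather
 * than through a buffer (note the direct return of mtd_block_isbad()
 * above), so NAND-aware tools skip bad eraseblocks like this:
 *
 *	loff_t offs;
 *
 *	for (offs = 0; offs < info.size; offs += info.erasesize) {
 *		if (ioctl(fd, MEMGETBADBLOCK, &offs) > 0)
 *			continue;	(skip the bad block)
 *		...read or program this eraseblock...
 *	}
 */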
static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	int ret;

	mutex_lock(&mtd_mutex);
	ret = mtdchar_ioctl(file, cmd, arg);
	mutex_unlock(&mtd_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT

struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32	_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32	_IOWR('M', 4, struct mtd_oob_buf32)

static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&mtd_mutex);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EPERM;
			break;
		}

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}

	case BLKPG:
	{
		/* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
		struct blkpg_compat_ioctl_arg __user *uarg = argp;
		struct blkpg_compat_ioctl_arg compat_arg;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
			ret = -EFAULT;
			break;
		}

		memset(&a, 0, sizeof(a));
		a.op = compat_arg.op;
		a.flags = compat_arg.flags;
		a.datalen = compat_arg.datalen;
		a.data = compat_ptr(compat_arg.data);

		ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	default:
		ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&mtd_mutex);

	return ret;
}

#endif /* CONFIG_COMPAT */

/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (MMU can't/doesn't copy private
 *   mappings)
 */
#ifndef CONFIG_MMU
static unsigned long mtdchar_get_unmapped_area(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	unsigned long offset;
	int ret;

	if (addr != 0)
		return (unsigned long) -EINVAL;

	if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
		return (unsigned long) -EINVAL;

	offset = pgoff << PAGE_SHIFT;
	if (offset > mtd->size - len)
		return (unsigned long) -EINVAL;

	ret = mtd_get_unmapped_area(mtd, len, offset, flags);
	return ret == -EOPNOTSUPP ? -ENODEV : ret;
}

static unsigned mtdchar_mmap_capabilities(struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;

	return mtd_mmap_capabilities(mfi->mtd);
}
#endif
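/*
 * Illustrative userspace usage (a sketch, not part of this driver):
 * on NOMMU kernels a shared mapping of a directly-mappable device
 * (e.g. NOR flash) can give XIP-style access:
 *
 *	void *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *
 * On NOMMU, MAP_PRIVATE is refused with -EACCES by mtdchar_mmap()
 * below; on MMU kernels the function currently refuses all requests
 * with -ENODEV.
 */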
/*
 * set up a mapping for shared memory segments
 */
static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;

	/* This is broken because it assumes the MTD device is map-based
	   and that mtd->priv is a valid struct map_info. It should be
	   replaced with something that uses the mtd_get_unmapped_area()
	   operation properly. */
	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
#ifdef pgprot_noncached
		if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		return vm_iomap_memory(vma, map->phys, map->size);
	}
	return -ENODEV;
#else
	return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
#endif
}

static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtdchar_lseek,
	.read		= mtdchar_read,
	.write		= mtdchar_write,
	.unlocked_ioctl	= mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtdchar_compat_ioctl,
#endif
	.open		= mtdchar_open,
	.release	= mtdchar_close,
	.mmap		= mtdchar_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtdchar_get_unmapped_area,
	.mmap_capabilities = mtdchar_mmap_capabilities,
#endif
};

int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (ret < 0) {
		pr_err("Can't allocate major number %d for MTD\n",
		       MTD_CHAR_MAJOR);
		return ret;
	}

	return ret;
}

void __exit cleanup_mtdchar(void)
{
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}

MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);