// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>

#include <linux/uaccess.h>

#include "mtdcore.h"

/*
 * Data structure holding the pointer to the MTD device as well as the
 * file mode for the various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;

	return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
}

static int mtdchar_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	pr_debug("MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	if (mtd->type == MTD_ABSENT) {
		ret = -ENODEV;
		goto out1;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		ret = -EACCES;
		goto out1;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		ret = -ENOMEM;
		goto out1;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;
	return 0;

out1:
	put_mtd_device(mtd);
	return ret;
} /* mtdchar_open */

/*====================================================================*/

static int mtdchar_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	pr_debug("MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE))
		mtd_sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtdchar_close */

/* Back in June 2001, dwmw2 wrote:
 *
 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 *   userspace buffer down and use it directly with readv/writev.
 *
 * The implementation below, using mtd_kmalloc_up_to, mitigates
 * allocation failures when the system is low on memory or memory is
 * highly fragmented, at the cost of reduced transfer performance due
 * to the smaller buffer size.
 *
 * A more complex but more memory-efficient implementation based on
 * get_user_pages and iovecs to cover extents of those pages is a
 * longer-term goal, as intimated by dwmw2 above. However, for the
 * write case, this requires yet more complex head and tail transfer
 * handling when those head and tail offsets and sizes are such that
 * alignment requirements are not met in the NAND subdriver.
 */
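
/*
 * For illustration only: a minimal userspace consumer of the read path
 * below might look like this (error handling elided; the device node
 * name is an example):
 *
 *	int fd = open("/dev/mtd0", O_RDONLY);
 *	unsigned char page[2048];
 *	ssize_t n = read(fd, page, sizeof(page));
 *
 * Each read() lands in mtdchar_read(), which shuttles the transfer
 * through a kernel bounce buffer sized by mtd_kmalloc_up_to().
 */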

static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	size_t size = count;
	char *kbuf;

	pr_debug("MTD_read\n");

	if (*ppos + count > mtd->size) {
		if (*ppos < mtd->size)
			count = mtd->size - *ppos;
		else
			count = 0;
	}

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_read_user_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd_read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
		}
		/*
		 * NAND returns -EBADMSG on ECC errors, but it still returns
		 * the data. For our userspace tools it is important to dump
		 * areas with ECC errors!
		 * For kernel-internal usage it also might return -EUCLEAN to
		 * signal the caller that a bitflip has occurred and has been
		 * corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way must be
		 * aware that it is dealing with NAND.
		 */
		if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		} else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_read */

static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t size = count;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	pr_debug("MTD_write\n");

	if (*ppos >= mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_write_user_prot_reg(mtd, *ppos, len,
						      &retlen, kbuf);
			break;

		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.ooboffs = 0;
			ops.len = len;

			ret = mtd_write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
		}

		/*
		 * Return -ENOSPC only if no data could be written at all.
		 * Otherwise just return the number of bytes that actually
		 * have been written.
		 */
		if ((ret == -ENOSPC) && (total_retlen))
			break;

		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		} else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_write */
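
/*
 * For flash-type devices, writes through this path can only clear bits,
 * so userspace is expected to erase the affected eraseblocks first (see
 * the MEMERASE ioctl below). An illustrative sequence, assuming "info"
 * was previously filled in by MEMGETINFO:
 *
 *	struct erase_info_user ei = { .start = 0, .length = info.erasesize };
 *	ioctl(fd, MEMERASE, &ei);
 *	pwrite(fd, buf, info.erasesize, 0);
 */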

/*======================================================================

	IOCTL calls for getting device parameters.

======================================================================*/

static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_USER;
		break;
	case MTD_OTP_OFF:
		mfi->mode = MTD_FILE_MODE_NORMAL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
			    uint64_t start, uint32_t length, void __user *ptr,
			    uint32_t __user *retp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	uint32_t retlen;
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_write_oob(mtd, start, &ops);

	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(retlen)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}

static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
			   uint64_t start, uint32_t length, void __user *ptr,
			   uint32_t __user *retp)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);

	/*
	 * NAND returns -EBADMSG on ECC errors, but it still returns the
	 * OOB data. For our userspace tools it is important to dump areas
	 * with ECC errors!
	 * For kernel-internal usage it also might return -EUCLEAN to
	 * signal the caller that a bitflip has occurred and has been
	 * corrected by the ECC algorithm.
	 *
	 * Note: currently the standard NAND function, nand_read_oob_std,
	 * does not calculate ECC for the OOB area, so do not rely on
	 * this behavior unless you have replaced it with your own.
	 */
	if (mtd_is_bitflip_or_eccerr(ret))
		return 0;

	return ret;
}
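
/*
 * Illustrative use of the OOB helpers above through the legacy
 * MEMREADOOB ioctl. "start" is a flash byte address selecting the page
 * whose OOB area is read; by historical quirk, the number of OOB bytes
 * transferred is written back into the descriptor's start field:
 *
 *	unsigned char oob[64];
 *	struct mtd_oob_buf req = {
 *		.start = 0, .length = sizeof(oob), .ptr = oob,
 *	};
 *	ioctl(fd, MEMREADOOB, &req);
 */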

/*
 * Copies (and truncates, if necessary) OOB layout information to the
 * deprecated layout struct, nand_ecclayout_user. This is necessary only to
 * support the deprecated ECCGETLAYOUT ioctl while allowing all new
 * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
 * can describe any kind of OOB layout with almost zero overhead from a
 * memory usage point of view).
 */
static int shrink_ecclayout(struct mtd_info *mtd,
			    struct nand_ecclayout_user *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		eccpos = oobregion.offset;
		for (; i < MTD_MAX_ECCPOS_ENTRIES &&
		       eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i].offset = oobregion.offset;
		to->oobfree[i].length = oobregion.length;
		to->oobavail += to->oobfree[i].length;
	}

	return 0;
}

static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
			return -EINVAL;

		eccpos = oobregion.offset;
		for (; eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < 8; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i][0] = oobregion.offset;
		to->oobfree[i][1] = oobregion.length;
	}

	to->useecc = MTD_NANDECC_AUTOPLACE;

	return 0;
}

static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
			       struct blkpg_ioctl_arg *arg)
{
	struct blkpg_partition p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&p, arg->data, sizeof(p)))
		return -EFAULT;

	switch (arg->op) {
	case BLKPG_ADD_PARTITION:

		/* Only the master device may be used to add partitions */
		if (mtd_is_partition(mtd))
			return -EINVAL;

		/* Sanitize user input */
		p.devname[BLKPG_DEVNAMELTH - 1] = '\0';

		return mtd_add_partition(mtd, p.devname, p.start, p.length);

	case BLKPG_DEL_PARTITION:

		if (p.pno < 0)
			return -EINVAL;

		return mtd_del_partition(mtd, p.pno);

	default:
		return -EINVAL;
	}
}
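
/*
 * Illustrative BLKPG call adding a 128 KiB partition at runtime (fields
 * per <linux/blkpg.h>; requires CAP_SYS_ADMIN and the master device):
 *
 *	struct blkpg_partition part = {
 *		.start = 0, .length = 0x20000, .devname = "example",
 *	};
 *	struct blkpg_ioctl_arg arg = {
 *		.op = BLKPG_ADD_PARTITION, .datalen = sizeof(part),
 *		.data = &part,
 *	};
 *	ioctl(fd, BLKPG, &arg);
 */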

static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
			      struct mtd_oob_ops *ops)
{
	uint32_t start_page, end_page;
	u32 oob_per_page;

	if (ops->len == 0 || ops->ooblen == 0)
		return;

	start_page = mtd_div_by_ws(start, mtd);
	end_page = mtd_div_by_ws(start + ops->len - 1, mtd);
	oob_per_page = mtd_oobavail(mtd, ops);

	ops->ooblen = min_t(size_t, ops->ooblen,
			    (end_page - start_page + 1) * oob_per_page);
}

static int mtdchar_write_ioctl(struct mtd_info *mtd,
			       struct mtd_write_req __user *argp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_write_req req;
	const void __user *usr_data, *usr_oob;
	uint8_t *datbuf = NULL, *oobbuf = NULL;
	size_t datbuf_len, oobbuf_len;
	int ret = 0;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	usr_data = (const void __user *)(uintptr_t)req.usr_data;
	usr_oob = (const void __user *)(uintptr_t)req.usr_oob;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	if (!usr_data)
		req.len = 0;

	if (!usr_oob)
		req.ooblen = 0;

	req.len &= 0xffffffff;
	req.ooblen &= 0xffffffff;

	if (req.start + req.len > mtd->size)
		return -EINVAL;

	datbuf_len = min_t(size_t, req.len, mtd->erasesize);
	if (datbuf_len > 0) {
		datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
		if (!datbuf)
			return -ENOMEM;
	}

	oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
	if (oobbuf_len > 0) {
		oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
		if (!oobbuf) {
			kvfree(datbuf);
			return -ENOMEM;
		}
	}

	while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
		struct mtd_oob_ops ops = {
			.mode = req.mode,
			.len = min_t(size_t, req.len, datbuf_len),
			.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
			.datbuf = datbuf,
			.oobbuf = oobbuf,
		};

		/*
		 * Shorten non-page-aligned, eraseblock-sized writes so that
		 * the write ends on an eraseblock boundary. This is necessary
		 * for adjust_oob_length() to properly handle non-page-aligned
		 * writes.
		 */
		if (ops.len == mtd->erasesize)
			ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);

		/*
		 * For writes which are not OOB-only, adjust the amount of OOB
		 * data written according to the number of data pages written.
		 * This is necessary to prevent OOB data from being skipped
		 * over in data+OOB writes requiring multiple mtd_write_oob()
		 * calls to be completed.
		 */
		adjust_oob_length(mtd, req.start, &ops);

		if (copy_from_user(datbuf, usr_data, ops.len) ||
		    copy_from_user(oobbuf, usr_oob, ops.ooblen)) {
			ret = -EFAULT;
			break;
		}

		ret = mtd_write_oob(mtd, req.start, &ops);
		if (ret)
			break;

		req.start += ops.retlen;
		req.len -= ops.retlen;
		usr_data += ops.retlen;

		req.ooblen -= ops.oobretlen;
		usr_oob += ops.oobretlen;
	}

	kvfree(datbuf);
	kvfree(oobbuf);

	return ret;
}
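
/*
 * Illustrative MEMWRITE request writing one page of data together with
 * its OOB bytes in a single call (struct mtd_write_req comes from
 * <mtd/mtd-user.h>; user pointers are passed as 64-bit integers, and
 * "info", "data" and "oob" are assumed to exist in the caller):
 *
 *	struct mtd_write_req req = {
 *		.start = 0,
 *		.len = info.writesize,
 *		.ooblen = sizeof(oob),
 *		.usr_data = (uintptr_t)data,
 *		.usr_oob = (uintptr_t)oob,
 *		.mode = MTD_OPS_PLACE_OOB,
 *	};
 *	ioctl(fd, MEMWRITE, &req);
 */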

static int mtdchar_read_ioctl(struct mtd_info *mtd,
			      struct mtd_read_req __user *argp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_read_req req;
	void __user *usr_data, *usr_oob;
	uint8_t *datbuf = NULL, *oobbuf = NULL;
	size_t datbuf_len, oobbuf_len;
	size_t orig_len, orig_ooblen;
	int ret = 0;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	orig_len = req.len;
	orig_ooblen = req.ooblen;

	usr_data = (void __user *)(uintptr_t)req.usr_data;
	usr_oob = (void __user *)(uintptr_t)req.usr_oob;

	if (!master->_read_oob)
		return -EOPNOTSUPP;

	if (!usr_data)
		req.len = 0;

	if (!usr_oob)
		req.ooblen = 0;

	req.ecc_stats.uncorrectable_errors = 0;
	req.ecc_stats.corrected_bitflips = 0;
	req.ecc_stats.max_bitflips = 0;

	req.len &= 0xffffffff;
	req.ooblen &= 0xffffffff;

	if (req.start + req.len > mtd->size) {
		ret = -EINVAL;
		goto out;
	}

	datbuf_len = min_t(size_t, req.len, mtd->erasesize);
	if (datbuf_len > 0) {
		datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
		if (!datbuf) {
			ret = -ENOMEM;
			goto out;
		}
	}

	oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
	if (oobbuf_len > 0) {
		oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
		if (!oobbuf) {
			ret = -ENOMEM;
			goto out;
		}
	}

	while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
		struct mtd_req_stats stats;
		struct mtd_oob_ops ops = {
			.mode = req.mode,
			.len = min_t(size_t, req.len, datbuf_len),
			.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
			.datbuf = datbuf,
			.oobbuf = oobbuf,
			.stats = &stats,
		};

		/*
		 * Shorten non-page-aligned, eraseblock-sized reads so that the
		 * read ends on an eraseblock boundary. This is necessary in
		 * order to prevent OOB data for some pages from being
		 * duplicated in the output of non-page-aligned reads requiring
		 * multiple mtd_read_oob() calls to be completed.
		 */
		if (ops.len == mtd->erasesize)
			ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);

		ret = mtd_read_oob(mtd, (loff_t)req.start, &ops);

		req.ecc_stats.uncorrectable_errors +=
			stats.uncorrectable_errors;
		req.ecc_stats.corrected_bitflips += stats.corrected_bitflips;
		req.ecc_stats.max_bitflips =
			max(req.ecc_stats.max_bitflips, stats.max_bitflips);

		if (ret && !mtd_is_bitflip_or_eccerr(ret))
			break;

		if (copy_to_user(usr_data, ops.datbuf, ops.retlen) ||
		    copy_to_user(usr_oob, ops.oobbuf, ops.oobretlen)) {
			ret = -EFAULT;
			break;
		}

		req.start += ops.retlen;
		req.len -= ops.retlen;
		usr_data += ops.retlen;

		req.ooblen -= ops.oobretlen;
		usr_oob += ops.oobretlen;
	}

	/*
	 * As multiple iterations of the above loop (and therefore multiple
	 * mtd_read_oob() calls) may be necessary to complete the read request,
	 * adjust the final return code to ensure it accounts for all detected
	 * ECC errors.
	 */
	if (!ret || mtd_is_bitflip(ret)) {
		if (req.ecc_stats.uncorrectable_errors > 0)
			ret = -EBADMSG;
		else if (req.ecc_stats.corrected_bitflips > 0)
			ret = -EUCLEAN;
	}

out:
	req.len = orig_len - req.len;
	req.ooblen = orig_ooblen - req.ooblen;

	if (copy_to_user(argp, &req, sizeof(req)))
		ret = -EFAULT;

	kvfree(datbuf);
	kvfree(oobbuf);

	return ret;
}
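
/*
 * MEMREAD mirrors MEMWRITE above, but additionally returns aggregated
 * ECC statistics for the whole request. Illustrative usage ("info" and
 * "data" as in the MEMWRITE sketch); corrected bitflips surface as
 * errno == EUCLEAN rather than as a hard failure:
 *
 *	struct mtd_read_req req = {
 *		.len = info.writesize,
 *		.usr_data = (uintptr_t)data,
 *		.mode = MTD_OPS_PLACE_OOB,
 *	};
 *	if (ioctl(fd, MEMREAD, &req) < 0 && errno == EUCLEAN)
 *		printf("%u bitflips corrected\n",
 *		       (unsigned int)req.ecc_stats.corrected_bitflips);
 */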

static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = (void __user *)arg;
	int ret = 0;
	struct mtd_info_user info;

	pr_debug("MTD_ioctl\n");

	/*
	 * Check the file mode first: "dangerous" commands require a
	 * descriptor opened with write permission.
	 */
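	/*
	 * For example, on a descriptor opened O_RDONLY, MEMGETINFO and
	 * MEMREADOOB succeed as usual, while MEMERASE is rejected with
	 * -EPERM before the command is even dispatched.
	 */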
	switch (cmd) {
	/* "safe" commands */
	case MEMGETREGIONCOUNT:
	case MEMGETREGIONINFO:
	case MEMGETINFO:
	case MEMREADOOB:
	case MEMREADOOB64:
	case MEMREAD:
	case MEMISLOCKED:
	case MEMGETOOBSEL:
	case MEMGETBADBLOCK:
	case OTPSELECT:
	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	case ECCGETLAYOUT:
	case ECCGETSTATS:
	case MTDFILEMODE:
	case BLKPG:
	case BLKRRPART:
		break;

	/* "dangerous" commands */
	case MEMERASE:
	case MEMERASE64:
	case MEMLOCK:
	case MEMUNLOCK:
	case MEMSETBADBLOCK:
	case MEMWRITEOOB:
	case MEMWRITEOOB64:
	case MEMWRITE:
	case OTPLOCK:
	case OTPERASE:
		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;
		break;

	default:
		return -ENOTTY;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		memset(&info, 0, sizeof(info));
		info.type = mtd->type;
		info.flags = mtd->flags;
		info.size = mtd->size;
		info.erasesize = mtd->erasesize;
		info.writesize = mtd->writesize;
		info.oobsize = mtd->oobsize;
		/* The below field is obsolete */
		info.padding = 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;
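
	/*
	 * MEMGETINFO is typically the first call a consumer makes;
	 * illustrative usage (struct mtd_info_user from <mtd/mtd-user.h>):
	 *
	 *	struct mtd_info_user info;
	 *	ioctl(fd, MEMGETINFO, &info);
	 *
	 * info.writesize and info.erasesize then drive the sizing in the
	 * examples earlier in this file.
	 */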

	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
						   sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
						   sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}

			ret = mtd_erase(mtd, erase);
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
					       buf.ptr, &buf_user->length);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
					      buf.ptr, &buf_user->start);
		break;
	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
					       (void __user *)(uintptr_t)buf.usr_ptr,
					       &buf_user->length);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
					      (void __user *)(uintptr_t)buf.usr_ptr,
					      &buf_user->length);
		break;
	}

	case MEMWRITE:
	{
		ret = mtdchar_write_ioctl(mtd,
					  (struct mtd_write_req __user *)arg);
		break;
	}

	case MEMREAD:
	{
		ret = mtdchar_read_ioctl(mtd,
					 (struct mtd_read_req __user *)arg);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_unlock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_is_locked(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		ret = get_oobinfo(mtd, &oi);
		if (ret)
			return ret;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_isbad(mtd, offs);
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_markbad(mtd, offs);
	}

	case OTPSELECT:
	{
		int mode;

		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_FILE_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		size_t retlen;

		if (!buf)
			return -ENOMEM;
		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (!ret) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = retlen / sizeof(struct otp_info);

				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, retlen);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	case OTPERASE:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_FILE_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		if (cmd == OTPLOCK)
			ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		else
			ret = mtd_erase_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}
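
	/*
	 * Illustrative OTP sequence: once user-OTP mode is selected, plain
	 * read() and write() on the descriptor are routed to the protection
	 * registers (see mtdchar_read()/mtdchar_write() above) instead of
	 * the main flash array:
	 *
	 *	int mode = MTD_OTP_USER;
	 *	ioctl(fd, OTPSELECT, &mode);
	 *	read(fd, buf, len);
	 */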

	/* This ioctl is being deprecated - it truncates the ECC layout */
	case ECCGETLAYOUT:
	{
		struct nand_ecclayout_user *usrlay;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
		if (!usrlay)
			return -ENOMEM;

		shrink_ecclayout(mtd, usrlay);

		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
			ret = -EFAULT;
		kfree(usrlay);
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch (arg) {
		case MTD_FILE_MODE_OTP_FACTORY:
		case MTD_FILE_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_FILE_MODE_RAW:
			if (!mtd_has_oob(mtd))
				return -EOPNOTSUPP;
			mfi->mode = arg;
			break;

		case MTD_FILE_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	case BLKPG:
	{
		struct blkpg_ioctl_arg __user *blk_arg = argp;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&a, blk_arg, sizeof(a)))
			ret = -EFAULT;
		else
			ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	case BLKRRPART:
	{
		/* No re-read partition feature; just return OK */
		ret = 0;
		break;
	}
	}

	return ret;
} /* mtdchar_ioctl */
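
/*
 * All ioctls are serialized against one another (including the compat
 * variants below) by the master device's chrdev_lock mutex.
 */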
static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	mutex_lock(&master->master.chrdev_lock);
	ret = mtdchar_ioctl(file, cmd, arg);
	mutex_unlock(&master->master.chrdev_lock);

	return ret;
}

#ifdef CONFIG_COMPAT

struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32	_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32	_IOWR('M', 4, struct mtd_oob_buf32)

static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&master->master.chrdev_lock);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EPERM;
			break;
		}

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start,
					       buf.length, compat_ptr(buf.ptr),
					       &buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start,
					      buf.length, compat_ptr(buf.ptr),
					      &buf_user->start);
		break;
	}

	case BLKPG:
	{
		/* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
		struct blkpg_compat_ioctl_arg __user *uarg = argp;
		struct blkpg_compat_ioctl_arg compat_arg;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
			ret = -EFAULT;
			break;
		}

		memset(&a, 0, sizeof(a));
		a.op = compat_arg.op;
		a.flags = compat_arg.flags;
		a.datalen = compat_arg.datalen;
		a.data = compat_ptr(compat_arg.data);

		ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	default:
		ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&master->master.chrdev_lock);

	return ret;
}

#endif /* CONFIG_COMPAT */

/*
 * Try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (MMU can't copy private
 *   mappings)
 */
#ifndef CONFIG_MMU
static unsigned long mtdchar_get_unmapped_area(struct file *file,
					       unsigned long addr,
					       unsigned long len,
					       unsigned long pgoff,
					       unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	unsigned long offset;
	int ret;

	if (addr != 0)
		return (unsigned long) -EINVAL;

	if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
		return (unsigned long) -EINVAL;

	offset = pgoff << PAGE_SHIFT;
	if (offset > mtd->size - len)
		return (unsigned long) -EINVAL;

	ret = mtd_get_unmapped_area(mtd, len, offset, flags);
	return ret == -EOPNOTSUPP ? -ENODEV : ret;
}

static unsigned mtdchar_mmap_capabilities(struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;

	return mtd_mmap_capabilities(mfi->mtd);
}
#endif

/*
 * set up a mapping for shared memory segments
 */
static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;

	/*
	 * This is broken because it assumes the MTD device is map-based
	 * and that mtd->priv is a valid struct map_info. It should be
	 * replaced with something that uses the mtd_get_unmapped_area()
	 * operation properly.
	 */
	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
#ifdef pgprot_noncached
		if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		return vm_iomap_memory(vma, map->phys, map->size);
	}
	return -ENODEV;
#else
	return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
#endif
}

static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtdchar_lseek,
	.read		= mtdchar_read,
	.write		= mtdchar_write,
	.unlocked_ioctl	= mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtdchar_compat_ioctl,
#endif
	.open		= mtdchar_open,
	.release	= mtdchar_close,
	.mmap		= mtdchar_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtdchar_get_unmapped_area,
	.mmap_capabilities = mtdchar_mmap_capabilities,
#endif
};

int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (ret < 0)
		pr_err("Can't allocate major number %d for MTD\n",
		       MTD_CHAR_MAJOR);

	return ret;
}

void __exit cleanup_mtdchar(void)
{
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}

MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);