/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 */

#ifndef __MTD_MTD_H__
#define __MTD_MTD_H__

#ifndef __UBOOT__
#include <linux/types.h>
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/device.h>

#include <mtd/mtd-abi.h>

#include <asm/div64.h>
#else
#include <linux/compat.h>
#include <mtd/mtd-abi.h>
#include <linux/errno.h>
#include <div64.h>

#define MAX_MTD_DEVICES 32
#endif

#define MTD_ERASE_PENDING	0x01
#define MTD_ERASING		0x02
#define MTD_ERASE_SUSPEND	0x04
#define MTD_ERASE_DONE		0x08
#define MTD_ERASE_FAILED	0x10

#define MTD_FAIL_ADDR_UNKNOWN -1LL

/*
 * If the erase fails, fail_addr might indicate exactly which block failed. If
 * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
 * or was not specific to any particular block.
 */
struct erase_info {
	struct mtd_info *mtd;
	uint64_t addr;
	uint64_t len;
	uint64_t fail_addr;
	u_long time;
	u_long retries;
	unsigned dev;
	unsigned cell;
	void (*callback) (struct erase_info *self);
	u_long priv;
	u_char state;
	struct erase_info *next;
	int scrub;
};

struct mtd_erase_region_info {
	uint64_t offset;	/* At which this region starts, from the beginning of the MTD */
	uint32_t erasesize;	/* For this region */
	uint32_t numblocks;	/* Number of blocks of erasesize in this region */
	unsigned long *lockmap;	/* If keeping bitmap of locks */
};

/**
 * struct mtd_oob_ops - oob operation operands
 * @mode:	operation mode
 *
 * @len:	number of data bytes to write/read
 *
 * @retlen:	number of data bytes written/read
 *
 * @ooblen:	number of oob bytes to write/read
 * @oobretlen:	number of oob bytes written/read
 * @ooboffs:	offset of oob data in the oob area (only relevant when
 *		mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW)
 * @datbuf:	data buffer - if NULL only oob data are read/written
 * @oobbuf:	oob data buffer
 *
 * Note, it is allowed to read more than one OOB area at one go, but not write.
 * The interface assumes that the OOB write requests program only one page's
 * OOB area.
 */
struct mtd_oob_ops {
	unsigned int	mode;
	size_t		len;
	size_t		retlen;
	size_t		ooblen;
	size_t		oobretlen;
	uint32_t	ooboffs;
	uint8_t		*datbuf;
	uint8_t		*oobbuf;
};

#ifdef CONFIG_SYS_NAND_MAX_OOBFREE
#define MTD_MAX_OOBFREE_ENTRIES_LARGE	CONFIG_SYS_NAND_MAX_OOBFREE
#else
#define MTD_MAX_OOBFREE_ENTRIES_LARGE	32
#endif

#ifdef CONFIG_SYS_NAND_MAX_ECCPOS
#define MTD_MAX_ECCPOS_ENTRIES_LARGE	CONFIG_SYS_NAND_MAX_ECCPOS
#else
#define MTD_MAX_ECCPOS_ENTRIES_LARGE	680
#endif

/**
 * struct mtd_oob_region - oob region definition
 * @offset: region offset
 * @length: region length
 *
 * This structure describes a region of the OOB area, and is used
 * to retrieve ECC or free bytes sections.
 * Each section is defined by an offset within the OOB area and a
 * length.
 */
struct mtd_oob_region {
	u32 offset;
	u32 length;
};

/*
 * struct mtd_ooblayout_ops - NAND OOB layout operations
 * @ecc: function returning an ECC region in the OOB area.
 *	 Should return -ERANGE if %section exceeds the total number of
 *	 ECC sections.
 * @free: function returning a free region in the OOB area.
 *	  Should return -ERANGE if %section exceeds the total number of
 *	  free sections.
 */
struct mtd_ooblayout_ops {
	int (*ecc)(struct mtd_info *mtd, int section,
		   struct mtd_oob_region *oobecc);
	int (*free)(struct mtd_info *mtd, int section,
		    struct mtd_oob_region *oobfree);
};
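/*
 * Illustrative sketch (not part of this header's API): a driver describing a
 * hypothetical 16-byte OOB area with a single 6-byte ECC section at offset 8
 * and a single free section covering bytes 2-7 might implement the callbacks
 * as follows; the "foo_" names are made up.
 *
 *	static int foo_ooblayout_ecc(struct mtd_info *mtd, int section,
 *				     struct mtd_oob_region *oobecc)
 *	{
 *		if (section)
 *			return -ERANGE;
 *		oobecc->offset = 8;
 *		oobecc->length = 6;
 *		return 0;
 *	}
 *
 *	static int foo_ooblayout_free(struct mtd_info *mtd, int section,
 *				      struct mtd_oob_region *oobfree)
 *	{
 *		if (section)
 *			return -ERANGE;
 *		oobfree->offset = 2;
 *		oobfree->length = 6;
 *		return 0;
 *	}
 *
 *	static const struct mtd_ooblayout_ops foo_ooblayout_ops = {
 *		.ecc	= foo_ooblayout_ecc,
 *		.free	= foo_ooblayout_free,
 *	};
 *
 * The layout would then be attached with mtd_set_ooblayout() and queried
 * through the mtd_ooblayout_*() helpers declared further down.
 */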

/*
 * Internal ECC layout control structure. For historical reasons, there is a
 * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
 * for export to user-space via the ECCGETLAYOUT ioctl.
 * struct nand_ecclayout can be expanded in the future simply by adjusting the
 * macros above.
 */
struct nand_ecclayout {
	__u32 eccbytes;
	__u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
	__u32 oobavail;
	struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
};

struct module;	/* only needed for owner field in mtd_info */

struct mtd_info {
	u_char type;
	uint32_t flags;
	uint64_t size;	/* Total size of the MTD */

	/* "Major" erase size for the device. Naïve users may take this
	 * to be the only erase size available, or may use the more detailed
	 * information below if they desire.
	 */
	uint32_t erasesize;
	/* Minimal writable flash unit size. In case of NOR flash it is 1 (even
	 * though individual bits can be cleared), in case of NAND flash it is
	 * one NAND page (or a half or a quarter of it), in case of ECC-ed NOR
	 * it is the ECC block size, etc. It is illegal to have writesize = 0.
	 * Any driver registering a struct mtd_info must ensure a writesize of
	 * 1 or larger.
	 */
	uint32_t writesize;

	/*
	 * Size of the write buffer used by the MTD. MTD devices having a write
	 * buffer can write multiple writesize chunks at a time. E.g. while
	 * writing 4 * writesize bytes to a device with 2 * writesize bytes
	 * buffer the MTD driver can (but doesn't have to) do 2 writesize
	 * operations, but not 4. Currently, all NANDs have writebufsize
	 * equivalent to writesize (NAND page size). Some NOR flashes do have
	 * writebufsize greater than writesize.
	 */
	uint32_t writebufsize;

	uint32_t oobsize;	/* Amount of OOB data per block (e.g. 16) */
	uint32_t oobavail;	/* Available OOB bytes per block */

	/*
	 * If erasesize is a power of 2 then the shift is stored in
	 * erasesize_shift, otherwise erasesize_shift is zero. Ditto writesize.
	 */
	unsigned int erasesize_shift;
	unsigned int writesize_shift;
	/* Masks based on erasesize_shift and writesize_shift */
	unsigned int erasesize_mask;
	unsigned int writesize_mask;

	/*
	 * Read ops return -EUCLEAN if the maximum number of bitflips corrected
	 * on any one region comprising an ECC step equals or exceeds this
	 * value. Settable by the driver, else defaults to ecc_strength. The
	 * user can override it in sysfs. N.B. The meaning of the -EUCLEAN
	 * return code has changed; see
	 * Documentation/ABI/testing/sysfs-class-mtd for more detail.
	 */
	unsigned int bitflip_threshold;

	/* Kernel-only stuff starts here. */
#ifndef __UBOOT__
	const char *name;
#else
	char *name;
#endif
	int index;

	/* OOB layout description */
	const struct mtd_ooblayout_ops *ooblayout;

	/* ECC layout structure pointer - read only! */
	struct nand_ecclayout *ecclayout;

	/* The ECC step size */
	unsigned int ecc_step_size;

	/* Max number of correctable bit errors per ECC step */
	unsigned int ecc_strength;

	/* Data for variable erase regions. If numeraseregions is zero,
	 * it means that the whole device has erasesize as given above.
	 */
	int numeraseregions;
	struct mtd_erase_region_info *eraseregions;

	/*
	 * Do not call via these pointers, use the corresponding mtd_*()
	 * wrappers instead.
	 */
	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
#ifndef __UBOOT__
	int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
		       size_t *retlen, void **virt, resource_size_t *phys);
	int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
#endif
	unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
					     unsigned long len,
					     unsigned long offset,
					     unsigned long flags);
	int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
		      size_t *retlen, u_char *buf);
	int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
		       size_t *retlen, const u_char *buf);
	int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
			     size_t *retlen, const u_char *buf);
	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
			  struct mtd_oob_ops *ops);
	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
			   struct mtd_oob_ops *ops);
	int (*_get_fact_prot_info) (struct mtd_info *mtd, size_t len,
				    size_t *retlen, struct otp_info *buf);
	int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf);
	int (*_get_user_prot_info) (struct mtd_info *mtd, size_t len,
				    size_t *retlen, struct otp_info *buf);
	int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf);
	int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
				     size_t len, size_t *retlen, u_char *buf);
	int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len);
#ifndef __UBOOT__
	int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen);
#endif
	void (*_sync) (struct mtd_info *mtd);
	int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
#ifndef __UBOOT__
	int (*_suspend) (struct mtd_info *mtd);
	void (*_resume) (struct mtd_info *mtd);
	void (*_reboot) (struct mtd_info *mtd);
#endif
	/*
	 * If the driver is something smart, like UBI, it may need to maintain
	 * its own reference counting. The functions below are for the driver
	 * only.
	 */
	int (*_get_device) (struct mtd_info *mtd);
	void (*_put_device) (struct mtd_info *mtd);

#ifndef __UBOOT__
	/* Backing device capabilities for this device
	 * - provides mmap capabilities
	 */
	struct backing_dev_info *backing_dev_info;

	struct notifier_block reboot_notifier;	/* default mode before reboot */
#endif

	/* ECC status information */
	struct mtd_ecc_stats ecc_stats;
	/* Subpage shift (NAND) */
	int subpage_sft;

	void *priv;

	struct module *owner;
#ifndef __UBOOT__
	struct device dev;
#else
	struct udevice *dev;
#endif
	int usecount;
};
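/*
 * Illustrative sketch only (not a registration recipe): a driver usually
 * fills in the geometry fields and operation hooks of its struct mtd_info
 * before registering the device. All "foo_" names and the numbers below are
 * made up; see the field comments above for the real constraints (for
 * instance, writesize must be at least 1).
 *
 *	static struct mtd_info foo_mtd;
 *
 *	foo_mtd.type      = MTD_NANDFLASH;
 *	foo_mtd.flags     = MTD_CAP_NANDFLASH;
 *	foo_mtd.size      = 256 * 1024 * 1024;	// 256 MiB
 *	foo_mtd.erasesize = 128 * 1024;		// 128 KiB erase blocks
 *	foo_mtd.writesize = 2048;		// 2 KiB pages
 *	foo_mtd.oobsize   = 64;
 *	foo_mtd.name      = "foo-nand";
 *	foo_mtd.priv      = &foo_chip;		// driver-private state
 *	foo_mtd._erase    = foo_erase;
 *	foo_mtd._read     = foo_read;
 *	foo_mtd._write    = foo_write;
 *	mtd_set_ooblayout(&foo_mtd, &foo_ooblayout_ops);
 *
 *	err = add_mtd_device(&foo_mtd);
 *
 * Registration itself goes through add_mtd_device() or the
 * mtd_device_register() helper, both declared later in this header.
 */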

int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc);
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion);
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree);
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
				const u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
				u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd);
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd);

static inline void mtd_set_ooblayout(struct mtd_info *mtd,
				     const struct mtd_ooblayout_ops *ooblayout)
{
	mtd->ooblayout = ooblayout;
}

static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
{
	return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}

int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
#ifndef __UBOOT__
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys);
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
#endif
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags);
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf);
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf);
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf);
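/*
 * Illustrative sketch only: erasing one block and rewriting its first page
 * with the wrappers declared above. Everything except the mtd_*() calls and
 * struct erase_info is made up, and error handling is abbreviated.
 *
 *	struct erase_info instr = {
 *		.mtd  = mtd,
 *		.addr = block_ofs,	// block-aligned offset
 *		.len  = mtd->erasesize,
 *	};
 *	size_t retlen;
 *	int err;
 *
 *	err = mtd_erase(mtd, &instr);
 *	if (err)
 *		return err;	// instr.fail_addr may name the failing block
 *
 *	err = mtd_write(mtd, block_ofs, mtd->writesize, &retlen, wbuf);
 *	if (!err)
 *		err = mtd_read(mtd, block_ofs, mtd->writesize, &retlen, rbuf);
 */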

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops);

static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
				struct mtd_oob_ops *ops)
{
	ops->retlen = ops->oobretlen = 0;
	if (!mtd->_write_oob)
		return -EOPNOTSUPP;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return mtd->_write_oob(mtd, to, ops);
}
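/*
 * Illustrative sketch only: reading one page of data together with its free
 * OOB bytes through mtd_read_oob(). The buffer names and the offset are made
 * up; with a NULL datbuf only the OOB area would be transferred, as described
 * for struct mtd_oob_ops above.
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_AUTO_OOB,
 *		.len	= mtd->writesize,
 *		.ooblen	= mtd->oobavail,
 *		.datbuf	= databuf,
 *		.oobbuf	= oobbuf,
 *	};
 *	int err = mtd_read_oob(mtd, page_ofs, &ops);
 *
 * On success, ops.retlen and ops.oobretlen report how much was actually
 * transferred. mtd_write_oob() takes the same operand structure but, as noted
 * above, must not span more than one page's OOB area.
 */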

int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf);
int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf);
int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf);
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf);
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf);
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);

#ifndef __UBOOT__
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen);
#endif

static inline void mtd_sync(struct mtd_info *mtd)
{
	if (mtd->_sync)
		mtd->_sync(mtd);
}

int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs);
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
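/*
 * Illustrative sketch only: a block-by-block read that skips bad blocks and
 * tolerates corrected bitflips. Buffer handling is abbreviated and all names
 * apart from the mtd_*() helpers are made up; mtd_is_bitflip() and
 * mtd_is_eccerr() are defined near the end of this header.
 *
 *	for (ofs = start; ofs < start + length; ofs += mtd->erasesize) {
 *		size_t retlen;
 *		int err;
 *
 *		if (mtd_block_isbad(mtd, ofs))
 *			continue;	// skip blocks marked bad
 *
 *		err = mtd_read(mtd, ofs, mtd->erasesize, &retlen, buf);
 *		if (mtd_is_bitflip(err))
 *			err = 0;	// -EUCLEAN: corrected, data still usable
 *		if (err)
 *			return err;	// -EBADMSG and friends are real errors
 *		buf += retlen;
 *	}
 */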

#ifndef __UBOOT__
static inline int mtd_suspend(struct mtd_info *mtd)
{
	return mtd->_suspend ? mtd->_suspend(mtd) : 0;
}

static inline void mtd_resume(struct mtd_info *mtd)
{
	if (mtd->_resume)
		mtd->_resume(mtd);
}
#endif

static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz >> mtd->erasesize_shift;
	do_div(sz, mtd->erasesize);
	return sz;
}

static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz & mtd->erasesize_mask;
	return do_div(sz, mtd->erasesize);
}

static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz >> mtd->writesize_shift;
	do_div(sz, mtd->writesize);
	return sz;
}

static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz & mtd->writesize_mask;
	return do_div(sz, mtd->writesize);
}
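/*
 * Illustrative sketch only: the helpers above use a shift when erasesize or
 * writesize is a power of two and fall back to do_div() otherwise. For
 * example, converting an absolute offset (made-up variable "ofs") into an
 * erase-block number and the offset within that block:
 *
 *	uint32_t block     = mtd_div_by_eb(ofs, mtd);
 *	uint32_t in_block  = mtd_mod_by_eb(ofs, mtd);
 *	uint64_t block_ofs = ofs - in_block;	// start of the block
 */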

static inline int mtd_has_oob(const struct mtd_info *mtd)
{
	return mtd->_read_oob && mtd->_write_oob;
}

static inline int mtd_type_is_nand(const struct mtd_info *mtd)
{
	return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
}

static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
	return !!mtd->_block_isbad;
}

/* Kernel-side ioctl definitions */

struct mtd_partition;
struct mtd_part_parser_data;

extern int mtd_device_parse_register(struct mtd_info *mtd,
				     const char * const *part_probe_types,
				     struct mtd_part_parser_data *parser_data,
				     const struct mtd_partition *defparts,
				     int defnr_parts);
#define mtd_device_register(master, parts, nr_parts)	\
	mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
extern int mtd_device_unregister(struct mtd_info *master);
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
extern struct mtd_info *get_mtd_device_nm(const char *name);
extern void put_mtd_device(struct mtd_info *mtd);

#ifndef __UBOOT__
struct mtd_notifier {
	void (*add)(struct mtd_info *mtd);
	void (*remove)(struct mtd_info *mtd);
	struct list_head list;
};

extern void register_mtd_user(struct mtd_notifier *new);
extern int unregister_mtd_user(struct mtd_notifier *old);
#endif

void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);

#ifdef CONFIG_MTD_PARTITIONS
void mtd_erase_callback(struct erase_info *instr);
#else
static inline void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->callback)
		instr->callback(instr);
}
#endif

static inline int mtd_is_bitflip(int err)
{
	return err == -EUCLEAN;
}

static inline int mtd_is_eccerr(int err)
{
	return err == -EBADMSG;
}

static inline int mtd_is_bitflip_or_eccerr(int err)
{
	return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}

unsigned mtd_mmap_capabilities(struct mtd_info *mtd);

#ifdef __UBOOT__
/* drivers/mtd/mtdcore.h */
int add_mtd_device(struct mtd_info *mtd);
int del_mtd_device(struct mtd_info *mtd);
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);

int mtd_arg_off(const char *arg, int *idx, loff_t *off, loff_t *size,
		loff_t *maxsize, int devtype, uint64_t chipsize);
int mtd_arg_off_size(int argc, char *const argv[], int *idx, loff_t *off,
		     loff_t *size, loff_t *maxsize, int devtype,
		     uint64_t chipsize);

/* drivers/mtd/mtdcore.c */
void mtd_get_len_incl_bad(struct mtd_info *mtd, uint64_t offset,
			  const uint64_t length, uint64_t *len_incl_bad,
			  int *truncated);
#endif

#endif /* __MTD_MTD_H__ */