xref: /openbmc/linux/drivers/mtd/mtdcore.c (revision bfa87ac8)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Core registration and callback routines for MTD
4  * drivers and users.
5  *
6  * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
7  * Copyright © 2006      Red Hat UK Limited
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/ptrace.h>
13 #include <linux/seq_file.h>
14 #include <linux/string.h>
15 #include <linux/timer.h>
16 #include <linux/major.h>
17 #include <linux/fs.h>
18 #include <linux/err.h>
19 #include <linux/ioctl.h>
20 #include <linux/init.h>
21 #include <linux/of.h>
22 #include <linux/proc_fs.h>
23 #include <linux/idr.h>
24 #include <linux/backing-dev.h>
25 #include <linux/gfp.h>
26 #include <linux/slab.h>
27 #include <linux/reboot.h>
28 #include <linux/leds.h>
29 #include <linux/debugfs.h>
30 #include <linux/nvmem-provider.h>
31 
32 #include <linux/mtd/mtd.h>
33 #include <linux/mtd/partitions.h>
34 
35 #include "mtdcore.h"
36 
37 struct backing_dev_info *mtd_bdi;
38 
39 #ifdef CONFIG_PM_SLEEP
40 
41 static int mtd_cls_suspend(struct device *dev)
42 {
43 	struct mtd_info *mtd = dev_get_drvdata(dev);
44 
45 	return mtd ? mtd_suspend(mtd) : 0;
46 }
47 
48 static int mtd_cls_resume(struct device *dev)
49 {
50 	struct mtd_info *mtd = dev_get_drvdata(dev);
51 
52 	if (mtd)
53 		mtd_resume(mtd);
54 	return 0;
55 }
56 
57 static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
58 #define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
59 #else
60 #define MTD_CLS_PM_OPS NULL
61 #endif
62 
63 static struct class mtd_class = {
64 	.name = "mtd",
65 	.owner = THIS_MODULE,
66 	.pm = MTD_CLS_PM_OPS,
67 };
68 
69 static DEFINE_IDR(mtd_idr);
70 
71 /* These are exported solely for the purpose of mtd_blkdevs.c. You
72    should not use them for _anything_ else */
73 DEFINE_MUTEX(mtd_table_mutex);
74 EXPORT_SYMBOL_GPL(mtd_table_mutex);
75 
76 struct mtd_info *__mtd_next_device(int i)
77 {
78 	return idr_get_next(&mtd_idr, &i);
79 }
80 EXPORT_SYMBOL_GPL(__mtd_next_device);
81 
82 static LIST_HEAD(mtd_notifiers);
83 
84 
85 #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
86 
87 /* REVISIT once MTD uses the driver model better, whoever allocates
88  * the mtd_info will probably want to use the release() hook...
89  */
90 static void mtd_release(struct device *dev)
91 {
92 	struct mtd_info *mtd = dev_get_drvdata(dev);
93 	dev_t index = MTD_DEVT(mtd->index);
94 
95 	/* remove /dev/mtdXro node */
96 	device_destroy(&mtd_class, index + 1);
97 }
98 
99 #define MTD_DEVICE_ATTR_RO(name) \
100 static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)
101 
102 #define MTD_DEVICE_ATTR_RW(name) \
103 static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
104 
105 static ssize_t mtd_type_show(struct device *dev,
106 		struct device_attribute *attr, char *buf)
107 {
108 	struct mtd_info *mtd = dev_get_drvdata(dev);
109 	char *type;
110 
111 	switch (mtd->type) {
112 	case MTD_ABSENT:
113 		type = "absent";
114 		break;
115 	case MTD_RAM:
116 		type = "ram";
117 		break;
118 	case MTD_ROM:
119 		type = "rom";
120 		break;
121 	case MTD_NORFLASH:
122 		type = "nor";
123 		break;
124 	case MTD_NANDFLASH:
125 		type = "nand";
126 		break;
127 	case MTD_DATAFLASH:
128 		type = "dataflash";
129 		break;
130 	case MTD_UBIVOLUME:
131 		type = "ubi";
132 		break;
133 	case MTD_MLCNANDFLASH:
134 		type = "mlc-nand";
135 		break;
136 	default:
137 		type = "unknown";
138 	}
139 
140 	return sysfs_emit(buf, "%s\n", type);
141 }
142 MTD_DEVICE_ATTR_RO(type);
143 
144 static ssize_t mtd_flags_show(struct device *dev,
145 		struct device_attribute *attr, char *buf)
146 {
147 	struct mtd_info *mtd = dev_get_drvdata(dev);
148 
149 	return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
150 }
151 MTD_DEVICE_ATTR_RO(flags);
152 
153 static ssize_t mtd_size_show(struct device *dev,
154 		struct device_attribute *attr, char *buf)
155 {
156 	struct mtd_info *mtd = dev_get_drvdata(dev);
157 
158 	return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
159 }
160 MTD_DEVICE_ATTR_RO(size);
161 
162 static ssize_t mtd_erasesize_show(struct device *dev,
163 		struct device_attribute *attr, char *buf)
164 {
165 	struct mtd_info *mtd = dev_get_drvdata(dev);
166 
167 	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
168 }
169 MTD_DEVICE_ATTR_RO(erasesize);
170 
171 static ssize_t mtd_writesize_show(struct device *dev,
172 		struct device_attribute *attr, char *buf)
173 {
174 	struct mtd_info *mtd = dev_get_drvdata(dev);
175 
176 	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
177 }
178 MTD_DEVICE_ATTR_RO(writesize);
179 
180 static ssize_t mtd_subpagesize_show(struct device *dev,
181 		struct device_attribute *attr, char *buf)
182 {
183 	struct mtd_info *mtd = dev_get_drvdata(dev);
184 	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
185 
186 	return sysfs_emit(buf, "%u\n", subpagesize);
187 }
188 MTD_DEVICE_ATTR_RO(subpagesize);
189 
190 static ssize_t mtd_oobsize_show(struct device *dev,
191 		struct device_attribute *attr, char *buf)
192 {
193 	struct mtd_info *mtd = dev_get_drvdata(dev);
194 
195 	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
196 }
197 MTD_DEVICE_ATTR_RO(oobsize);
198 
199 static ssize_t mtd_oobavail_show(struct device *dev,
200 				 struct device_attribute *attr, char *buf)
201 {
202 	struct mtd_info *mtd = dev_get_drvdata(dev);
203 
204 	return sysfs_emit(buf, "%u\n", mtd->oobavail);
205 }
206 MTD_DEVICE_ATTR_RO(oobavail);
207 
208 static ssize_t mtd_numeraseregions_show(struct device *dev,
209 		struct device_attribute *attr, char *buf)
210 {
211 	struct mtd_info *mtd = dev_get_drvdata(dev);
212 
213 	return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
214 }
215 MTD_DEVICE_ATTR_RO(numeraseregions);
216 
217 static ssize_t mtd_name_show(struct device *dev,
218 		struct device_attribute *attr, char *buf)
219 {
220 	struct mtd_info *mtd = dev_get_drvdata(dev);
221 
222 	return sysfs_emit(buf, "%s\n", mtd->name);
223 }
224 MTD_DEVICE_ATTR_RO(name);
225 
226 static ssize_t mtd_ecc_strength_show(struct device *dev,
227 				     struct device_attribute *attr, char *buf)
228 {
229 	struct mtd_info *mtd = dev_get_drvdata(dev);
230 
231 	return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
232 }
233 MTD_DEVICE_ATTR_RO(ecc_strength);
234 
235 static ssize_t mtd_bitflip_threshold_show(struct device *dev,
236 					  struct device_attribute *attr,
237 					  char *buf)
238 {
239 	struct mtd_info *mtd = dev_get_drvdata(dev);
240 
241 	return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
242 }
243 
244 static ssize_t mtd_bitflip_threshold_store(struct device *dev,
245 					   struct device_attribute *attr,
246 					   const char *buf, size_t count)
247 {
248 	struct mtd_info *mtd = dev_get_drvdata(dev);
249 	unsigned int bitflip_threshold;
250 	int retval;
251 
252 	retval = kstrtouint(buf, 0, &bitflip_threshold);
253 	if (retval)
254 		return retval;
255 
256 	mtd->bitflip_threshold = bitflip_threshold;
257 	return count;
258 }
259 MTD_DEVICE_ATTR_RW(bitflip_threshold);
260 
261 static ssize_t mtd_ecc_step_size_show(struct device *dev,
262 		struct device_attribute *attr, char *buf)
263 {
264 	struct mtd_info *mtd = dev_get_drvdata(dev);
265 
266 	return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
267 
268 }
269 MTD_DEVICE_ATTR_RO(ecc_step_size);
270 
271 static ssize_t mtd_corrected_bits_show(struct device *dev,
272 		struct device_attribute *attr, char *buf)
273 {
274 	struct mtd_info *mtd = dev_get_drvdata(dev);
275 	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
276 
277 	return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
278 }
279 MTD_DEVICE_ATTR_RO(corrected_bits);	/* ecc stats corrected */
280 
281 static ssize_t mtd_ecc_failures_show(struct device *dev,
282 		struct device_attribute *attr, char *buf)
283 {
284 	struct mtd_info *mtd = dev_get_drvdata(dev);
285 	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
286 
287 	return sysfs_emit(buf, "%u\n", ecc_stats->failed);
288 }
289 MTD_DEVICE_ATTR_RO(ecc_failures);	/* ecc stats errors */
290 
291 static ssize_t mtd_bad_blocks_show(struct device *dev,
292 		struct device_attribute *attr, char *buf)
293 {
294 	struct mtd_info *mtd = dev_get_drvdata(dev);
295 	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
296 
297 	return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
298 }
299 MTD_DEVICE_ATTR_RO(bad_blocks);
300 
301 static ssize_t mtd_bbt_blocks_show(struct device *dev,
302 		struct device_attribute *attr, char *buf)
303 {
304 	struct mtd_info *mtd = dev_get_drvdata(dev);
305 	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
306 
307 	return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
308 }
309 MTD_DEVICE_ATTR_RO(bbt_blocks);
310 
311 static struct attribute *mtd_attrs[] = {
312 	&dev_attr_type.attr,
313 	&dev_attr_flags.attr,
314 	&dev_attr_size.attr,
315 	&dev_attr_erasesize.attr,
316 	&dev_attr_writesize.attr,
317 	&dev_attr_subpagesize.attr,
318 	&dev_attr_oobsize.attr,
319 	&dev_attr_oobavail.attr,
320 	&dev_attr_numeraseregions.attr,
321 	&dev_attr_name.attr,
322 	&dev_attr_ecc_strength.attr,
323 	&dev_attr_ecc_step_size.attr,
324 	&dev_attr_corrected_bits.attr,
325 	&dev_attr_ecc_failures.attr,
326 	&dev_attr_bad_blocks.attr,
327 	&dev_attr_bbt_blocks.attr,
328 	&dev_attr_bitflip_threshold.attr,
329 	NULL,
330 };
331 ATTRIBUTE_GROUPS(mtd);
332 
333 static const struct device_type mtd_devtype = {
334 	.name		= "mtd",
335 	.groups		= mtd_groups,
336 	.release	= mtd_release,
337 };
338 
339 static bool mtd_expert_analysis_mode;
340 
341 #ifdef CONFIG_DEBUG_FS
342 bool mtd_check_expert_analysis_mode(void)
343 {
344 	const char *mtd_expert_analysis_warning =
345 		"Bad block checks have been entirely disabled.\n"
346 		"This is only reserved for post-mortem forensics and debug purposes.\n"
347 		"Never enable this mode if you do not know what you are doing!\n";
348 
349 	return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
350 }
351 EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
352 #endif
353 
354 static struct dentry *dfs_dir_mtd;
355 
356 static void mtd_debugfs_populate(struct mtd_info *mtd)
357 {
358 	struct device *dev = &mtd->dev;
359 
360 	if (IS_ERR_OR_NULL(dfs_dir_mtd))
361 		return;
362 
363 	mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
364 }
365 
366 #ifndef CONFIG_MMU
367 unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
368 {
369 	switch (mtd->type) {
370 	case MTD_RAM:
371 		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
372 			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
373 	case MTD_ROM:
374 		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
375 			NOMMU_MAP_READ;
376 	default:
377 		return NOMMU_MAP_COPY;
378 	}
379 }
380 EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
381 #endif
382 
383 static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
384 			       void *cmd)
385 {
386 	struct mtd_info *mtd;
387 
388 	mtd = container_of(n, struct mtd_info, reboot_notifier);
389 	mtd->_reboot(mtd);
390 
391 	return NOTIFY_DONE;
392 }
393 
394 /**
395  * mtd_wunit_to_pairing_info - get pairing information of a wunit
396  * @mtd: pointer to the MTD device info structure
397  * @wunit: write unit we are interested in
398  * @info: returned pairing information
399  *
400  * Retrieve pairing information associated to the wunit.
401  * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
402  * paired together, and where programming a page may influence the page it is
403  * paired with.
404  * The notion of page is replaced by the term wunit (write-unit) to stay
405  * consistent with the ->writesize field.
406  *
407  * The @wunit argument can be extracted from an absolute offset using
408  * mtd_offset_to_wunit(). @info is filled with the pairing information attached
409  * to @wunit.
410  *
411  * From the pairing info the MTD user can find all the wunits paired with
412  * @wunit using the following loop:
413  *
414  * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
415  *	info.group = i;
416  *	mtd_pairing_info_to_wunit(mtd, &info);
417  *	...
418  * }
419  */
420 int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
421 			      struct mtd_pairing_info *info)
422 {
423 	struct mtd_info *master = mtd_get_master(mtd);
424 	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
425 
426 	if (wunit < 0 || wunit >= npairs)
427 		return -EINVAL;
428 
429 	if (master->pairing && master->pairing->get_info)
430 		return master->pairing->get_info(master, wunit, info);
431 
432 	info->group = 0;
433 	info->pair = wunit;
434 
435 	return 0;
436 }
437 EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
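
/*
 * A minimal usage sketch (not part of the core API): list every wunit
 * paired with @wunit, following the loop in the kerneldoc above. The
 * helper name and its pr_info() output are hypothetical.
 */
static void __maybe_unused mtd_dump_paired_wunits(struct mtd_info *mtd,
						  int wunit)
{
	struct mtd_pairing_info info;
	int i, paired;

	if (mtd_wunit_to_pairing_info(mtd, wunit, &info))
		return;

	/* Keep the pair fixed and walk the groups */
	for (i = 0; i < mtd_pairing_groups(mtd); i++) {
		info.group = i;
		paired = mtd_pairing_info_to_wunit(mtd, &info);
		if (paired >= 0)
			pr_info("wunit %d pairs with wunit %d\n", wunit, paired);
	}
}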
438 
439 /**
440  * mtd_pairing_info_to_wunit - get wunit from pairing information
441  * @mtd: pointer to the MTD device info structure
442  * @info: pairing information struct
443  *
444  * Returns a positive number representing the wunit associated to the info
445  * struct, or a negative error code.
446  *
447  * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
448  * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
449  * doc).
450  *
451  * It can also be used to only program the first page of each pair (i.e.
452  * page attached to group 0), which allows one to use an MLC NAND in
453  * software-emulated SLC mode:
454  *
455  * info.group = 0;
456  * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
457  * for (info.pair = 0; info.pair < npairs; info.pair++) {
458  *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
459  *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
460  *		  mtd->writesize, &retlen, buf + (info.pair * mtd->writesize));
461  * }
462  */
463 int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
464 			      const struct mtd_pairing_info *info)
465 {
466 	struct mtd_info *master = mtd_get_master(mtd);
467 	int ngroups = mtd_pairing_groups(master);
468 	int npairs = mtd_wunit_per_eb(master) / ngroups;
469 
470 	if (!info || info->pair < 0 || info->pair >= npairs ||
471 	    info->group < 0 || info->group >= ngroups)
472 		return -EINVAL;
473 
474 	if (master->pairing && master->pairing->get_wunit)
475 		return master->pairing->get_wunit(master, info);
476 
477 	return info->pair;
478 }
479 EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
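
/*
 * A sketch of the software-emulated SLC pattern described above: program
 * only the group-0 page of each pair in the erase block at @blkoffs. The
 * helper name is hypothetical and @buf is assumed to hold npairs pages.
 */
static int __maybe_unused mtd_write_block_slc(struct mtd_info *mtd,
					      loff_t blkoffs, const u8 *buf)
{
	int npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
	struct mtd_pairing_info info = { .group = 0 };
	size_t retlen;
	int wunit, err;

	for (info.pair = 0; info.pair < npairs; info.pair++) {
		wunit = mtd_pairing_info_to_wunit(mtd, &info);
		if (wunit < 0)
			return wunit;

		err = mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
				mtd->writesize, &retlen,
				buf + ((size_t)info.pair * mtd->writesize));
		if (err)
			return err;
	}

	return 0;
}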
480 
481 /**
482  * mtd_pairing_groups - get the number of pairing groups
483  * @mtd: pointer to the MTD device info structure
484  *
485  * Returns the number of pairing groups.
486  *
487  * This number is usually equal to the number of bits exposed by a single
488  * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
489  * to iterate over all pages of a given pair.
490  */
491 int mtd_pairing_groups(struct mtd_info *mtd)
492 {
493 	struct mtd_info *master = mtd_get_master(mtd);
494 
495 	if (!master->pairing || !master->pairing->ngroups)
496 		return 1;
497 
498 	return master->pairing->ngroups;
499 }
500 EXPORT_SYMBOL_GPL(mtd_pairing_groups);
501 
502 static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
503 			      void *val, size_t bytes)
504 {
505 	struct mtd_info *mtd = priv;
506 	size_t retlen;
507 	int err;
508 
509 	err = mtd_read(mtd, offset, bytes, &retlen, val);
510 	if (err && err != -EUCLEAN)
511 		return err;
512 
513 	return retlen == bytes ? 0 : -EIO;
514 }
515 
516 static int mtd_nvmem_add(struct mtd_info *mtd)
517 {
518 	struct device_node *node = mtd_get_of_node(mtd);
519 	struct nvmem_config config = {};
520 
521 	config.id = -1;
522 	config.dev = &mtd->dev;
523 	config.name = dev_name(&mtd->dev);
524 	config.owner = THIS_MODULE;
525 	config.reg_read = mtd_nvmem_reg_read;
526 	config.size = mtd->size;
527 	config.word_size = 1;
528 	config.stride = 1;
529 	config.read_only = true;
530 	config.root_only = true;
531 	config.ignore_wp = true;
532 	config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
533 	config.priv = mtd;
534 
535 	mtd->nvmem = nvmem_register(&config);
536 	if (IS_ERR(mtd->nvmem)) {
537 		/* Just ignore if there is no NVMEM support in the kernel */
538 		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
539 			mtd->nvmem = NULL;
540 		} else {
541 			dev_err(&mtd->dev, "Failed to register NVMEM device\n");
542 			return PTR_ERR(mtd->nvmem);
543 		}
544 	}
545 
546 	return 0;
547 }
548 
549 static void mtd_check_of_node(struct mtd_info *mtd)
550 {
551 	struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
552 	const char *pname, *prefix = "partition-";
553 	int plen, mtd_name_len, offset, prefix_len;
554 	struct mtd_info *parent;
555 	bool found = false;
556 
557 	/* Check if MTD already has a device node */
558 	if (dev_of_node(&mtd->dev))
559 		return;
560 
561 	/* Check if a partitions node exists */
562 	if (!mtd_is_partition(mtd))
563 		return;
564 	parent = mtd->parent;
565 	parent_dn = of_node_get(dev_of_node(&parent->dev));
566 	if (!parent_dn)
567 		return;
568 
569 	partitions = of_get_child_by_name(parent_dn, "partitions");
570 	if (!partitions)
571 		goto exit_parent;
572 
573 	prefix_len = strlen(prefix);
574 	mtd_name_len = strlen(mtd->name);
575 
576 	/* Search if a partition is defined with the same name */
577 	for_each_child_of_node(partitions, mtd_dn) {
578 		offset = 0;
579 
580 		/* Skip partition with no/wrong prefix */
581 		if (!of_node_name_prefix(mtd_dn, prefix))
582 			continue;
583 
584 		/* Labels have priority. Check them first */
585 		if (of_property_read_string(mtd_dn, "label", &pname)) {
586 			of_property_read_string(mtd_dn, "name", &pname);
587 			offset = prefix_len;
588 		}
589 
590 		plen = strlen(pname) - offset;
591 		if (plen == mtd_name_len &&
592 		    !strncmp(mtd->name, pname + offset, plen)) {
593 			found = true;
594 			break;
595 		}
596 	}
597 
598 	if (!found)
599 		goto exit_partitions;
600 
601 	/* Set of_node only for nvmem */
602 	if (of_device_is_compatible(mtd_dn, "nvmem-cells"))
603 		mtd_set_of_node(mtd, mtd_dn);
604 
605 exit_partitions:
606 	of_node_put(partitions);
607 exit_parent:
608 	of_node_put(parent_dn);
609 }
610 
611 /**
612  *	add_mtd_device - register an MTD device
613  *	@mtd: pointer to new MTD device info structure
614  *
615  *	Add a device to the list of MTD devices present in the system, and
616  *	notify each currently active MTD 'user' of its arrival. Returns
617  *	zero on success or non-zero on failure.
618  */
619 
620 int add_mtd_device(struct mtd_info *mtd)
621 {
622 	struct device_node *np = mtd_get_of_node(mtd);
623 	struct mtd_info *master = mtd_get_master(mtd);
624 	struct mtd_notifier *not;
625 	int i, error, ofidx;
626 
627 	/*
628 	 * May occur, for instance, on buggy drivers which call
629 	 * mtd_device_parse_register() multiple times on the same master MTD,
630 	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
631 	 */
632 	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
633 		return -EEXIST;
634 
635 	BUG_ON(mtd->writesize == 0);
636 
637 	/*
638 	 * MTD drivers should implement ->_{write,read}() or
639 	 * ->_{write,read}_oob(), but not both.
640 	 */
641 	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
642 		    (mtd->_read && mtd->_read_oob)))
643 		return -EINVAL;
644 
645 	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
646 		    !(mtd->flags & MTD_NO_ERASE)))
647 		return -EINVAL;
648 
649 	/*
650 	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
651 	 * master is an MLC NAND and has a proper pairing scheme defined.
652 	 * We also reject masters that implement ->_writev() for now, because
653 	 * NAND controller drivers don't implement this hook, and adding the
654 	 * SLC -> MLC address/length conversion to this path is useless if we
655 	 * don't have a user.
656 	 */
657 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
658 	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
659 	     !master->pairing || master->_writev))
660 		return -EINVAL;
661 
662 	mutex_lock(&mtd_table_mutex);
663 
664 	ofidx = -1;
665 	if (np)
666 		ofidx = of_alias_get_id(np, "mtd");
667 	if (ofidx >= 0)
668 		i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
669 	else
670 		i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
671 	if (i < 0) {
672 		error = i;
673 		goto fail_locked;
674 	}
675 
676 	mtd->index = i;
677 	mtd->usecount = 0;
678 
679 	/* default value if not set by driver */
680 	if (mtd->bitflip_threshold == 0)
681 		mtd->bitflip_threshold = mtd->ecc_strength;
682 
683 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
684 		int ngroups = mtd_pairing_groups(master);
685 
686 		mtd->erasesize /= ngroups;
687 		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
688 			    mtd->erasesize;
689 	}
690 
691 	if (is_power_of_2(mtd->erasesize))
692 		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
693 	else
694 		mtd->erasesize_shift = 0;
695 
696 	if (is_power_of_2(mtd->writesize))
697 		mtd->writesize_shift = ffs(mtd->writesize) - 1;
698 	else
699 		mtd->writesize_shift = 0;
700 
701 	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
702 	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
703 
704 	/* Some chips always power up locked. Unlock them now */
705 	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
706 		error = mtd_unlock(mtd, 0, mtd->size);
707 		if (error && error != -EOPNOTSUPP)
708 			printk(KERN_WARNING
709 			       "%s: unlock failed, writes may not work\n",
710 			       mtd->name);
711 		/* Ignore unlock failures? */
712 		error = 0;
713 	}
714 
715 	/* Caller should have set dev.parent to match the
716 	 * physical device, if appropriate.
717 	 */
718 	mtd->dev.type = &mtd_devtype;
719 	mtd->dev.class = &mtd_class;
720 	mtd->dev.devt = MTD_DEVT(i);
721 	dev_set_name(&mtd->dev, "mtd%d", i);
722 	dev_set_drvdata(&mtd->dev, mtd);
723 	mtd_check_of_node(mtd);
724 	of_node_get(mtd_get_of_node(mtd));
725 	error = device_register(&mtd->dev);
726 	if (error)
727 		goto fail_added;
728 
729 	/* Add the nvmem provider */
730 	error = mtd_nvmem_add(mtd);
731 	if (error)
732 		goto fail_nvmem_add;
733 
734 	mtd_debugfs_populate(mtd);
735 
736 	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
737 		      "mtd%dro", i);
738 
739 	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
740 	/* No need to get a refcount on the module containing
741 	   the notifier, since we hold the mtd_table_mutex */
742 	list_for_each_entry(not, &mtd_notifiers, list)
743 		not->add(mtd);
744 
745 	mutex_unlock(&mtd_table_mutex);
746 	/* We _know_ we aren't being removed, because
747 	   our caller is still holding us here. So none
748 	   of this try_ nonsense, and no bitching about it
749 	   either. :) */
750 	__module_get(THIS_MODULE);
751 	return 0;
752 
753 fail_nvmem_add:
754 	device_unregister(&mtd->dev);
755 fail_added:
756 	of_node_put(mtd_get_of_node(mtd));
757 	idr_remove(&mtd_idr, i);
758 fail_locked:
759 	mutex_unlock(&mtd_table_mutex);
760 	return error;
761 }
762 
763 /**
764  *	del_mtd_device - unregister an MTD device
765  *	@mtd: pointer to MTD device info structure
766  *
767  *	Remove a device from the list of MTD devices present in the system,
768  *	and notify each currently active MTD 'user' of its departure.
769  *	Returns zero on success or a negative error code on failure: -ENODEV if
770  *	the device is not present in the list, or -EBUSY if it is still in use.
771  */
772 
773 int del_mtd_device(struct mtd_info *mtd)
774 {
775 	int ret;
776 	struct mtd_notifier *not;
777 
778 	mutex_lock(&mtd_table_mutex);
779 
780 	if (idr_find(&mtd_idr, mtd->index) != mtd) {
781 		ret = -ENODEV;
782 		goto out_error;
783 	}
784 
785 	/* No need to get a refcount on the module containing
786 	   the notifier, since we hold the mtd_table_mutex */
787 	list_for_each_entry(not, &mtd_notifiers, list)
788 		not->remove(mtd);
789 
790 	if (mtd->usecount) {
791 		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
792 		       mtd->index, mtd->name, mtd->usecount);
793 		ret = -EBUSY;
794 	} else {
795 		debugfs_remove_recursive(mtd->dbg.dfs_dir);
796 
797 		/* Try to remove the NVMEM provider */
798 		nvmem_unregister(mtd->nvmem);
799 
800 		device_unregister(&mtd->dev);
801 
802 		/* Clear dev so mtd can be safely re-registered later if desired */
803 		memset(&mtd->dev, 0, sizeof(mtd->dev));
804 
805 		idr_remove(&mtd_idr, mtd->index);
806 		of_node_put(mtd_get_of_node(mtd));
807 
808 		module_put(THIS_MODULE);
809 		ret = 0;
810 	}
811 
812 out_error:
813 	mutex_unlock(&mtd_table_mutex);
814 	return ret;
815 }
816 
817 /*
818  * Set a few defaults based on the parent device, if not provided by the
819  * driver
820  */
821 static void mtd_set_dev_defaults(struct mtd_info *mtd)
822 {
823 	if (mtd->dev.parent) {
824 		if (!mtd->owner && mtd->dev.parent->driver)
825 			mtd->owner = mtd->dev.parent->driver->owner;
826 		if (!mtd->name)
827 			mtd->name = dev_name(mtd->dev.parent);
828 	} else {
829 		pr_debug("mtd device won't show a device symlink in sysfs\n");
830 	}
831 
832 	INIT_LIST_HEAD(&mtd->partitions);
833 	mutex_init(&mtd->master.partitions_lock);
834 	mutex_init(&mtd->master.chrdev_lock);
835 }
836 
837 static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
838 {
839 	struct otp_info *info;
840 	ssize_t size = 0;
841 	unsigned int i;
842 	size_t retlen;
843 	int ret;
844 
845 	info = kmalloc(PAGE_SIZE, GFP_KERNEL);
846 	if (!info)
847 		return -ENOMEM;
848 
849 	if (is_user)
850 		ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
851 	else
852 		ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
853 	if (ret)
854 		goto err;
855 
856 	for (i = 0; i < retlen / sizeof(*info); i++)
857 		size += info[i].length;
858 
859 	kfree(info);
860 	return size;
861 
862 err:
863 	kfree(info);
864 
865 	/* ENODATA means there is no OTP region. */
866 	return ret == -ENODATA ? 0 : ret;
867 }
868 
869 static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
870 						   const char *compatible,
871 						   int size,
872 						   nvmem_reg_read_t reg_read)
873 {
874 	struct nvmem_device *nvmem = NULL;
875 	struct nvmem_config config = {};
876 	struct device_node *np;
877 
878 	/* DT binding is optional */
879 	np = of_get_compatible_child(mtd->dev.of_node, compatible);
880 
881 	/* OTP nvmem will be registered on the physical device */
882 	config.dev = mtd->dev.parent;
883 	config.name = kasprintf(GFP_KERNEL, "%s-%s", dev_name(&mtd->dev), compatible);
884 	config.id = NVMEM_DEVID_NONE;
885 	config.owner = THIS_MODULE;
886 	config.type = NVMEM_TYPE_OTP;
887 	config.root_only = true;
888 	config.ignore_wp = true;
889 	config.reg_read = reg_read;
890 	config.size = size;
891 	config.of_node = np;
892 	config.priv = mtd;
893 
894 	nvmem = nvmem_register(&config);
895 	/* Just ignore if there is no NVMEM support in the kernel */
896 	if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
897 		nvmem = NULL;
898 
899 	of_node_put(np);
900 	kfree(config.name);
901 
902 	return nvmem;
903 }
904 
905 static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
906 				       void *val, size_t bytes)
907 {
908 	struct mtd_info *mtd = priv;
909 	size_t retlen;
910 	int ret;
911 
912 	ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
913 	if (ret)
914 		return ret;
915 
916 	return retlen == bytes ? 0 : -EIO;
917 }
918 
919 static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
920 				       void *val, size_t bytes)
921 {
922 	struct mtd_info *mtd = priv;
923 	size_t retlen;
924 	int ret;
925 
926 	ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
927 	if (ret)
928 		return ret;
929 
930 	return retlen == bytes ? 0 : -EIO;
931 }
932 
933 static int mtd_otp_nvmem_add(struct mtd_info *mtd)
934 {
935 	struct nvmem_device *nvmem;
936 	ssize_t size;
937 	int err;
938 
939 	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
940 		size = mtd_otp_size(mtd, true);
941 		if (size < 0)
942 			return size;
943 
944 		if (size > 0) {
945 			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
946 						       mtd_nvmem_user_otp_reg_read);
947 			if (IS_ERR(nvmem)) {
948 				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
949 				return PTR_ERR(nvmem);
950 			}
951 			mtd->otp_user_nvmem = nvmem;
952 		}
953 	}
954 
955 	if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
956 		size = mtd_otp_size(mtd, false);
957 		if (size < 0) {
958 			err = size;
959 			goto err;
960 		}
961 
962 		if (size > 0) {
963 			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
964 						       mtd_nvmem_fact_otp_reg_read);
965 			if (IS_ERR(nvmem)) {
966 				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
967 				err = PTR_ERR(nvmem);
968 				goto err;
969 			}
970 			mtd->otp_factory_nvmem = nvmem;
971 		}
972 	}
973 
974 	return 0;
975 
976 err:
977 	nvmem_unregister(mtd->otp_user_nvmem);
978 	return err;
979 }
980 
981 /**
982  * mtd_device_parse_register - parse partitions and register an MTD device.
983  *
984  * @mtd: the MTD device to register
985  * @types: the list of MTD partition probes to try, see
986  *         'parse_mtd_partitions()' for more information
987  * @parser_data: MTD partition parser-specific data
988  * @parts: fallback partition information to register, if parsing fails;
989  *         only valid if %nr_parts > %0
990  * @nr_parts: the number of partitions in parts, if zero then the full
991  *            MTD device is registered if no partition info is found
992  *
993  * This function aggregates MTD partitions parsing (done by
994  * 'parse_mtd_partitions()') and MTD device and partitions registering. It
995  * basically follows the most common pattern found in many MTD drivers:
996  *
997  * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
998  *   registered first.
999  * * Then it tries to probe partitions on MTD device @mtd using parsers
1000  *   specified in @types (if @types is %NULL, then the default list of parsers
1001  *   is used, see 'parse_mtd_partitions()' for more information). If none are
1002 *   found, this function falls back to the information specified in
1003  *   @parts/@nr_parts.
1004  * * If no partitions were found this function just registers the MTD device
1005  *   @mtd and exits.
1006  *
1007  * Returns zero in case of success and a negative error code in case of failure.
1008  */
1009 int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
1010 			      struct mtd_part_parser_data *parser_data,
1011 			      const struct mtd_partition *parts,
1012 			      int nr_parts)
1013 {
1014 	int ret;
1015 
1016 	mtd_set_dev_defaults(mtd);
1017 
1018 	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
1019 		ret = add_mtd_device(mtd);
1020 		if (ret)
1021 			return ret;
1022 	}
1023 
1024 	/* Prefer parsed partitions over driver-provided fallback */
1025 	ret = parse_mtd_partitions(mtd, types, parser_data);
1026 	if (ret == -EPROBE_DEFER)
1027 		goto out;
1028 
1029 	if (ret > 0)
1030 		ret = 0;
1031 	else if (nr_parts)
1032 		ret = add_mtd_partitions(mtd, parts, nr_parts);
1033 	else if (!device_is_registered(&mtd->dev))
1034 		ret = add_mtd_device(mtd);
1035 	else
1036 		ret = 0;
1037 
1038 	if (ret)
1039 		goto out;
1040 
1041 	/*
1042 	 * FIXME: some drivers unfortunately call this function more than once.
1043 	 * So we have to check if we've already assigned the reboot notifier.
1044 	 *
1045 	 * Generally, we can make multiple calls work for most cases, but it
1046 	 * does cause problems with parse_mtd_partitions() above (e.g.,
1047 	 * cmdlineparts will register partitions more than once).
1048 	 */
1049 	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
1050 		  "MTD already registered\n");
1051 	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
1052 		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
1053 		register_reboot_notifier(&mtd->reboot_notifier);
1054 	}
1055 
1056 	ret = mtd_otp_nvmem_add(mtd);
1057 
1058 out:
1059 	if (ret && device_is_registered(&mtd->dev))
1060 		del_mtd_device(mtd);
1061 
1062 	return ret;
1063 }
1064 EXPORT_SYMBOL_GPL(mtd_device_parse_register);
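
/*
 * A probe-time sketch of the pattern this helper implements. The partition
 * names, offsets and sizes are hypothetical; a real driver fills its
 * mtd_info from the controller first, and tears down with
 * mtd_device_unregister() in its remove path.
 */
static const struct mtd_partition example_parts[] = {
	{ .name = "boot", .offset = 0,			.size = 0x80000 },
	{ .name = "data", .offset = MTDPART_OFS_APPEND,	.size = MTDPART_SIZ_FULL },
};

static int __maybe_unused example_register(struct mtd_info *mtd)
{
	/* Try the default parsers first, fall back to example_parts */
	return mtd_device_parse_register(mtd, NULL, NULL, example_parts,
					 ARRAY_SIZE(example_parts));
}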
1065 
1066 /**
1067  * mtd_device_unregister - unregister an existing MTD device.
1068  *
1069  * @master: the MTD device to unregister.  This will unregister both the master
1070  *          and any partitions if registered.
1071  */
1072 int mtd_device_unregister(struct mtd_info *master)
1073 {
1074 	int err;
1075 
1076 	if (master->_reboot) {
1077 		unregister_reboot_notifier(&master->reboot_notifier);
1078 		memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
1079 	}
1080 
1081 	nvmem_unregister(master->otp_user_nvmem);
1082 	nvmem_unregister(master->otp_factory_nvmem);
1083 
1084 	err = del_mtd_partitions(master);
1085 	if (err)
1086 		return err;
1087 
1088 	if (!device_is_registered(&master->dev))
1089 		return 0;
1090 
1091 	return del_mtd_device(master);
1092 }
1093 EXPORT_SYMBOL_GPL(mtd_device_unregister);
1094 
1095 /**
1096  *	register_mtd_user - register a 'user' of MTD devices.
1097  *	@new: pointer to notifier info structure
1098  *
1099  *	Registers a pair of callback functions to be called upon addition
1100  *	or removal of MTD devices. Causes the 'add' callback to be immediately
1101  *	invoked for each MTD device currently present in the system.
1102  */
1103 void register_mtd_user(struct mtd_notifier *new)
1104 {
1105 	struct mtd_info *mtd;
1106 
1107 	mutex_lock(&mtd_table_mutex);
1108 
1109 	list_add(&new->list, &mtd_notifiers);
1110 
1111 	__module_get(THIS_MODULE);
1112 
1113 	mtd_for_each_device(mtd)
1114 		new->add(mtd);
1115 
1116 	mutex_unlock(&mtd_table_mutex);
1117 }
1118 EXPORT_SYMBOL_GPL(register_mtd_user);
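
/*
 * A sketch of an MTD 'user': ->add() fires immediately for every device
 * already registered, then again for each new arrival. All names here are
 * hypothetical.
 */
static void example_mtd_add(struct mtd_info *mtd)
{
	pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
}

static void example_mtd_remove(struct mtd_info *mtd)
{
	pr_info("mtd%d (%s) is going away\n", mtd->index, mtd->name);
}

static struct mtd_notifier __maybe_unused example_mtd_user = {
	.add	= example_mtd_add,
	.remove	= example_mtd_remove,
};
/* Pair register_mtd_user(&example_mtd_user) with unregister_mtd_user() */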
1119 
1120 /**
1121  *	unregister_mtd_user - unregister a 'user' of MTD devices.
1122  *	@old: pointer to notifier info structure
1123  *
1124  *	Removes a callback function pair from the list of 'users' to be
1125  *	notified upon addition or removal of MTD devices. Causes the
1126  *	'remove' callback to be immediately invoked for each MTD device
1127  *	currently present in the system.
1128  */
1129 int unregister_mtd_user(struct mtd_notifier *old)
1130 {
1131 	struct mtd_info *mtd;
1132 
1133 	mutex_lock(&mtd_table_mutex);
1134 
1135 	module_put(THIS_MODULE);
1136 
1137 	mtd_for_each_device(mtd)
1138 		old->remove(mtd);
1139 
1140 	list_del(&old->list);
1141 	mutex_unlock(&mtd_table_mutex);
1142 	return 0;
1143 }
1144 EXPORT_SYMBOL_GPL(unregister_mtd_user);
1145 
1146 /**
1147  *	get_mtd_device - obtain a validated handle for an MTD device
1148  *	@mtd: last known address of the required MTD device
1149  *	@num: internal device number of the required MTD device
1150  *
1151  *	Given a number and NULL address, return the num'th entry in the device
1152  *	table, if any.	Given an address and num == -1, search the device table
1153  *	for a device with that address and return it if it's still present. Given
1154  *	both, return the num'th device only if its address matches. Return
1155  *	error code if not.
1156  */
1157 struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
1158 {
1159 	struct mtd_info *ret = NULL, *other;
1160 	int err = -ENODEV;
1161 
1162 	mutex_lock(&mtd_table_mutex);
1163 
1164 	if (num == -1) {
1165 		mtd_for_each_device(other) {
1166 			if (other == mtd) {
1167 				ret = mtd;
1168 				break;
1169 			}
1170 		}
1171 	} else if (num >= 0) {
1172 		ret = idr_find(&mtd_idr, num);
1173 		if (mtd && mtd != ret)
1174 			ret = NULL;
1175 	}
1176 
1177 	if (!ret) {
1178 		ret = ERR_PTR(err);
1179 		goto out;
1180 	}
1181 
1182 	err = __get_mtd_device(ret);
1183 	if (err)
1184 		ret = ERR_PTR(err);
1185 out:
1186 	mutex_unlock(&mtd_table_mutex);
1187 	return ret;
1188 }
1189 EXPORT_SYMBOL_GPL(get_mtd_device);
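
/*
 * A lookup sketch: open mtd0 by number, use it, then drop the reference.
 * Every handle obtained this way must be released with put_mtd_device().
 * The helper name is hypothetical.
 */
static int __maybe_unused example_open_mtd0(void)
{
	struct mtd_info *mtd = get_mtd_device(NULL, 0);

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	pr_info("%s: %llu bytes\n", mtd->name, (unsigned long long)mtd->size);
	put_mtd_device(mtd);

	return 0;
}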
1190 
1191 
1192 int __get_mtd_device(struct mtd_info *mtd)
1193 {
1194 	struct mtd_info *master = mtd_get_master(mtd);
1195 	int err;
1196 
1197 	if (!try_module_get(master->owner))
1198 		return -ENODEV;
1199 
1200 	if (master->_get_device) {
1201 		err = master->_get_device(mtd);
1202 
1203 		if (err) {
1204 			module_put(master->owner);
1205 			return err;
1206 		}
1207 	}
1208 
1209 	master->usecount++;
1210 
1211 	while (mtd->parent) {
1212 		mtd->usecount++;
1213 		mtd = mtd->parent;
1214 	}
1215 
1216 	return 0;
1217 }
1218 EXPORT_SYMBOL_GPL(__get_mtd_device);
1219 
1220 /**
1221  * of_get_mtd_device_by_node - obtain an MTD device associated with a given node
1222  *
1223  * @np: device tree node
1224  */
1225 struct mtd_info *of_get_mtd_device_by_node(struct device_node *np)
1226 {
1227 	struct mtd_info *mtd = NULL;
1228 	struct mtd_info *tmp;
1229 	int err;
1230 
1231 	mutex_lock(&mtd_table_mutex);
1232 
1233 	err = -EPROBE_DEFER;
1234 	mtd_for_each_device(tmp) {
1235 		if (mtd_get_of_node(tmp) == np) {
1236 			mtd = tmp;
1237 			err = __get_mtd_device(mtd);
1238 			break;
1239 		}
1240 	}
1241 
1242 	mutex_unlock(&mtd_table_mutex);
1243 
1244 	return err ? ERR_PTR(err) : mtd;
1245 }
1246 EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node);
1247 
1248 /**
1249  *	get_mtd_device_nm - obtain a validated handle for an MTD device by
1250  *	device name
1251  *	@name: MTD device name to open
1252  *
1253  * 	This function returns the MTD device description structure in case of
1254  * 	success and an ERR_PTR() encoded error code in case of failure.
1255  */
1256 struct mtd_info *get_mtd_device_nm(const char *name)
1257 {
1258 	int err = -ENODEV;
1259 	struct mtd_info *mtd = NULL, *other;
1260 
1261 	mutex_lock(&mtd_table_mutex);
1262 
1263 	mtd_for_each_device(other) {
1264 		if (!strcmp(name, other->name)) {
1265 			mtd = other;
1266 			break;
1267 		}
1268 	}
1269 
1270 	if (!mtd)
1271 		goto out_unlock;
1272 
1273 	err = __get_mtd_device(mtd);
1274 	if (err)
1275 		goto out_unlock;
1276 
1277 	mutex_unlock(&mtd_table_mutex);
1278 	return mtd;
1279 
1280 out_unlock:
1281 	mutex_unlock(&mtd_table_mutex);
1282 	return ERR_PTR(err);
1283 }
1284 EXPORT_SYMBOL_GPL(get_mtd_device_nm);
1285 
1286 void put_mtd_device(struct mtd_info *mtd)
1287 {
1288 	mutex_lock(&mtd_table_mutex);
1289 	__put_mtd_device(mtd);
1290 	mutex_unlock(&mtd_table_mutex);
1291 
1292 }
1293 EXPORT_SYMBOL_GPL(put_mtd_device);
1294 
1295 void __put_mtd_device(struct mtd_info *mtd)
1296 {
1297 	struct mtd_info *master = mtd_get_master(mtd);
1298 
1299 	while (mtd->parent) {
1300 		--mtd->usecount;
1301 		BUG_ON(mtd->usecount < 0);
1302 		mtd = mtd->parent;
1303 	}
1304 
1305 	master->usecount--;
1306 
1307 	if (master->_put_device)
1308 		master->_put_device(master);
1309 
1310 	module_put(master->owner);
1311 }
1312 EXPORT_SYMBOL_GPL(__put_mtd_device);
1313 
1314 /*
1315  * Erase is a synchronous operation. Device drivers are expected to return a
1316  * negative error code if the operation failed and to update instr->fail_addr
1317  * to point to the portion that was not properly erased.
1318  */
1319 int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
1320 {
1321 	struct mtd_info *master = mtd_get_master(mtd);
1322 	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
1323 	struct erase_info adjinstr;
1324 	int ret;
1325 
1326 	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
1327 	adjinstr = *instr;
1328 
1329 	if (!mtd->erasesize || !master->_erase)
1330 		return -ENOTSUPP;
1331 
1332 	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
1333 		return -EINVAL;
1334 	if (!(mtd->flags & MTD_WRITEABLE))
1335 		return -EROFS;
1336 
1337 	if (!instr->len)
1338 		return 0;
1339 
1340 	ledtrig_mtd_activity();
1341 
1342 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1343 		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
1344 				master->erasesize;
1345 		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
1346 				master->erasesize) -
1347 			       adjinstr.addr;
1348 	}
1349 
1350 	adjinstr.addr += mst_ofs;
1351 
1352 	ret = master->_erase(master, &adjinstr);
1353 
1354 	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
1355 		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
1356 		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1357 			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
1358 							 master);
1359 			instr->fail_addr *= mtd->erasesize;
1360 		}
1361 	}
1362 
1363 	return ret;
1364 }
1365 EXPORT_SYMBOL_GPL(mtd_erase);
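
/*
 * An erase sketch: erase the single erase block containing @ofs. On error,
 * instr.fail_addr (if not MTD_FAIL_ADDR_UNKNOWN) points at the part that
 * was not properly erased. The helper name is hypothetical.
 */
static int __maybe_unused example_erase_block(struct mtd_info *mtd, loff_t ofs)
{
	struct erase_info instr = {
		.addr = (loff_t)mtd_div_by_eb(ofs, mtd) * mtd->erasesize,
		.len  = mtd->erasesize,
	};
	int err;

	err = mtd_erase(mtd, &instr);
	if (err && instr.fail_addr != MTD_FAIL_ADDR_UNKNOWN)
		pr_warn("erase failed at 0x%llx\n",
			(unsigned long long)instr.fail_addr);

	return err;
}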
1366 
1367 /*
1368  * This stuff is for eXecute-In-Place. phys is optional and may be set to NULL.
1369  */
1370 int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1371 	      void **virt, resource_size_t *phys)
1372 {
1373 	struct mtd_info *master = mtd_get_master(mtd);
1374 
1375 	*retlen = 0;
1376 	*virt = NULL;
1377 	if (phys)
1378 		*phys = 0;
1379 	if (!master->_point)
1380 		return -EOPNOTSUPP;
1381 	if (from < 0 || from >= mtd->size || len > mtd->size - from)
1382 		return -EINVAL;
1383 	if (!len)
1384 		return 0;
1385 
1386 	from = mtd_get_master_ofs(mtd, from);
1387 	return master->_point(master, from, len, retlen, virt, phys);
1388 }
1389 EXPORT_SYMBOL_GPL(mtd_point);
1390 
1391 /* We probably shouldn't allow XIP if the unpoint isn't NULL */
1392 int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1393 {
1394 	struct mtd_info *master = mtd_get_master(mtd);
1395 
1396 	if (!master->_unpoint)
1397 		return -EOPNOTSUPP;
1398 	if (from < 0 || from >= mtd->size || len > mtd->size - from)
1399 		return -EINVAL;
1400 	if (!len)
1401 		return 0;
1402 	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
1403 }
1404 EXPORT_SYMBOL_GPL(mtd_unpoint);
1405 
1406 /*
1407  * Allow NOMMU mmap() to directly map the device (if not NULL)
1408  * - return the address to which the offset maps
1409  * - return -ENOSYS to indicate refusal to do the mapping
1410  */
1411 unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
1412 				    unsigned long offset, unsigned long flags)
1413 {
1414 	size_t retlen;
1415 	void *virt;
1416 	int ret;
1417 
1418 	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
1419 	if (ret)
1420 		return ret;
1421 	if (retlen != len) {
1422 		mtd_unpoint(mtd, offset, retlen);
1423 		return -ENOSYS;
1424 	}
1425 	return (unsigned long)virt;
1426 }
1427 EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
1428 
1429 static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
1430 				 const struct mtd_ecc_stats *old_stats)
1431 {
1432 	struct mtd_ecc_stats diff;
1433 
1434 	if (master == mtd)
1435 		return;
1436 
1437 	diff = master->ecc_stats;
1438 	diff.failed -= old_stats->failed;
1439 	diff.corrected -= old_stats->corrected;
1440 
1441 	while (mtd->parent) {
1442 		mtd->ecc_stats.failed += diff.failed;
1443 		mtd->ecc_stats.corrected += diff.corrected;
1444 		mtd = mtd->parent;
1445 	}
1446 }
1447 
1448 int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1449 	     u_char *buf)
1450 {
1451 	struct mtd_oob_ops ops = {
1452 		.len = len,
1453 		.datbuf = buf,
1454 	};
1455 	int ret;
1456 
1457 	ret = mtd_read_oob(mtd, from, &ops);
1458 	*retlen = ops.retlen;
1459 
1460 	return ret;
1461 }
1462 EXPORT_SYMBOL_GPL(mtd_read);
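
/*
 * A read sketch showing the -EUCLEAN convention: bitflips at or above
 * mtd->bitflip_threshold are reported as -EUCLEAN with the data already
 * corrected, so callers usually treat it as success and schedule the
 * block for scrubbing. The helper name is hypothetical.
 */
static int __maybe_unused example_read(struct mtd_info *mtd, loff_t from,
				       size_t len, u_char *buf)
{
	size_t retlen;
	int err;

	err = mtd_read(mtd, from, len, &retlen, buf);
	if (err == -EUCLEAN) {
		pr_debug("bitflips near the threshold, consider scrubbing\n");
		err = 0;
	}
	if (err)
		return err;

	return retlen == len ? 0 : -EIO;
}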
1463 
1464 int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1465 	      const u_char *buf)
1466 {
1467 	struct mtd_oob_ops ops = {
1468 		.len = len,
1469 		.datbuf = (u8 *)buf,
1470 	};
1471 	int ret;
1472 
1473 	ret = mtd_write_oob(mtd, to, &ops);
1474 	*retlen = ops.retlen;
1475 
1476 	return ret;
1477 }
1478 EXPORT_SYMBOL_GPL(mtd_write);
1479 
1480 /*
1481  * In blackbox flight-recorder-like scenarios we want to make successful writes
1482  * in interrupt context. panic_write() is only intended to be called when it is
1483  * known the kernel is about to panic and we need the write to succeed. Since
1484  * the kernel is not going to be running for much longer, this function can
1485  * break locks and delay to ensure the write succeeds (but not sleep).
1486  */
1487 int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1488 		    const u_char *buf)
1489 {
1490 	struct mtd_info *master = mtd_get_master(mtd);
1491 
1492 	*retlen = 0;
1493 	if (!master->_panic_write)
1494 		return -EOPNOTSUPP;
1495 	if (to < 0 || to >= mtd->size || len > mtd->size - to)
1496 		return -EINVAL;
1497 	if (!(mtd->flags & MTD_WRITEABLE))
1498 		return -EROFS;
1499 	if (!len)
1500 		return 0;
1501 	if (!master->oops_panic_write)
1502 		master->oops_panic_write = true;
1503 
1504 	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
1505 				    retlen, buf);
1506 }
1507 EXPORT_SYMBOL_GPL(mtd_panic_write);
1508 
1509 static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
1510 			     struct mtd_oob_ops *ops)
1511 {
1512 	/*
1513 	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
1514 	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
1515 	 *  this case.
1516 	 */
1517 	if (!ops->datbuf)
1518 		ops->len = 0;
1519 
1520 	if (!ops->oobbuf)
1521 		ops->ooblen = 0;
1522 
1523 	if (offs < 0 || offs + ops->len > mtd->size)
1524 		return -EINVAL;
1525 
1526 	if (ops->ooblen) {
1527 		size_t maxooblen;
1528 
1529 		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
1530 			return -EINVAL;
1531 
1532 		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
1533 				      mtd_div_by_ws(offs, mtd)) *
1534 			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
1535 		if (ops->ooblen > maxooblen)
1536 			return -EINVAL;
1537 	}
1538 
1539 	return 0;
1540 }
1541 
1542 static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
1543 			    struct mtd_oob_ops *ops)
1544 {
1545 	struct mtd_info *master = mtd_get_master(mtd);
1546 	int ret;
1547 
1548 	from = mtd_get_master_ofs(mtd, from);
1549 	if (master->_read_oob)
1550 		ret = master->_read_oob(master, from, ops);
1551 	else
1552 		ret = master->_read(master, from, ops->len, &ops->retlen,
1553 				    ops->datbuf);
1554 
1555 	return ret;
1556 }
1557 
1558 static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
1559 			     struct mtd_oob_ops *ops)
1560 {
1561 	struct mtd_info *master = mtd_get_master(mtd);
1562 	int ret;
1563 
1564 	to = mtd_get_master_ofs(mtd, to);
1565 	if (master->_write_oob)
1566 		ret = master->_write_oob(master, to, ops);
1567 	else
1568 		ret = master->_write(master, to, ops->len, &ops->retlen,
1569 				     ops->datbuf);
1570 
1571 	return ret;
1572 }
1573 
1574 static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
1575 			       struct mtd_oob_ops *ops)
1576 {
1577 	struct mtd_info *master = mtd_get_master(mtd);
1578 	int ngroups = mtd_pairing_groups(master);
1579 	int npairs = mtd_wunit_per_eb(master) / ngroups;
1580 	struct mtd_oob_ops adjops = *ops;
1581 	unsigned int wunit, oobavail;
1582 	struct mtd_pairing_info info;
1583 	int max_bitflips = 0;
1584 	u32 ebofs, pageofs;
1585 	loff_t base, pos;
1586 
1587 	ebofs = mtd_mod_by_eb(start, mtd);
1588 	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
1589 	info.group = 0;
1590 	info.pair = mtd_div_by_ws(ebofs, mtd);
1591 	pageofs = mtd_mod_by_ws(ebofs, mtd);
1592 	oobavail = mtd_oobavail(mtd, ops);
1593 
1594 	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
1595 		int ret;
1596 
1597 		if (info.pair >= npairs) {
1598 			info.pair = 0;
1599 			base += master->erasesize;
1600 		}
1601 
1602 		wunit = mtd_pairing_info_to_wunit(master, &info);
1603 		pos = mtd_wunit_to_offset(mtd, base, wunit);
1604 
1605 		adjops.len = ops->len - ops->retlen;
1606 		if (adjops.len > mtd->writesize - pageofs)
1607 			adjops.len = mtd->writesize - pageofs;
1608 
1609 		adjops.ooblen = ops->ooblen - ops->oobretlen;
1610 		if (adjops.ooblen > oobavail - adjops.ooboffs)
1611 			adjops.ooblen = oobavail - adjops.ooboffs;
1612 
1613 		if (read) {
1614 			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
1615 			if (ret > 0)
1616 				max_bitflips = max(max_bitflips, ret);
1617 		} else {
1618 			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
1619 		}
1620 
1621 		if (ret < 0)
1622 			return ret;
1623 
1624 		max_bitflips = max(max_bitflips, ret);
1625 		ops->retlen += adjops.retlen;
1626 		ops->oobretlen += adjops.oobretlen;
1627 		adjops.datbuf += adjops.retlen;
1628 		adjops.oobbuf += adjops.oobretlen;
1629 		adjops.ooboffs = 0;
1630 		pageofs = 0;
1631 		info.pair++;
1632 	}
1633 
1634 	return max_bitflips;
1635 }
1636 
1637 int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
1638 {
1639 	struct mtd_info *master = mtd_get_master(mtd);
1640 	struct mtd_ecc_stats old_stats = master->ecc_stats;
1641 	int ret_code;
1642 
1643 	ops->retlen = ops->oobretlen = 0;
1644 
1645 	ret_code = mtd_check_oob_ops(mtd, from, ops);
1646 	if (ret_code)
1647 		return ret_code;
1648 
1649 	ledtrig_mtd_activity();
1650 
1651 	/* Check the validity of a potential fallback on mtd->_read */
1652 	if (!master->_read_oob && (!master->_read || ops->oobbuf))
1653 		return -EOPNOTSUPP;
1654 
1655 	if (ops->stats)
1656 		memset(ops->stats, 0, sizeof(*ops->stats));
1657 
1658 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1659 		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
1660 	else
1661 		ret_code = mtd_read_oob_std(mtd, from, ops);
1662 
1663 	mtd_update_ecc_stats(mtd, master, &old_stats);
1664 
1665 	/*
1666 	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
1667 	 * similar to mtd->_read(), returning a non-negative integer
1668 	 * representing max bitflips. In other cases, mtd->_read_oob() may
1669 	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
1670 	 */
1671 	if (unlikely(ret_code < 0))
1672 		return ret_code;
1673 	if (mtd->ecc_strength == 0)
1674 		return 0;	/* device lacks ecc */
1675 	if (ops->stats)
1676 		ops->stats->max_bitflips = ret_code;
1677 	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
1678 }
1679 EXPORT_SYMBOL_GPL(mtd_read_oob);
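
/*
 * An OOB read sketch: fetch the free (non-ECC) OOB bytes of the page at
 * @ofs using automatic placement. The helper name is hypothetical; @oob is
 * assumed to hold at least mtd->oobavail bytes.
 */
static int __maybe_unused example_read_free_oob(struct mtd_info *mtd,
						loff_t ofs, u8 *oob)
{
	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_AUTO_OOB,
		.oobbuf	= oob,
		.ooblen	= mtd->oobavail,
	};
	int err = mtd_read_oob(mtd, ofs, &ops);

	return err < 0 ? err : 0;
}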
1680 
1681 int mtd_write_oob(struct mtd_info *mtd, loff_t to,
1682 				struct mtd_oob_ops *ops)
1683 {
1684 	struct mtd_info *master = mtd_get_master(mtd);
1685 	int ret;
1686 
1687 	ops->retlen = ops->oobretlen = 0;
1688 
1689 	if (!(mtd->flags & MTD_WRITEABLE))
1690 		return -EROFS;
1691 
1692 	ret = mtd_check_oob_ops(mtd, to, ops);
1693 	if (ret)
1694 		return ret;
1695 
1696 	ledtrig_mtd_activity();
1697 
1698 	/* Check the validity of a potential fallback on mtd->_write */
1699 	if (!master->_write_oob && (!master->_write || ops->oobbuf))
1700 		return -EOPNOTSUPP;
1701 
1702 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1703 		return mtd_io_emulated_slc(mtd, to, false, ops);
1704 
1705 	return mtd_write_oob_std(mtd, to, ops);
1706 }
1707 EXPORT_SYMBOL_GPL(mtd_write_oob);
1708 
1709 /**
1710  * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
1711  * @mtd: MTD device structure
1712  * @section: ECC section. Depending on the layout you may have all the ECC
1713  *	     bytes stored in a single contiguous section, or one section
1714  *	     per ECC chunk (and sometimes several sections for a single
1715  *	     ECC chunk)
1716  * @oobecc: OOB region struct filled with the appropriate ECC position
1717  *	    information
1718  *
1719  * This function returns ECC section information in the OOB area. If you want
1720  * to get all the ECC bytes information, then you should call
1721  * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
1722  *
1723  * Returns zero on success, a negative error code otherwise.
1724  */
1725 int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
1726 		      struct mtd_oob_region *oobecc)
1727 {
1728 	struct mtd_info *master = mtd_get_master(mtd);
1729 
1730 	memset(oobecc, 0, sizeof(*oobecc));
1731 
1732 	if (!master || section < 0)
1733 		return -EINVAL;
1734 
1735 	if (!master->ooblayout || !master->ooblayout->ecc)
1736 		return -ENOTSUPP;
1737 
1738 	return master->ooblayout->ecc(master, section, oobecc);
1739 }
1740 EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
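
/*
 * An iteration sketch matching the kerneldoc above: walk the ECC sections
 * until -ERANGE and sum their lengths (mtd_ooblayout_count_bytes() below
 * does the same thing generically). The helper name is hypothetical.
 */
static int __maybe_unused example_total_eccbytes(struct mtd_info *mtd)
{
	struct mtd_oob_region region;
	int section = 0, total = 0, err;

	while (!(err = mtd_ooblayout_ecc(mtd, section++, &region)))
		total += region.length;

	return err == -ERANGE ? total : err;
}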
1741 
1742 /**
1743  * mtd_ooblayout_free - Get the OOB region definition of a specific free
1744  *			section
1745  * @mtd: MTD device structure
1746  * @section: Free section you are interested in. Depending on the layout
1747  *	     you may have all the free bytes stored in a single contiguous
1748  *	     section, or one section per ECC chunk plus an extra section
1749  *	     for the remaining bytes (or other funky layout).
1750  * @oobfree: OOB region struct filled with the appropriate free position
1751  *	     information
1752  *
1753  * This function returns free bytes position in the OOB area. If you want
1754  * to get all the free bytes information, then you should call
1755  * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
1756  *
1757  * Returns zero on success, a negative error code otherwise.
1758  */
1759 int mtd_ooblayout_free(struct mtd_info *mtd, int section,
1760 		       struct mtd_oob_region *oobfree)
1761 {
1762 	struct mtd_info *master = mtd_get_master(mtd);
1763 
1764 	memset(oobfree, 0, sizeof(*oobfree));
1765 
1766 	if (!master || section < 0)
1767 		return -EINVAL;
1768 
1769 	if (!master->ooblayout || !master->ooblayout->free)
1770 		return -ENOTSUPP;
1771 
1772 	return master->ooblayout->free(master, section, oobfree);
1773 }
1774 EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1775 
1776 /**
1777  * mtd_ooblayout_find_region - Find the region attached to a specific byte
1778  * @mtd: mtd info structure
1779  * @byte: the byte we are searching for
1780  * @sectionp: pointer where the section id will be stored
1781  * @oobregion: used to retrieve the ECC position
1782  * @iter: iterator function. Should be either mtd_ooblayout_free or
1783  *	  mtd_ooblayout_ecc depending on the region type you're searching for
1784  *
1785  * This function returns the section id and oobregion information of a
1786  * specific byte. For example, say you want to know where the 4th ECC byte is
1787  * stored, you'll use:
1788  *
1789  * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1790  *
1791  * Returns zero on success, a negative error code otherwise.
1792  */
1793 static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1794 				int *sectionp, struct mtd_oob_region *oobregion,
1795 				int (*iter)(struct mtd_info *,
1796 					    int section,
1797 					    struct mtd_oob_region *oobregion))
1798 {
1799 	int pos = 0, ret, section = 0;
1800 
1801 	memset(oobregion, 0, sizeof(*oobregion));
1802 
1803 	while (1) {
1804 		ret = iter(mtd, section, oobregion);
1805 		if (ret)
1806 			return ret;
1807 
1808 		if (pos + oobregion->length > byte)
1809 			break;
1810 
1811 		pos += oobregion->length;
1812 		section++;
1813 	}
1814 
1815 	/*
1816 	 * Adjust the region info to make it start at the requested
1817 	 * 'start' byte.
1818 	 */
1819 	oobregion->offset += byte - pos;
1820 	oobregion->length -= byte - pos;
1821 	*sectionp = section;
1822 
1823 	return 0;
1824 }
1825 
1826 /**
1827  * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1828  *				  ECC byte
1829  * @mtd: mtd info structure
1830  * @eccbyte: the byte we are searching for
1831  * @section: pointer where the section id will be stored
1832  * @oobregion: OOB region information
1833  *
1834  * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1835  * byte.
1836  *
1837  * Returns zero on success, a negative error code otherwise.
1838  */
1839 int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1840 				 int *section,
1841 				 struct mtd_oob_region *oobregion)
1842 {
1843 	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1844 					 mtd_ooblayout_ecc);
1845 }
1846 EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1847 
1848 /**
1849  * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1850  * @mtd: mtd info structure
1851  * @buf: destination buffer to store OOB bytes
1852  * @oobbuf: OOB buffer
1853  * @start: first byte to retrieve
1854  * @nbytes: number of bytes to retrieve
1855  * @iter: section iterator
1856  *
1857  * Extract bytes attached to a specific category (ECC or free)
1858  * from the OOB buffer and copy them into buf.
1859  *
1860  * Returns zero on success, a negative error code otherwise.
1861  */
1862 static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1863 				const u8 *oobbuf, int start, int nbytes,
1864 				int (*iter)(struct mtd_info *,
1865 					    int section,
1866 					    struct mtd_oob_region *oobregion))
1867 {
1868 	struct mtd_oob_region oobregion;
1869 	int section, ret;
1870 
1871 	ret = mtd_ooblayout_find_region(mtd, start, &section,
1872 					&oobregion, iter);
1873 
1874 	while (!ret) {
1875 		int cnt;
1876 
1877 		cnt = min_t(int, nbytes, oobregion.length);
1878 		memcpy(buf, oobbuf + oobregion.offset, cnt);
1879 		buf += cnt;
1880 		nbytes -= cnt;
1881 
1882 		if (!nbytes)
1883 			break;
1884 
1885 		ret = iter(mtd, ++section, &oobregion);
1886 	}
1887 
1888 	return ret;
1889 }
1890 
1891 /**
1892  * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1893  * @mtd: mtd info structure
1894  * @buf: source buffer to get OOB bytes from
1895  * @oobbuf: OOB buffer
1896  * @start: first OOB byte to set
1897  * @nbytes: number of OOB bytes to set
1898  * @iter: section iterator
1899  *
1900  * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1901  * is selected by passing the appropriate iterator.
1902  *
1903  * Returns zero on success, a negative error code otherwise.
1904  */
1905 static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1906 				u8 *oobbuf, int start, int nbytes,
1907 				int (*iter)(struct mtd_info *,
1908 					    int section,
1909 					    struct mtd_oob_region *oobregion))
1910 {
1911 	struct mtd_oob_region oobregion;
1912 	int section, ret;
1913 
1914 	ret = mtd_ooblayout_find_region(mtd, start, &section,
1915 					&oobregion, iter);
1916 
1917 	while (!ret) {
1918 		int cnt;
1919 
1920 		cnt = min_t(int, nbytes, oobregion.length);
1921 		memcpy(oobbuf + oobregion.offset, buf, cnt);
1922 		buf += cnt;
1923 		nbytes -= cnt;
1924 
1925 		if (!nbytes)
1926 			break;
1927 
1928 		ret = iter(mtd, ++section, &oobregion);
1929 	}
1930 
1931 	return ret;
1932 }
1933 
1934 /**
1935  * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
1936  * @mtd: mtd info structure
1937  * @iter: category iterator
1938  *
1939  * Count the number of bytes in a given category.
1940  *
1941  * Returns the byte count on success, a negative error code otherwise.
1942  */
1943 static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1944 				int (*iter)(struct mtd_info *,
1945 					    int section,
1946 					    struct mtd_oob_region *oobregion))
1947 {
1948 	struct mtd_oob_region oobregion;
1949 	int section = 0, ret, nbytes = 0;
1950 
1951 	while (1) {
1952 		ret = iter(mtd, section++, &oobregion);
1953 		if (ret) {
1954 			if (ret == -ERANGE)
1955 				ret = nbytes;
1956 			break;
1957 		}
1958 
1959 		nbytes += oobregion.length;
1960 	}
1961 
1962 	return ret;
1963 }
1964 
1965 /**
1966  * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
1967  * @mtd: mtd info structure
1968  * @eccbuf: destination buffer to store ECC bytes
1969  * @oobbuf: OOB buffer
1970  * @start: first ECC byte to retrieve
1971  * @nbytes: number of ECC bytes to retrieve
1972  *
1973  * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
1974  *
1975  * Returns zero on success, a negative error code otherwise.
1976  */
1977 int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
1978 			       const u8 *oobbuf, int start, int nbytes)
1979 {
1980 	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1981 				       mtd_ooblayout_ecc);
1982 }
1983 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
1984 
1985 /**
1986  * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
1987  * @mtd: mtd info structure
1988  * @eccbuf: source buffer to get ECC bytes from
1989  * @oobbuf: OOB buffer
1990  * @start: first ECC byte to set
1991  * @nbytes: number of ECC bytes to set
1992  *
1993  * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
1994  *
1995  * Returns zero on success, a negative error code otherwise.
1996  */
1997 int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
1998 			       u8 *oobbuf, int start, int nbytes)
1999 {
2000 	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
2001 				       mtd_ooblayout_ecc);
2002 }
2003 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
2004 
2005 /**
2006  * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
2007  * @mtd: mtd info structure
2008  * @databuf: destination buffer to store data (free) bytes
2009  * @oobbuf: OOB buffer
2010  * @start: first free byte to retrieve
2011  * @nbytes: number of free bytes to retrieve
2012  *
2013  * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
2014  *
2015  * Returns zero on success, a negative error code otherwise.
2016  */
2017 int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
2018 				const u8 *oobbuf, int start, int nbytes)
2019 {
2020 	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
2021 				       mtd_ooblayout_free);
2022 }
2023 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
2024 
2025 /**
2026  * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
2027  * @mtd: mtd info structure
2028  * @databuf: source buffer to get data bytes from
2029  * @oobbuf: OOB buffer
2030  * @start: first free byte to set
2031  * @nbytes: number of free bytes to set
2032  *
2033  * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
2034  *
2035  * Returns zero on success, a negative error code otherwise.
2036  */
2037 int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
2038 				u8 *oobbuf, int start, int nbytes)
2039 {
2040 	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
2041 				       mtd_ooblayout_free);
2042 }
2043 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
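
/*
 * Usage sketch (illustrative, not part of the driver): pulling the first
 * eight free ("data") bytes out of a raw OOB buffer. "oobbuf" is assumed
 * to hold mtd->oobsize bytes previously filled by mtd_read_oob():
 *
 *	u8 data[8];
 *	int ret;
 *
 *	ret = mtd_ooblayout_get_databytes(mtd, data, oobbuf, 0, sizeof(data));
 *	if (ret)
 *		return ret;
 */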
2044 
2045 /**
2046  * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
2047  * @mtd: mtd info structure
2048  *
2049  * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
2050  *
2051  * Returns the free byte count on success, a negative error code otherwise.
2052  */
2053 int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
2054 {
2055 	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
2056 }
2057 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
2058 
2059 /**
2060  * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
2061  * @mtd: mtd info structure
2062  *
2063  * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
2064  *
2065  * Returns the ECC byte count on success, a negative error code otherwise.
2066  */
2067 int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
2068 {
2069 	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
2070 }
2071 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
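
/*
 * Usage sketch (illustrative, not part of the driver): sizing a buffer
 * from the layout before extracting every ECC byte of a page. "oobbuf"
 * is again assumed to hold the raw OOB data:
 *
 *	int ret, nbytes = mtd_ooblayout_count_eccbytes(mtd);
 *	u8 *ecc;
 *
 *	if (nbytes < 0)
 *		return nbytes;
 *	ecc = kmalloc(nbytes, GFP_KERNEL);
 *	if (!ecc)
 *		return -ENOMEM;
 *	ret = mtd_ooblayout_get_eccbytes(mtd, ecc, oobbuf, 0, nbytes);
 */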
2072 
2073 /*
2074  * Methods to access the protection register area, present in some flash
2075  * devices. The user data is one-time programmable, but the factory data is
2076  * read-only.
2077  */
2078 int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2079 			   struct otp_info *buf)
2080 {
2081 	struct mtd_info *master = mtd_get_master(mtd);
2082 
2083 	if (!master->_get_fact_prot_info)
2084 		return -EOPNOTSUPP;
2085 	if (!len)
2086 		return 0;
2087 	return master->_get_fact_prot_info(master, len, retlen, buf);
2088 }
2089 EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
2090 
2091 int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2092 			   size_t *retlen, u_char *buf)
2093 {
2094 	struct mtd_info *master = mtd_get_master(mtd);
2095 
2096 	*retlen = 0;
2097 	if (!master->_read_fact_prot_reg)
2098 		return -EOPNOTSUPP;
2099 	if (!len)
2100 		return 0;
2101 	return master->_read_fact_prot_reg(master, from, len, retlen, buf);
2102 }
2103 EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
2104 
2105 int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2106 			   struct otp_info *buf)
2107 {
2108 	struct mtd_info *master = mtd_get_master(mtd);
2109 
2110 	if (!master->_get_user_prot_info)
2111 		return -EOPNOTSUPP;
2112 	if (!len)
2113 		return 0;
2114 	return master->_get_user_prot_info(master, len, retlen, buf);
2115 }
2116 EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
2117 
2118 int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2119 			   size_t *retlen, u_char *buf)
2120 {
2121 	struct mtd_info *master = mtd_get_master(mtd);
2122 
2123 	*retlen = 0;
2124 	if (!master->_read_user_prot_reg)
2125 		return -EOPNOTSUPP;
2126 	if (!len)
2127 		return 0;
2128 	return master->_read_user_prot_reg(master, from, len, retlen, buf);
2129 }
2130 EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
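
/*
 * Usage sketch (illustrative, not part of the driver): reading 16 bytes
 * from the start of the user OTP area, e.g. to recover a serial number a
 * manufacturer may have programmed there. Devices without OTP support
 * return -EOPNOTSUPP:
 *
 *	u8 serial[16];
 *	size_t retlen;
 *	int ret;
 *
 *	ret = mtd_read_user_prot_reg(mtd, 0, sizeof(serial), &retlen, serial);
 *	if (ret)
 *		return ret;
 */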
2131 
2132 int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
2133 			    size_t *retlen, const u_char *buf)
2134 {
2135 	struct mtd_info *master = mtd_get_master(mtd);
2136 	int ret;
2137 
2138 	*retlen = 0;
2139 	if (!master->_write_user_prot_reg)
2140 		return -EOPNOTSUPP;
2141 	if (!len)
2142 		return 0;
2143 	ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
2144 	if (ret)
2145 		return ret;
2146 
2147 	/*
2148 	 * If no data could be written at all, we are out of space and
2149 	 * must return -ENOSPC.
2150 	 */
2151 	return (*retlen) ? 0 : -ENOSPC;
2152 }
2153 EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
2154 
2155 int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2156 {
2157 	struct mtd_info *master = mtd_get_master(mtd);
2158 
2159 	if (!master->_lock_user_prot_reg)
2160 		return -EOPNOTSUPP;
2161 	if (!len)
2162 		return 0;
2163 	return master->_lock_user_prot_reg(master, from, len);
2164 }
2165 EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
2166 
2167 int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2168 {
2169 	struct mtd_info *master = mtd_get_master(mtd);
2170 
2171 	if (!master->_erase_user_prot_reg)
2172 		return -EOPNOTSUPP;
2173 	if (!len)
2174 		return 0;
2175 	return master->_erase_user_prot_reg(master, from, len);
2176 }
2177 EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
2178 
2179 /* Chip-supported device locking */
2180 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2181 {
2182 	struct mtd_info *master = mtd_get_master(mtd);
2183 
2184 	if (!master->_lock)
2185 		return -EOPNOTSUPP;
2186 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2187 		return -EINVAL;
2188 	if (!len)
2189 		return 0;
2190 
2191 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2192 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2193 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2194 	}
2195 
2196 	return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
2197 }
2198 EXPORT_SYMBOL_GPL(mtd_lock);
2199 
2200 int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2201 {
2202 	struct mtd_info *master = mtd_get_master(mtd);
2203 
2204 	if (!master->_unlock)
2205 		return -EOPNOTSUPP;
2206 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2207 		return -EINVAL;
2208 	if (!len)
2209 		return 0;
2210 
2211 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2212 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2213 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2214 	}
2215 
2216 	return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
2217 }
2218 EXPORT_SYMBOL_GPL(mtd_unlock);
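
/*
 * Usage sketch (illustrative, not part of the driver): clearing chip-level
 * write protection on a single erase block before programming it. "ofs" is
 * assumed caller context; chips without locking support return -EOPNOTSUPP,
 * which callers typically treat as "nothing to unlock":
 *
 *	ret = mtd_unlock(mtd, ofs, mtd->erasesize);
 *	if (ret && ret != -EOPNOTSUPP)
 *		return ret;
 */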
2219 
2220 int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2221 {
2222 	struct mtd_info *master = mtd_get_master(mtd);
2223 
2224 	if (!master->_is_locked)
2225 		return -EOPNOTSUPP;
2226 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2227 		return -EINVAL;
2228 	if (!len)
2229 		return 0;
2230 
2231 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2232 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2233 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2234 	}
2235 
2236 	return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
2237 }
2238 EXPORT_SYMBOL_GPL(mtd_is_locked);
2239 
2240 int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
2241 {
2242 	struct mtd_info *master = mtd_get_master(mtd);
2243 
2244 	if (ofs < 0 || ofs >= mtd->size)
2245 		return -EINVAL;
2246 	if (!master->_block_isreserved)
2247 		return 0;
2248 
2249 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2250 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2251 
2252 	return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
2253 }
2254 EXPORT_SYMBOL_GPL(mtd_block_isreserved);
2255 
2256 int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
2257 {
2258 	struct mtd_info *master = mtd_get_master(mtd);
2259 
2260 	if (ofs < 0 || ofs >= mtd->size)
2261 		return -EINVAL;
2262 	if (!master->_block_isbad)
2263 		return 0;
2264 
2265 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2266 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2267 
2268 	return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2269 }
2270 EXPORT_SYMBOL_GPL(mtd_block_isbad);
2271 
2272 int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
2273 {
2274 	struct mtd_info *master = mtd_get_master(mtd);
2275 	int ret;
2276 
2277 	if (!master->_block_markbad)
2278 		return -EOPNOTSUPP;
2279 	if (ofs < 0 || ofs >= mtd->size)
2280 		return -EINVAL;
2281 	if (!(mtd->flags & MTD_WRITEABLE))
2282 		return -EROFS;
2283 
2284 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2285 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2286 
2287 	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
2288 	if (ret)
2289 		return ret;
2290 
2291 	while (mtd->parent) {
2292 		mtd->ecc_stats.badblocks++;
2293 		mtd = mtd->parent;
2294 	}
2295 
2296 	return 0;
2297 }
2298 EXPORT_SYMBOL_GPL(mtd_block_markbad);
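
/*
 * Usage sketch (illustrative, not part of the driver): the usual pattern
 * for raw NAND users is to walk a device one erase block at a time and
 * skip blocks already marked bad, here simply counting the good ones:
 *
 *	loff_t ofs;
 *	int good = 0;
 *
 *	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
 *		if (mtd_block_isbad(mtd, ofs))
 *			continue;
 *		good++;
 *	}
 */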
2299 
2300 /*
2301  * default_mtd_writev - the default writev method
2302  * @mtd: mtd device description object pointer
2303  * @vecs: the vectors to write
2304  * @count: count of vectors in @vecs
2305  * @to: the MTD device offset to write to
2306  * @retlen: on exit contains the count of bytes written to the MTD device.
2307  *
2308  * This function returns zero in case of success and a negative error code in
2309  * case of failure.
2310  */
2311 static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2312 			      unsigned long count, loff_t to, size_t *retlen)
2313 {
2314 	unsigned long i;
2315 	size_t totlen = 0, thislen;
2316 	int ret = 0;
2317 
2318 	for (i = 0; i < count; i++) {
2319 		if (!vecs[i].iov_len)
2320 			continue;
2321 		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2322 				vecs[i].iov_base);
2323 		totlen += thislen;
2324 		if (ret || thislen != vecs[i].iov_len)
2325 			break;
2326 		to += vecs[i].iov_len;
2327 	}
2328 	*retlen = totlen;
2329 	return ret;
2330 }
2331 
2332 /*
2333  * mtd_writev - the vector-based MTD write method
2334  * @mtd: mtd device description object pointer
2335  * @vecs: the vectors to write
2336  * @count: count of vectors in @vecs
2337  * @to: the MTD device offset to write to
2338  * @retlen: on exit contains the count of bytes written to the MTD device.
2339  *
2340  * This function returns zero in case of success and a negative error code in
2341  * case of failure.
2342  */
2343 int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2344 	       unsigned long count, loff_t to, size_t *retlen)
2345 {
2346 	struct mtd_info *master = mtd_get_master(mtd);
2347 
2348 	*retlen = 0;
2349 	if (!(mtd->flags & MTD_WRITEABLE))
2350 		return -EROFS;
2351 
2352 	if (!master->_writev)
2353 		return default_mtd_writev(mtd, vecs, count, to, retlen);
2354 
2355 	return master->_writev(master, vecs, count,
2356 			       mtd_get_master_ofs(mtd, to), retlen);
2357 }
2358 EXPORT_SYMBOL_GPL(mtd_writev);
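
/*
 * Usage sketch (illustrative, not part of the driver): writing a header
 * and a payload that live in separate buffers with one mtd_writev() call.
 * "hdr", "payload", "payload_len" and "to" are assumed caller context:
 *
 *	struct kvec vecs[2];
 *	size_t retlen;
 *
 *	vecs[0].iov_base = hdr;
 *	vecs[0].iov_len = sizeof(*hdr);
 *	vecs[1].iov_base = payload;
 *	vecs[1].iov_len = payload_len;
 *
 *	ret = mtd_writev(mtd, vecs, 2, to, &retlen);
 */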
2359 
2360 /**
2361  * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
2362  * @mtd: mtd device description object pointer
2363  * @size: a pointer to the ideal or maximum size of the allocation; it is
2364  *        updated to the actual allocation size on success.
2365  *
2366  * This routine attempts to allocate a contiguous kernel buffer up to
2367  * the specified size, backing off the size of the request exponentially
2368  * until the request succeeds or until the allocation size falls below
2369  * the system page size. This attempts to make sure it does not adversely
2370  * impact system performance, so when allocating more than one page, we
2371  * ask the memory allocator to avoid re-trying, swapping, writing back
2372  * or performing I/O.
2373  *
2374  * Note, this function also makes sure that the allocated buffer is aligned to
2375  * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
2376  *
2377  * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
2378  * to handle smaller (i.e. degraded) buffer allocations under low-memory or
2379  * fragmented-memory situations where reductions from the requested ideal
2380  * size are acceptable.
2381  *
2382  * Returns a pointer to the allocated buffer on success; otherwise, NULL.
2383  */
2384 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
2385 {
2386 	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
2387 	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
2388 	void *kbuf;
2389 
2390 	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
2391 
2392 	while (*size > min_alloc) {
2393 		kbuf = kmalloc(*size, flags);
2394 		if (kbuf)
2395 			return kbuf;
2396 
2397 		*size >>= 1;
2398 		*size = ALIGN(*size, mtd->writesize);
2399 	}
2400 
2401 	/*
2402 	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
2403 	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
2404 	 */
2405 	return kmalloc(*size, GFP_KERNEL);
2406 }
2407 EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
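
/*
 * Usage sketch (illustrative, not part of the driver): asking for a
 * whole-eraseblock buffer but accepting less under memory pressure. On
 * success "size" holds the length actually granted, which the routine
 * keeps aligned to mtd->writesize whenever it has to shrink the request,
 * so the caller can then loop over the range in "size"-byte chunks:
 *
 *	size_t size = mtd->erasesize;
 *	void *buf = mtd_kmalloc_up_to(mtd, &size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 */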
2408 
2409 #ifdef CONFIG_PROC_FS
2410 
2411 /*====================================================================*/
2412 /* Support for /proc/mtd */
2413 
2414 static int mtd_proc_show(struct seq_file *m, void *v)
2415 {
2416 	struct mtd_info *mtd;
2417 
2418 	seq_puts(m, "dev:    size   erasesize  name\n");
2419 	mutex_lock(&mtd_table_mutex);
2420 	mtd_for_each_device(mtd) {
2421 		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2422 			   mtd->index, (unsigned long long)mtd->size,
2423 			   mtd->erasesize, mtd->name);
2424 	}
2425 	mutex_unlock(&mtd_table_mutex);
2426 	return 0;
2427 }
2428 #endif /* CONFIG_PROC_FS */
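
/*
 * With two devices registered, the resulting /proc/mtd output looks like
 * the following (device names and sizes are example values):
 *
 *	dev:    size   erasesize  name
 *	mtd0: 01000000 00020000 "u-boot"
 *	mtd1: 07f00000 00020000 "rootfs"
 *
 * The size and erasesize columns are printed in hexadecimal.
 */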
2429 
2430 /*====================================================================*/
2431 /* Init code */
2432 
2433 static struct backing_dev_info * __init mtd_bdi_init(const char *name)
2434 {
2435 	struct backing_dev_info *bdi;
2436 	int ret;
2437 
2438 	bdi = bdi_alloc(NUMA_NO_NODE);
2439 	if (!bdi)
2440 		return ERR_PTR(-ENOMEM);
2441 	bdi->ra_pages = 0;
2442 	bdi->io_pages = 0;
2443 
2444 	/*
2445 	 * We append a '-0' suffix to the name to keep the name format we
2446 	 * used to have. Since this is called only once, the name is unique.
2447 	 */
2448 	ret = bdi_register(bdi, "%.28s-0", name);
2449 	if (ret)
2450 		bdi_put(bdi);
2451 
2452 	return ret ? ERR_PTR(ret) : bdi;
2453 }
2454 
2455 static struct proc_dir_entry *proc_mtd;
2456 
2457 static int __init init_mtd(void)
2458 {
2459 	int ret;
2460 
2461 	ret = class_register(&mtd_class);
2462 	if (ret)
2463 		goto err_reg;
2464 
2465 	mtd_bdi = mtd_bdi_init("mtd");
2466 	if (IS_ERR(mtd_bdi)) {
2467 		ret = PTR_ERR(mtd_bdi);
2468 		goto err_bdi;
2469 	}
2470 
2471 	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2472 
2473 	ret = init_mtdchar();
2474 	if (ret)
2475 		goto out_procfs;
2476 
2477 	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2478 	debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
2479 			    &mtd_expert_analysis_mode);
2480 
2481 	return 0;
2482 
2483 out_procfs:
2484 	if (proc_mtd)
2485 		remove_proc_entry("mtd", NULL);
2486 	bdi_put(mtd_bdi);
2487 err_bdi:
2488 	class_unregister(&mtd_class);
2489 err_reg:
2490 	pr_err("Error registering mtd class or bdi: %d\n", ret);
2491 	return ret;
2492 }
2493 
2494 static void __exit cleanup_mtd(void)
2495 {
2496 	debugfs_remove_recursive(dfs_dir_mtd);
2497 	cleanup_mtdchar();
2498 	if (proc_mtd)
2499 		remove_proc_entry("mtd", NULL);
2500 	class_unregister(&mtd_class);
2501 	bdi_unregister(mtd_bdi);
2502 	bdi_put(mtd_bdi);
2503 	idr_destroy(&mtd_idr);
2504 }
2505 
2506 module_init(init_mtd);
2507 module_exit(cleanup_mtd);
2508 
2509 MODULE_LICENSE("GPL");
2510 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2511 MODULE_DESCRIPTION("Core MTD registration and access routines");
2512