xref: /openbmc/linux/drivers/mtd/nand/raw/nand_base.c (revision d2574c33)
1 /*
2  *  Overview:
3  *   This is the generic MTD driver for NAND flash devices. It should be
4  *   capable of working with almost all NAND chips currently available.
5  *
6  *	Additional technical information is available on
7  *	http://www.linux-mtd.infradead.org/doc/nand.html
8  *
9  *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
10  *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
11  *
12  *  Credits:
13  *	David Woodhouse for adding multichip support
14  *
15  *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
16  *	rework for 2K page size chips
17  *
18  *  TODO:
19  *	Enable cached programming for 2k page size chips
20  *	Check, if mtd->ecctype should be set to MTD_ECC_HW
21  *	if we have HW ECC support.
22  *	BBT table is not serialized, has to be fixed
23  *
24  * This program is free software; you can redistribute it and/or modify
25  * it under the terms of the GNU General Public License version 2 as
26  * published by the Free Software Foundation.
27  *
28  */
29 
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 
32 #include <linux/module.h>
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/err.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/mm.h>
39 #include <linux/types.h>
40 #include <linux/mtd/mtd.h>
41 #include <linux/mtd/nand_ecc.h>
42 #include <linux/mtd/nand_bch.h>
43 #include <linux/interrupt.h>
44 #include <linux/bitops.h>
45 #include <linux/io.h>
46 #include <linux/mtd/partitions.h>
47 #include <linux/of.h>
48 #include <linux/gpio/consumer.h>
49 
50 #include "internals.h"
51 
52 /* Define default oob placement schemes for large and small page devices */
53 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
54 				 struct mtd_oob_region *oobregion)
55 {
56 	struct nand_chip *chip = mtd_to_nand(mtd);
57 	struct nand_ecc_ctrl *ecc = &chip->ecc;
58 
59 	if (section > 1)
60 		return -ERANGE;
61 
62 	if (!section) {
63 		oobregion->offset = 0;
64 		if (mtd->oobsize == 16)
65 			oobregion->length = 4;
66 		else
67 			oobregion->length = 3;
68 	} else {
69 		if (mtd->oobsize == 8)
70 			return -ERANGE;
71 
72 		oobregion->offset = 6;
73 		oobregion->length = ecc->total - 4;
74 	}
75 
76 	return 0;
77 }
78 
79 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
80 				  struct mtd_oob_region *oobregion)
81 {
82 	if (section > 1)
83 		return -ERANGE;
84 
85 	if (mtd->oobsize == 16) {
86 		if (section)
87 			return -ERANGE;
88 
89 		oobregion->length = 8;
90 		oobregion->offset = 8;
91 	} else {
92 		oobregion->length = 2;
93 		if (!section)
94 			oobregion->offset = 3;
95 		else
96 			oobregion->offset = 6;
97 	}
98 
99 	return 0;
100 }
101 
/* Default OOB layout operations for small page (<= 512 bytes) devices */
const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
	.ecc = nand_ooblayout_ecc_sp,
	.free = nand_ooblayout_free_sp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
107 
108 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
109 				 struct mtd_oob_region *oobregion)
110 {
111 	struct nand_chip *chip = mtd_to_nand(mtd);
112 	struct nand_ecc_ctrl *ecc = &chip->ecc;
113 
114 	if (section || !ecc->total)
115 		return -ERANGE;
116 
117 	oobregion->length = ecc->total;
118 	oobregion->offset = mtd->oobsize - oobregion->length;
119 
120 	return 0;
121 }
122 
123 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
124 				  struct mtd_oob_region *oobregion)
125 {
126 	struct nand_chip *chip = mtd_to_nand(mtd);
127 	struct nand_ecc_ctrl *ecc = &chip->ecc;
128 
129 	if (section)
130 		return -ERANGE;
131 
132 	oobregion->length = mtd->oobsize - ecc->total - 2;
133 	oobregion->offset = 2;
134 
135 	return 0;
136 }
137 
/* Default OOB layout operations for large page (> 512 bytes) devices */
const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.free = nand_ooblayout_free_lp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
143 
144 /*
145  * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
146  * are placed at a fixed offset.
147  */
148 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
149 					 struct mtd_oob_region *oobregion)
150 {
151 	struct nand_chip *chip = mtd_to_nand(mtd);
152 	struct nand_ecc_ctrl *ecc = &chip->ecc;
153 
154 	if (section)
155 		return -ERANGE;
156 
157 	switch (mtd->oobsize) {
158 	case 64:
159 		oobregion->offset = 40;
160 		break;
161 	case 128:
162 		oobregion->offset = 80;
163 		break;
164 	default:
165 		return -EINVAL;
166 	}
167 
168 	oobregion->length = ecc->total;
169 	if (oobregion->offset + oobregion->length > mtd->oobsize)
170 		return -ERANGE;
171 
172 	return 0;
173 }
174 
175 static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
176 					  struct mtd_oob_region *oobregion)
177 {
178 	struct nand_chip *chip = mtd_to_nand(mtd);
179 	struct nand_ecc_ctrl *ecc = &chip->ecc;
180 	int ecc_offset = 0;
181 
182 	if (section < 0 || section > 1)
183 		return -ERANGE;
184 
185 	switch (mtd->oobsize) {
186 	case 64:
187 		ecc_offset = 40;
188 		break;
189 	case 128:
190 		ecc_offset = 80;
191 		break;
192 	default:
193 		return -EINVAL;
194 	}
195 
196 	if (section == 0) {
197 		oobregion->offset = 2;
198 		oobregion->length = ecc_offset - 2;
199 	} else {
200 		oobregion->offset = ecc_offset + ecc->total;
201 		oobregion->length = mtd->oobsize - oobregion->offset;
202 	}
203 
204 	return 0;
205 }
206 
/* Legacy large-page layout for 1-bit Hamming ECC at a fixed OOB offset */
static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};
211 
212 static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
213 {
214 	int ret = 0;
215 
216 	/* Start address must align on block boundary */
217 	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
218 		pr_debug("%s: unaligned address\n", __func__);
219 		ret = -EINVAL;
220 	}
221 
222 	/* Length must align on block boundary */
223 	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
224 		pr_debug("%s: length not block aligned\n", __func__);
225 		ret = -EINVAL;
226 	}
227 
228 	return ret;
229 }
230 
231 /**
232  * nand_select_target() - Select a NAND target (A.K.A. die)
233  * @chip: NAND chip object
234  * @cs: the CS line to select. Note that this CS id is always from the chip
235  *	PoV, not the controller one
236  *
237  * Select a NAND target so that further operations executed on @chip go to the
238  * selected NAND target.
239  */
240 void nand_select_target(struct nand_chip *chip, unsigned int cs)
241 {
242 	/*
243 	 * cs should always lie between 0 and chip->numchips, when that's not
244 	 * the case it's a bug and the caller should be fixed.
245 	 */
246 	if (WARN_ON(cs > chip->numchips))
247 		return;
248 
249 	chip->cur_cs = cs;
250 
251 	if (chip->legacy.select_chip)
252 		chip->legacy.select_chip(chip, cs);
253 }
254 EXPORT_SYMBOL_GPL(nand_select_target);
255 
256 /**
257  * nand_deselect_target() - Deselect the currently selected target
258  * @chip: NAND chip object
259  *
260  * Deselect the currently selected NAND target. The result of operations
261  * executed on @chip after the target has been deselected is undefined.
262  */
263 void nand_deselect_target(struct nand_chip *chip)
264 {
265 	if (chip->legacy.select_chip)
266 		chip->legacy.select_chip(chip, -1);
267 
268 	chip->cur_cs = -1;
269 }
270 EXPORT_SYMBOL_GPL(nand_deselect_target);
271 
272 /**
273  * nand_release_device - [GENERIC] release chip
274  * @chip: NAND chip object
275  *
276  * Release chip lock and wake up anyone waiting on the device.
277  */
278 static void nand_release_device(struct nand_chip *chip)
279 {
280 	/* Release the controller and the chip */
281 	mutex_unlock(&chip->controller->lock);
282 	mutex_unlock(&chip->lock);
283 }
284 
285 /**
286  * nand_block_bad - [DEFAULT] Read bad block marker from the chip
287  * @chip: NAND chip object
288  * @ofs: offset from device start
289  *
290  * Check, if the block is bad.
291  */
292 static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
293 {
294 	struct mtd_info *mtd = nand_to_mtd(chip);
295 	int page, page_end, res;
296 	u8 bad;
297 
298 	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
299 		ofs += mtd->erasesize - mtd->writesize;
300 
301 	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
302 	page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
303 
304 	for (; page < page_end; page++) {
305 		res = chip->ecc.read_oob(chip, page);
306 		if (res < 0)
307 			return res;
308 
309 		bad = chip->oob_poi[chip->badblockpos];
310 
311 		if (likely(chip->badblockbits == 8))
312 			res = bad != 0xFF;
313 		else
314 			res = hweight8(bad) < chip->badblockbits;
315 		if (res)
316 			return res;
317 	}
318 
319 	return 0;
320 }
321 
322 static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
323 {
324 	if (chip->legacy.block_bad)
325 		return chip->legacy.block_bad(chip, ofs);
326 
327 	return nand_block_bad(chip, ofs);
328 }
329 
/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @chip: NAND chip structure
 *
 * Lock the device and its controller for exclusive access
 *
 * Return: -EBUSY if the chip has been suspended, 0 otherwise
 */
static int nand_get_device(struct nand_chip *chip)
{
	/* Take the chip lock first so the suspended flag is read stably */
	mutex_lock(&chip->lock);
	if (chip->suspended) {
		/* Refuse to hand out a suspended device */
		mutex_unlock(&chip->lock);
		return -EBUSY;
	}
	/*
	 * Then take the controller lock; both are released (in reverse
	 * order) by nand_release_device().
	 */
	mutex_lock(&chip->controller->lock);

	return 0;
}
349 
350 /**
351  * nand_check_wp - [GENERIC] check if the chip is write protected
352  * @chip: NAND chip object
353  *
354  * Check, if the device is write protected. The function expects, that the
355  * device is already selected.
356  */
357 static int nand_check_wp(struct nand_chip *chip)
358 {
359 	u8 status;
360 	int ret;
361 
362 	/* Broken xD cards report WP despite being writable */
363 	if (chip->options & NAND_BROKEN_XD)
364 		return 0;
365 
366 	/* Check the WP bit */
367 	ret = nand_status_op(chip, &status);
368 	if (ret)
369 		return ret;
370 
371 	return status & NAND_STATUS_WP ? 0 : 1;
372 }
373 
374 /**
375  * nand_fill_oob - [INTERN] Transfer client buffer to oob
376  * @chip: NAND chip object
377  * @oob: oob data buffer
378  * @len: oob data write length
379  * @ops: oob ops structure
380  */
381 static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
382 			      struct mtd_oob_ops *ops)
383 {
384 	struct mtd_info *mtd = nand_to_mtd(chip);
385 	int ret;
386 
387 	/*
388 	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
389 	 * data from a previous OOB read.
390 	 */
391 	memset(chip->oob_poi, 0xff, mtd->oobsize);
392 
393 	switch (ops->mode) {
394 
395 	case MTD_OPS_PLACE_OOB:
396 	case MTD_OPS_RAW:
397 		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
398 		return oob + len;
399 
400 	case MTD_OPS_AUTO_OOB:
401 		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
402 						  ops->ooboffs, len);
403 		BUG_ON(ret);
404 		return oob + len;
405 
406 	default:
407 		BUG();
408 	}
409 	return NULL;
410 }
411 
/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band.
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
			 __func__, (unsigned int)to, (int)ops->ooblen);

	/* Number of OOB bytes available for this access mode (raw vs auto) */
	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
				__func__);
		return -EINVAL;
	}

	/* Which die the destination offset lives on */
	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999.  dwmw2.
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagebuf)
		chip->pagebuf = -1;

	/* Lay out the caller's OOB bytes into chip->oob_poi */
	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	/* Raw mode bypasses ECC when writing the OOB area */
	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}
481 
/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver. It provides the details for writing a bad block marker to a
 * block.
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, i = 0;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* On 16-bit devices the marker must be word aligned */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	/* Write to first/last page(s) if necessary */
	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
		ofs += mtd->erasesize - mtd->writesize;
	do {
		res = nand_do_write_oob(chip, ofs, &ops);
		/* Keep the first error but still attempt the second page */
		if (!ret)
			ret = res;

		i++;
		ofs += mtd->writesize;
	} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);

	return ret;
}
523 
524 /**
525  * nand_markbad_bbm - mark a block by updating the BBM
526  * @chip: NAND chip object
527  * @ofs: offset of the block to mark bad
528  */
529 int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
530 {
531 	if (chip->legacy.block_markbad)
532 		return chip->legacy.block_markbad(chip, ofs);
533 
534 	return nand_default_block_markbad(chip, ofs);
535 }
536 
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 *
 * We try operations in the following order:
 *
 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 *  (2) write bad block marker to OOB area of affected block (unless flag
 *      NAND_BBT_NO_OOB_BBM is present)
 *  (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
 */
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		/* Erase failure is ignored: the OOB write is still attempted */
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		ret = nand_get_device(chip);
		if (ret)
			return ret;

		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		/* Retain the first error encountered, if any */
		if (!ret)
			ret = res;
	}

	/* Only count the block if everything above succeeded */
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
591 
592 /**
593  * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
594  * @mtd: MTD device structure
595  * @ofs: offset from device start
596  *
597  * Check if the block is marked as reserved.
598  */
599 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
600 {
601 	struct nand_chip *chip = mtd_to_nand(mtd);
602 
603 	if (!chip->bbt)
604 		return 0;
605 	/* Return info from the table */
606 	return nand_isreserved_bbt(chip, ofs);
607 }
608 
609 /**
610  * nand_block_checkbad - [GENERIC] Check if a block is marked bad
611  * @chip: NAND chip object
612  * @ofs: offset from device start
613  * @allowbbt: 1, if its allowed to access the bbt area
614  *
615  * Check, if the block is bad. Either by reading the bad block table or
616  * calling of the scan function.
617  */
618 static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
619 {
620 	/* Return info from the table */
621 	if (chip->bbt)
622 		return nand_isbad_bbt(chip, ofs, allowbbt);
623 
624 	return nand_isbad_bbm(chip, ofs);
625 }
626 
627 /**
628  * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
629  * @chip: NAND chip structure
630  * @timeout_ms: Timeout in ms
631  *
632  * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
633  * If that does not happen whitin the specified timeout, -ETIMEDOUT is
634  * returned.
635  *
636  * This helper is intended to be used when the controller does not have access
637  * to the NAND R/B pin.
638  *
639  * Be aware that calling this helper from an ->exec_op() implementation means
640  * ->exec_op() must be re-entrant.
641  *
642  * Return 0 if the NAND chip is ready, a negative error otherwise.
643  */
644 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
645 {
646 	const struct nand_sdr_timings *timings;
647 	u8 status = 0;
648 	int ret;
649 
650 	if (!nand_has_exec_op(chip))
651 		return -ENOTSUPP;
652 
653 	/* Wait tWB before polling the STATUS reg. */
654 	timings = nand_get_sdr_timings(&chip->data_interface);
655 	ndelay(PSEC_TO_NSEC(timings->tWB_max));
656 
657 	ret = nand_status_op(chip, NULL);
658 	if (ret)
659 		return ret;
660 
661 	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
662 	do {
663 		ret = nand_read_data_op(chip, &status, sizeof(status), true);
664 		if (ret)
665 			break;
666 
667 		if (status & NAND_STATUS_READY)
668 			break;
669 
670 		/*
671 		 * Typical lowest execution time for a tR on most NANDs is 10us,
672 		 * use this as polling delay before doing something smarter (ie.
673 		 * deriving a delay from the timeout value, timeout_ms/ratio).
674 		 */
675 		udelay(10);
676 	} while	(time_before(jiffies, timeout_ms));
677 
678 	/*
679 	 * We have to exit READ_STATUS mode in order to read real data on the
680 	 * bus in case the WAITRDY instruction is preceding a DATA_IN
681 	 * instruction.
682 	 */
683 	nand_exit_status_op(chip);
684 
685 	if (ret)
686 		return ret;
687 
688 	return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
689 };
690 EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
691 
692 /**
693  * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
694  * @chip: NAND chip structure
695  * @gpiod: GPIO descriptor of R/B pin
696  * @timeout_ms: Timeout in ms
697  *
698  * Poll the R/B GPIO pin until it becomes ready. If that does not happen
699  * whitin the specified timeout, -ETIMEDOUT is returned.
700  *
701  * This helper is intended to be used when the controller has access to the
702  * NAND R/B pin over GPIO.
703  *
704  * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
705  */
706 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
707 		      unsigned long timeout_ms)
708 {
709 	/* Wait until R/B pin indicates chip is ready or timeout occurs */
710 	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
711 	do {
712 		if (gpiod_get_value_cansleep(gpiod))
713 			return 0;
714 
715 		cond_resched();
716 	} while	(time_before(jiffies, timeout_ms));
717 
718 	return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
719 };
720 EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
721 
722 /**
723  * panic_nand_wait - [GENERIC] wait until the command is done
724  * @chip: NAND chip structure
725  * @timeo: timeout
726  *
727  * Wait for command done. This is a helper function for nand_wait used when
728  * we are in interrupt context. May happen when in panic and trying to write
729  * an oops through mtdoops.
730  */
731 void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
732 {
733 	int i;
734 	for (i = 0; i < timeo; i++) {
735 		if (chip->legacy.dev_ready) {
736 			if (chip->legacy.dev_ready(chip))
737 				break;
738 		} else {
739 			int ret;
740 			u8 status;
741 
742 			ret = nand_read_data_op(chip, &status, sizeof(status),
743 						true);
744 			if (ret)
745 				return;
746 
747 			if (status & NAND_STATUS_READY)
748 				break;
749 		}
750 		mdelay(1);
751 	}
752 }
753 
754 static bool nand_supports_get_features(struct nand_chip *chip, int addr)
755 {
756 	return (chip->parameters.supports_set_get_features &&
757 		test_bit(addr, chip->parameters.get_feature_list));
758 }
759 
760 static bool nand_supports_set_features(struct nand_chip *chip, int addr)
761 {
762 	return (chip->parameters.supports_set_get_features &&
763 		test_bit(addr, chip->parameters.set_feature_list));
764 }
765 
766 /**
767  * nand_reset_data_interface - Reset data interface and timings
768  * @chip: The NAND chip
769  * @chipnr: Internal die id
770  *
771  * Reset the Data interface and timings to ONFI mode 0.
772  *
773  * Returns 0 for success or negative error code otherwise.
774  */
775 static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
776 {
777 	int ret;
778 
779 	if (!nand_has_setup_data_iface(chip))
780 		return 0;
781 
782 	/*
783 	 * The ONFI specification says:
784 	 * "
785 	 * To transition from NV-DDR or NV-DDR2 to the SDR data
786 	 * interface, the host shall use the Reset (FFh) command
787 	 * using SDR timing mode 0. A device in any timing mode is
788 	 * required to recognize Reset (FFh) command issued in SDR
789 	 * timing mode 0.
790 	 * "
791 	 *
792 	 * Configure the data interface in SDR mode and set the
793 	 * timings to timing mode 0.
794 	 */
795 
796 	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
797 	ret = chip->controller->ops->setup_data_interface(chip, chipnr,
798 							&chip->data_interface);
799 	if (ret)
800 		pr_err("Failed to configure data interface to SDR timing mode 0\n");
801 
802 	return ret;
803 }
804 
/**
 * nand_setup_data_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Find and configure the best data interface and NAND timings supported by
 * the chip and the driver.
 * First tries to retrieve supported timing modes from ONFI information,
 * and if the NAND chip does not support ONFI, relies on the
 * ->onfi_timing_mode_default specified in the nand_ids table.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
{
	/* Subfeature payload: first byte carries the requested timing mode */
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
		chip->onfi_timing_mode_default,
	};
	int ret;

	if (!nand_has_setup_data_iface(chip))
		return 0;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		nand_select_target(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		nand_deselect_target(chip);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = chip->controller->ops->setup_data_interface(chip, chipnr,
							&chip->data_interface);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		return 0;

	/* Read the timing mode back to confirm the chip took the change */
	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	nand_select_target(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	nand_deselect_target(chip);
	if (ret)
		goto err_reset_chip;

	if (tmode_param[0] != chip->onfi_timing_mode_default) {
		pr_warn("timing mode %d not acknowledged by the NAND chip\n",
			chip->onfi_timing_mode_default);
		goto err_reset_chip;
	}

	return 0;

err_reset_chip:
	/*
	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
	 * timing mode.
	 */
	nand_reset_data_interface(chip, chipnr);
	nand_select_target(chip, chipnr);
	nand_reset_op(chip);
	nand_deselect_target(chip);

	return ret;
}
876 
/**
 * nand_init_data_interface - find the best data interface and timings
 * @chip: The NAND chip
 *
 * Find the best data interface and NAND timings supported by the chip
 * and the driver.
 * First tries to retrieve supported timing modes from ONFI information,
 * and if the NAND chip does not support ONFI, relies on the
 * ->onfi_timing_mode_default specified in the nand_ids table. After this
 * function nand_chip->data_interface is initialized with the best timing mode
 * available.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_init_data_interface(struct nand_chip *chip)
{
	int modes, mode, ret;

	if (!nand_has_setup_data_iface(chip))
		return 0;

	/*
	 * First try to identify the best timings from ONFI parameters and
	 * if the NAND does not support ONFI, fallback to the default ONFI
	 * timing mode.
	 */
	if (chip->parameters.onfi) {
		modes = chip->parameters.onfi->async_timing_mode;
	} else {
		if (!chip->onfi_timing_mode_default)
			return 0;

		/* Every mode up to the default one is a candidate */
		modes = GENMASK(chip->onfi_timing_mode_default, 0);
	}

	/* Try the fastest (highest) candidate mode first */
	for (mode = fls(modes) - 1; mode >= 0; mode--) {
		ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
		if (ret)
			continue;

		/*
		 * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
		 * controller supports the requested timings.
		 */
		ret = chip->controller->ops->setup_data_interface(chip,
						 NAND_DATA_IFACE_CHECK_ONLY,
						 &chip->data_interface);
		if (!ret) {
			chip->onfi_timing_mode_default = mode;
			break;
		}
	}

	/* Failing to find a supported mode is not treated as an error */
	return 0;
}
932 
933 /**
934  * nand_fill_column_cycles - fill the column cycles of an address
935  * @chip: The NAND chip
936  * @addrs: Array of address cycles to fill
937  * @offset_in_page: The offset in the page
938  *
939  * Fills the first or the first two bytes of the @addrs field depending
940  * on the NAND bus width and the page size.
941  *
942  * Returns the number of cycles needed to encode the column, or a negative
943  * error code in case one of the arguments is invalid.
944  */
945 static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
946 				   unsigned int offset_in_page)
947 {
948 	struct mtd_info *mtd = nand_to_mtd(chip);
949 
950 	/* Make sure the offset is less than the actual page size. */
951 	if (offset_in_page > mtd->writesize + mtd->oobsize)
952 		return -EINVAL;
953 
954 	/*
955 	 * On small page NANDs, there's a dedicated command to access the OOB
956 	 * area, and the column address is relative to the start of the OOB
957 	 * area, not the start of the page. Asjust the address accordingly.
958 	 */
959 	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
960 		offset_in_page -= mtd->writesize;
961 
962 	/*
963 	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
964 	 * wide, then it must be divided by 2.
965 	 */
966 	if (chip->options & NAND_BUSWIDTH_16) {
967 		if (WARN_ON(offset_in_page % 2))
968 			return -EINVAL;
969 
970 		offset_in_page /= 2;
971 	}
972 
973 	addrs[0] = offset_in_page;
974 
975 	/*
976 	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
977 	 * need 2
978 	 */
979 	if (mtd->writesize <= 512)
980 		return 1;
981 
982 	addrs[1] = offset_in_page >> 8;
983 
984 	return 2;
985 }
986 
/*
 * Issue a READ PAGE operation on a small page (<= 512 bytes) chip through
 * ->exec_op(). The column is encoded in addrs[0] and the row (page) address
 * in addrs[1..2] (plus addrs[3] for chips needing 3 row address cycles).
 */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/*
	 * Small page chips use distinct opcodes depending on where the read
	 * starts: READOOB for the OOB area, READ1 for the second half of the
	 * page on 8-bit chips.
	 */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row address: one column cycle, so the page starts at addrs[1] */
	addrs[1] = page;
	addrs[2] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		/* instrs[1] is the ADDR instruction declared above */
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1029 
/*
 * Issue a READ PAGE operation on a large page (> 512 bytes) chip through
 * ->exec_op(). Large page chips use the READ0/READSTART command pair with
 * two column cycles (addrs[0..1]) followed by the row address cycles.
 */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row address: two column cycles, so the page starts at addrs[2] */
	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		/* instrs[1] is the ADDR instruction declared above */
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1066 
/**
 * nand_read_page_op - Do a READ PAGE operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	/* The requested read must fit within page + OOB */
	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		/* Large and small page chips need different sequences */
		if (mtd->writesize > 512)
			return nand_lp_exec_read_page_op(chip, page,
							 offset_in_page, buf,
							 len);

		return nand_sp_exec_read_page_op(chip, page, offset_in_page,
						 buf, len);
	}

	/* Legacy path for controllers without ->exec_op() */
	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_page_op);
1108 
1109 /**
1110  * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
1111  * @chip: The NAND chip
1112  * @page: parameter page to read
1113  * @buf: buffer used to store the data
1114  * @len: length of the buffer
1115  *
1116  * This function issues a READ PARAMETER PAGE operation.
1117  * This function does not select/unselect the CS line.
1118  *
1119  * Returns 0 on success, a negative error code otherwise.
1120  */
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
			    unsigned int len)
{
	unsigned int i;
	u8 *p = buf;

	/* A non-zero transfer length requires a destination buffer. */
	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PARAM, 0),
			NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			/* Parameter page data is transferred in 8-bit mode. */
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: read the parameter page byte by byte. */
	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
	for (i = 0; i < len; i++)
		p[i] = chip->legacy.read_byte(chip);

	return 0;
}
1155 
1156 /**
1157  * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
1158  * @chip: The NAND chip
1159  * @offset_in_page: offset within the page
1160  * @buf: buffer used to store the data
1161  * @len: length of the buffer
1162  * @force_8bit: force 8-bit bus access
1163  *
1164  * This function issues a CHANGE READ COLUMN operation.
1165  * This function does not select/unselect the CS line.
1166  *
1167  * Returns 0 on success, a negative error code otherwise.
1168  */
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* A non-zero transfer length requires a destination buffer. */
	if (len && !buf)
		return -EINVAL;

	/* The transfer must not run past the end of the OOB area. */
	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		/* Two column cycles, filled in by nand_fill_column_cycles(). */
		u8 addrs[2] = {};
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
				    PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		/*
		 * Harmless when the DATA_IN instruction was dropped above:
		 * instrs[3] still exists, it is just not executed.
		 */
		instrs[3].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: delegate to the driver-specific hooks. */
	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1219 
1220 /**
1221  * nand_read_oob_op - Do a READ OOB operation
1222  * @chip: The NAND chip
1223  * @page: page to read
1224  * @offset_in_oob: offset within the OOB area
1225  * @buf: buffer used to store the data
1226  * @len: length of the buffer
1227  *
1228  * This function issues a READ OOB operation.
1229  * This function does not select/unselect the CS line.
1230  *
1231  * Returns 0 on success, a negative error code otherwise.
1232  */
1233 int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1234 		     unsigned int offset_in_oob, void *buf, unsigned int len)
1235 {
1236 	struct mtd_info *mtd = nand_to_mtd(chip);
1237 
1238 	if (len && !buf)
1239 		return -EINVAL;
1240 
1241 	if (offset_in_oob + len > mtd->oobsize)
1242 		return -EINVAL;
1243 
1244 	if (nand_has_exec_op(chip))
1245 		return nand_read_page_op(chip, page,
1246 					 mtd->writesize + offset_in_oob,
1247 					 buf, len);
1248 
1249 	chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
1250 	if (len)
1251 		chip->legacy.read_buf(chip, buf, len);
1252 
1253 	return 0;
1254 }
1255 EXPORT_SYMBOL_GPL(nand_read_oob_op);
1256 
/*
 * Core of the PROG PAGE sequence for ->exec_op() controllers. Builds the
 * (READ0) + SEQIN + address + DATA_OUT [+ PAGEPROG + WAIT_RDY] operation.
 * When @prog is true the page is actually programmed and the NAND status
 * byte is returned (>= 0) on success; when false only the first half of
 * the sequence is issued and 0 is returned on success. A negative error
 * code is returned on failure in both cases.
 */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	/* 1 or 2 column cycles + 2 or 3 row cycles. */
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
	int ret;
	u8 status;

	if (naddrs < 0)
		return naddrs;

	/* Append the row cycles after the column cycles. */
	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	/* instrs[2] is the ADDR instruction: set the final cycle count. */
	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	ret = nand_exec_op(chip, &op);
	if (!prog || ret)
		return ret;

	/* Programming done: report the NAND status byte to the caller. */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status;
}
1331 
1332 /**
1333  * nand_prog_page_begin_op - starts a PROG PAGE operation
1334  * @chip: The NAND chip
1335  * @page: page to write
1336  * @offset_in_page: offset within the page
1337  * @buf: buffer containing the data to write to the page
1338  * @len: length of the buffer
1339  *
1340  * This function issues the first half of a PROG PAGE operation.
1341  * This function does not select/unselect the CS line.
1342  *
1343  * Returns 0 on success, a negative error code otherwise.
1344  */
1345 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1346 			    unsigned int offset_in_page, const void *buf,
1347 			    unsigned int len)
1348 {
1349 	struct mtd_info *mtd = nand_to_mtd(chip);
1350 
1351 	if (len && !buf)
1352 		return -EINVAL;
1353 
1354 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1355 		return -EINVAL;
1356 
1357 	if (nand_has_exec_op(chip))
1358 		return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1359 					      len, false);
1360 
1361 	chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
1362 
1363 	if (buf)
1364 		chip->legacy.write_buf(chip, buf, len);
1365 
1366 	return 0;
1367 }
1368 EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1369 
1370 /**
1371  * nand_prog_page_end_op - ends a PROG PAGE operation
1372  * @chip: The NAND chip
1373  *
1374  * This function issues the second half of a PROG PAGE operation.
1375  * This function does not select/unselect the CS line.
1376  *
1377  * Returns 0 on success, a negative error code otherwise.
1378  */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		/* PAGEPROG, then wait for the NAND to be ready again. */
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Read back the status byte to detect program failures. */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		/* ->waitfunc() returns the status byte or a negative errno. */
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1416 
1417 /**
1418  * nand_prog_page_op - Do a full PROG PAGE operation
1419  * @chip: The NAND chip
1420  * @page: page to write
1421  * @offset_in_page: offset within the page
1422  * @buf: buffer containing the data to write to the page
1423  * @len: length of the buffer
1424  *
1425  * This function issues a full PROG PAGE operation.
1426  * This function does not select/unselect the CS line.
1427  *
1428  * Returns 0 on success, a negative error code otherwise.
1429  */
1430 int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1431 		      unsigned int offset_in_page, const void *buf,
1432 		      unsigned int len)
1433 {
1434 	struct mtd_info *mtd = nand_to_mtd(chip);
1435 	int status;
1436 
1437 	if (!len || !buf)
1438 		return -EINVAL;
1439 
1440 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1441 		return -EINVAL;
1442 
1443 	if (nand_has_exec_op(chip)) {
1444 		status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1445 						len, true);
1446 	} else {
1447 		chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
1448 				     page);
1449 		chip->legacy.write_buf(chip, buf, len);
1450 		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1451 		status = chip->legacy.waitfunc(chip);
1452 	}
1453 
1454 	if (status & NAND_STATUS_FAIL)
1455 		return -EIO;
1456 
1457 	return 0;
1458 }
1459 EXPORT_SYMBOL_GPL(nand_prog_page_op);
1460 
1461 /**
1462  * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
1463  * @chip: The NAND chip
1464  * @offset_in_page: offset within the page
1465  * @buf: buffer containing the data to send to the NAND
1466  * @len: length of the buffer
1467  * @force_8bit: force 8-bit bus access
1468  *
1469  * This function issues a CHANGE WRITE COLUMN operation.
1470  * This function does not select/unselect the CS line.
1471  *
1472  * Returns 0 on success, a negative error code otherwise.
1473  */
int nand_change_write_column_op(struct nand_chip *chip,
				unsigned int offset_in_page,
				const void *buf, unsigned int len,
				bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* A non-zero transfer length requires a source buffer. */
	if (len && !buf)
		return -EINVAL;

	/* The transfer must not run past the end of the OOB area. */
	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		/* Two column cycles, filled in by nand_fill_column_cycles(). */
		u8 addrs[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: delegate to the driver-specific hooks. */
	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1523 
1524 /**
1525  * nand_readid_op - Do a READID operation
1526  * @chip: The NAND chip
1527  * @addr: address cycle to pass after the READID command
1528  * @buf: buffer used to store the ID
1529  * @len: length of the buffer
1530  *
1531  * This function sends a READID command and reads back the ID returned by the
1532  * NAND.
1533  * This function does not select/unselect the CS line.
1534  *
1535  * Returns 0 on success, a negative error code otherwise.
1536  */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	unsigned int i;
	u8 *id = buf;

	/* A non-zero transfer length requires a destination buffer. */
	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
			/* ID bytes are always transferred in 8-bit mode. */
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: read the ID byte by byte. */
	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);

	for (i = 0; i < len; i++)
		id[i] = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);
1571 
1572 /**
1573  * nand_status_op - Do a STATUS operation
1574  * @chip: The NAND chip
1575  * @status: out variable to store the NAND status
1576  *
1577  * This function sends a STATUS command and reads back the status returned by
1578  * the NAND.
1579  * This function does not select/unselect the CS line.
1580  *
1581  * Returns 0 on success, a negative error code otherwise.
1582  */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    PSEC_TO_NSEC(sdr->tADL_min)),
			/* The status byte is always read in 8-bit mode. */
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* @status may be NULL: only issue the command then. */
		if (!status)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: delegate to the driver-specific hooks. */
	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);
1608 
1609 /**
1610  * nand_exit_status_op - Exit a STATUS operation
1611  * @chip: The NAND chip
1612  *
1613  * This function sends a READ0 command to cancel the effect of the STATUS
1614  * command to avoid reading only the status until a new read command is sent.
1615  *
1616  * This function does not select/unselect the CS line.
1617  *
1618  * Returns 0 on success, a negative error code otherwise.
1619  */
1620 int nand_exit_status_op(struct nand_chip *chip)
1621 {
1622 	if (nand_has_exec_op(chip)) {
1623 		struct nand_op_instr instrs[] = {
1624 			NAND_OP_CMD(NAND_CMD_READ0, 0),
1625 		};
1626 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1627 
1628 		return nand_exec_op(chip, &op);
1629 	}
1630 
1631 	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
1632 
1633 	return 0;
1634 }
1635 
1636 /**
1637  * nand_erase_op - Do an erase operation
1638  * @chip: The NAND chip
1639  * @eraseblock: block to erase
1640  *
1641  * This function sends an ERASE command and waits for the NAND to be ready
1642  * before returning.
1643  * This function does not select/unselect the CS line.
1644  *
1645  * Returns 0 on success, a negative error code otherwise.
1646  */
1647 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
1648 {
1649 	unsigned int page = eraseblock <<
1650 			    (chip->phys_erase_shift - chip->page_shift);
1651 	int ret;
1652 	u8 status;
1653 
1654 	if (nand_has_exec_op(chip)) {
1655 		const struct nand_sdr_timings *sdr =
1656 			nand_get_sdr_timings(&chip->data_interface);
1657 		u8 addrs[3] = {	page, page >> 8, page >> 16 };
1658 		struct nand_op_instr instrs[] = {
1659 			NAND_OP_CMD(NAND_CMD_ERASE1, 0),
1660 			NAND_OP_ADDR(2, addrs, 0),
1661 			NAND_OP_CMD(NAND_CMD_ERASE2,
1662 				    PSEC_TO_MSEC(sdr->tWB_max)),
1663 			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
1664 		};
1665 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1666 
1667 		if (chip->options & NAND_ROW_ADDR_3)
1668 			instrs[1].ctx.addr.naddrs++;
1669 
1670 		ret = nand_exec_op(chip, &op);
1671 		if (ret)
1672 			return ret;
1673 
1674 		ret = nand_status_op(chip, &status);
1675 		if (ret)
1676 			return ret;
1677 	} else {
1678 		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
1679 		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
1680 
1681 		ret = chip->legacy.waitfunc(chip);
1682 		if (ret < 0)
1683 			return ret;
1684 
1685 		status = ret;
1686 	}
1687 
1688 	if (status & NAND_STATUS_FAIL)
1689 		return -EIO;
1690 
1691 	return 0;
1692 }
1693 EXPORT_SYMBOL_GPL(nand_erase_op);
1694 
1695 /**
1696  * nand_set_features_op - Do a SET FEATURES operation
1697  * @chip: The NAND chip
1698  * @feature: feature id
1699  * @data: 4 bytes of data
1700  *
1701  * This function sends a SET FEATURES command and waits for the NAND to be
1702  * ready before returning.
1703  * This function does not select/unselect the CS line.
1704  *
1705  * Returns 0 on success, a negative error code otherwise.
1706  */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	const u8 *params = data;
	int i, ret;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
			/* Feature parameters are transferred in 8-bit mode. */
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: write the parameter bytes one by one. */
	chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		chip->legacy.write_byte(chip, params[i]);

	/* ->waitfunc() returns the status byte or a negative errno. */
	ret = chip->legacy.waitfunc(chip);
	if (ret < 0)
		return ret;

	if (ret & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
1741 
1742 /**
1743  * nand_get_features_op - Do a GET FEATURES operation
1744  * @chip: The NAND chip
1745  * @feature: feature id
1746  * @data: 4 bytes of data
1747  *
1748  * This function sends a GET FEATURES command and waits for the NAND to be
1749  * ready before returning.
1750  * This function does not select/unselect the CS line.
1751  *
1752  * Returns 0 on success, a negative error code otherwise.
1753  */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	u8 *params = data;
	int i;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			/* Feature parameters are transferred in 8-bit mode. */
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: read the parameter bytes one by one. */
	chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->legacy.read_byte(chip);

	return 0;
}
1782 
1783 static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
1784 			    unsigned int delay_ns)
1785 {
1786 	if (nand_has_exec_op(chip)) {
1787 		struct nand_op_instr instrs[] = {
1788 			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
1789 					 PSEC_TO_NSEC(delay_ns)),
1790 		};
1791 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1792 
1793 		return nand_exec_op(chip, &op);
1794 	}
1795 
1796 	/* Apply delay or wait for ready/busy pin */
1797 	if (!chip->legacy.dev_ready)
1798 		udelay(chip->legacy.chip_delay);
1799 	else
1800 		nand_wait_ready(chip);
1801 
1802 	return 0;
1803 }
1804 
1805 /**
1806  * nand_reset_op - Do a reset operation
1807  * @chip: The NAND chip
1808  *
1809  * This function sends a RESET command and waits for the NAND to be ready
1810  * before returning.
1811  * This function does not select/unselect the CS line.
1812  *
1813  * Returns 0 on success, a negative error code otherwise.
1814  */
int nand_reset_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		/* RESET, then wait up to tRST_max for the chip to recover. */
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: delegate to the driver-specific cmdfunc(). */
	chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset_op);
1834 
1835 /**
1836  * nand_read_data_op - Read data from the NAND
1837  * @chip: The NAND chip
1838  * @buf: buffer used to store the data
1839  * @len: length of the buffer
1840  * @force_8bit: force 8-bit bus access
1841  *
1842  * This function does a raw data read on the bus. Usually used after launching
1843  * another NAND operation like nand_read_page_op().
1844  * This function does not select/unselect the CS line.
1845  *
1846  * Returns 0 on success, a negative error code otherwise.
1847  */
1848 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1849 		      bool force_8bit)
1850 {
1851 	if (!len || !buf)
1852 		return -EINVAL;
1853 
1854 	if (nand_has_exec_op(chip)) {
1855 		struct nand_op_instr instrs[] = {
1856 			NAND_OP_DATA_IN(len, buf, 0),
1857 		};
1858 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1859 
1860 		instrs[0].ctx.data.force_8bit = force_8bit;
1861 
1862 		return nand_exec_op(chip, &op);
1863 	}
1864 
1865 	if (force_8bit) {
1866 		u8 *p = buf;
1867 		unsigned int i;
1868 
1869 		for (i = 0; i < len; i++)
1870 			p[i] = chip->legacy.read_byte(chip);
1871 	} else {
1872 		chip->legacy.read_buf(chip, buf, len);
1873 	}
1874 
1875 	return 0;
1876 }
1877 EXPORT_SYMBOL_GPL(nand_read_data_op);
1878 
1879 /**
1880  * nand_write_data_op - Write data from the NAND
1881  * @chip: The NAND chip
1882  * @buf: buffer containing the data to send on the bus
1883  * @len: length of the buffer
1884  * @force_8bit: force 8-bit bus access
1885  *
1886  * This function does a raw data write on the bus. Usually used after launching
1887  * another NAND operation like nand_write_page_begin_op().
1888  * This function does not select/unselect the CS line.
1889  *
1890  * Returns 0 on success, a negative error code otherwise.
1891  */
1892 int nand_write_data_op(struct nand_chip *chip, const void *buf,
1893 		       unsigned int len, bool force_8bit)
1894 {
1895 	if (!len || !buf)
1896 		return -EINVAL;
1897 
1898 	if (nand_has_exec_op(chip)) {
1899 		struct nand_op_instr instrs[] = {
1900 			NAND_OP_DATA_OUT(len, buf, 0),
1901 		};
1902 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1903 
1904 		instrs[0].ctx.data.force_8bit = force_8bit;
1905 
1906 		return nand_exec_op(chip, &op);
1907 	}
1908 
1909 	if (force_8bit) {
1910 		const u8 *p = buf;
1911 		unsigned int i;
1912 
1913 		for (i = 0; i < len; i++)
1914 			chip->legacy.write_byte(chip, p[i]);
1915 	} else {
1916 		chip->legacy.write_buf(chip, buf, len);
1917 	}
1918 
1919 	return 0;
1920 }
1921 EXPORT_SYMBOL_GPL(nand_write_data_op);
1922 
1923 /**
1924  * struct nand_op_parser_ctx - Context used by the parser
1925  * @instrs: array of all the instructions that must be addressed
1926  * @ninstrs: length of the @instrs array
1927  * @subop: Sub-operation to be passed to the NAND controller
1928  *
1929  * This structure is used by the core to split NAND operations into
1930  * sub-operations that can be handled by the NAND controller.
1931  */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;	/* full instruction list */
	unsigned int ninstrs;			/* entries in @instrs */
	struct nand_subop subop;		/* current sub-operation window */
};
1937 
1938 /**
1939  * nand_op_parser_must_split_instr - Checks if an instruction must be split
1940  * @pat: the parser pattern element that matches @instr
1941  * @instr: pointer to the instruction to check
1942  * @start_offset: this is an in/out parameter. If @instr has already been
1943  *		  split, then @start_offset is the offset from which to start
1944  *		  (either an address cycle or an offset in the data buffer).
1945  *		  Conversely, if the function returns true (ie. instr must be
1946  *		  split), this parameter is updated to point to the first
1947  *		  data/address cycle that has not been taken care of.
1948  *
1949  * Some NAND controllers are limited and cannot send X address cycles with a
1950  * unique operation, or cannot read/write more than Y bytes at the same time.
1951  * In this case, split the instruction that does not fit in a single
1952  * controller-operation into two or more chunks.
1953  *
1954  * Returns true if the instruction must be split, false otherwise.
1955  * The @start_offset parameter is also updated to the offset at which the next
1956  * bundle of instruction must start (if an address or a data instruction).
1957  */
static bool
nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
				const struct nand_op_instr *instr,
				unsigned int *start_offset)
{
	switch (pat->type) {
	case NAND_OP_ADDR_INSTR:
		/* maxcycles == 0 means "no limit on address cycles". */
		if (!pat->ctx.addr.maxcycles)
			break;

		/* Cycles remaining after *start_offset exceed the limit? */
		if (instr->ctx.addr.naddrs - *start_offset >
		    pat->ctx.addr.maxcycles) {
			*start_offset += pat->ctx.addr.maxcycles;
			return true;
		}
		break;

	case NAND_OP_DATA_IN_INSTR:
	case NAND_OP_DATA_OUT_INSTR:
		/* maxlen == 0 means "no limit on the data length". */
		if (!pat->ctx.data.maxlen)
			break;

		/* Bytes remaining after *start_offset exceed the limit? */
		if (instr->ctx.data.len - *start_offset >
		    pat->ctx.data.maxlen) {
			*start_offset += pat->ctx.data.maxlen;
			return true;
		}
		break;

	default:
		/* CMD and WAIT_RDY instructions are never split. */
		break;
	}

	return false;
}
1993 
1994 /**
1995  * nand_op_parser_match_pat - Checks if a pattern matches the instructions
1996  *			      remaining in the parser context
1997  * @pat: the pattern to test
1998  * @ctx: the parser context structure to match with the pattern @pat
1999  *
2000  * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
2001  * Returns true if this is the case, false ortherwise. When true is returned,
2002  * @ctx->subop is updated with the set of instructions to be passed to the
2003  * controller driver.
2004  */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	/* Resume point inside the first instruction if it was split before. */
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and continue
		 * to the next one. If the element is mandatory, there's no
		 * match and we can return false directly.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * The last_instr_end_off value comes back updated to point to
		 * the position where we have to split the instruction (the
		 * start of the next subop chunk).
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			/* The split instruction still belongs to this subop. */
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		/* Only the first instruction may start at a non-zero offset. */
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}
2077 
2078 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/* Dump the whole operation, marking the current subop window with "->". */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = "      ";
	unsigned int i;

	pr_debug("executing subop:\n");

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Switch to the arrow prefix inside the subop window. */
		if (instr == &ctx->subop.instrs[0])
			prefix = "    ->";

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			pr_debug("%sCMD      [0x%02x]\n", prefix,
				 instr->ctx.cmd.opcode);
			break;
		case NAND_OP_ADDR_INSTR:
			/* %*ph prints at most 64 bytes, hence the clamp. */
			pr_debug("%sADDR     [%d cyc: %*ph]\n", prefix,
				 instr->ctx.addr.naddrs,
				 instr->ctx.addr.naddrs < 64 ?
				 instr->ctx.addr.naddrs : 64,
				 instr->ctx.addr.addrs);
			break;
		case NAND_OP_DATA_IN_INSTR:
			pr_debug("%sDATA_IN  [%d B%s]\n", prefix,
				 instr->ctx.data.len,
				 instr->ctx.data.force_8bit ?
				 ", force 8-bit" : "");
			break;
		case NAND_OP_DATA_OUT_INSTR:
			pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
				 instr->ctx.data.len,
				 instr->ctx.data.force_8bit ?
				 ", force 8-bit" : "");
			break;
		case NAND_OP_WAITRDY_INSTR:
			pr_debug("%sWAITRDY  [max %d ms]\n", prefix,
				 instr->ctx.waitrdy.timeout_ms);
			break;
		}

		/* Back to the plain prefix after the subop's last entry. */
		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = "      ";
	}
}
2127 #else
2128 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2129 {
2130 	/* NOP */
2131 }
2132 #endif
2133 
/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, the function only checks if @op can be handled but
 *		does not execute the operation
 *
 * Helper function designed to ease integration of NAND controller drivers that
 * only support a limited set of instruction sequences. The supported sequences
 * are described in @parser, and the framework takes care of splitting @op into
 * multiple sub-operations (if required) and pass them back to the ->exec()
 * callback of the matching pattern if @check_only is set to false.
 *
 * NAND controller drivers should call this function from their own ->exec_op()
 * implementation.
 *
 * Returns 0 on success, a negative error code otherwise. A failure can be
 * caused by an unsupported operation (none of the supported patterns is able
 * to handle the requested operation), or an error returned by one of the
 * matching pattern->exec() hooks.
 */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	/* Consume the whole instruction list, one sub-operation at a time. */
	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		int ret;

		/* Try each pattern in order; the first match wins. */
		for (i = 0; i < parser->npatterns; i++) {
			const struct nand_op_parser_pattern *pattern;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &ctx))
				continue;

			nand_op_parser_trace(&ctx);

			if (check_only)
				break;

			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;

			break;
		}

		/* No pattern could handle this sub-operation: bail out. */
		if (i == parser->npatterns) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop. If the last instruction was split
		 * (last_instr_end_off != 0), it must be replayed as the first
		 * instruction of the next sub-operation, hence the -1.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2208 
2209 static bool nand_instr_is_data(const struct nand_op_instr *instr)
2210 {
2211 	return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2212 			 instr->type == NAND_OP_DATA_OUT_INSTR);
2213 }
2214 
2215 static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2216 				      unsigned int instr_idx)
2217 {
2218 	return subop && instr_idx < subop->ninstrs;
2219 }
2220 
2221 static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2222 					     unsigned int instr_idx)
2223 {
2224 	if (instr_idx)
2225 		return 0;
2226 
2227 	return subop->first_instr_start_off;
2228 }
2229 
2230 /**
2231  * nand_subop_get_addr_start_off - Get the start offset in an address array
2232  * @subop: The entire sub-operation
2233  * @instr_idx: Index of the instruction inside the sub-operation
2234  *
2235  * During driver development, one could be tempted to directly use the
2236  * ->addr.addrs field of address instructions. This is wrong as address
2237  * instructions might be split.
2238  *
2239  * Given an address instruction, returns the offset of the first cycle to issue.
2240  */
2241 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2242 					   unsigned int instr_idx)
2243 {
2244 	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2245 		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2246 		return 0;
2247 
2248 	return nand_subop_get_start_off(subop, instr_idx);
2249 }
2250 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2251 
2252 /**
2253  * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2254  * @subop: The entire sub-operation
2255  * @instr_idx: Index of the instruction inside the sub-operation
2256  *
2257  * During driver development, one could be tempted to directly use the
2258  * ->addr->naddrs field of a data instruction. This is wrong as instructions
2259  * might be split.
2260  *
2261  * Given an address instruction, returns the number of address cycle to issue.
2262  */
2263 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2264 					 unsigned int instr_idx)
2265 {
2266 	int start_off, end_off;
2267 
2268 	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2269 		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2270 		return 0;
2271 
2272 	start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2273 
2274 	if (instr_idx == subop->ninstrs - 1 &&
2275 	    subop->last_instr_end_off)
2276 		end_off = subop->last_instr_end_off;
2277 	else
2278 		end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2279 
2280 	return end_off - start_off;
2281 }
2282 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2283 
2284 /**
2285  * nand_subop_get_data_start_off - Get the start offset in a data array
2286  * @subop: The entire sub-operation
2287  * @instr_idx: Index of the instruction inside the sub-operation
2288  *
2289  * During driver development, one could be tempted to directly use the
2290  * ->data->buf.{in,out} field of data instructions. This is wrong as data
2291  * instructions might be split.
2292  *
2293  * Given a data instruction, returns the offset to start from.
2294  */
2295 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2296 					   unsigned int instr_idx)
2297 {
2298 	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2299 		    !nand_instr_is_data(&subop->instrs[instr_idx])))
2300 		return 0;
2301 
2302 	return nand_subop_get_start_off(subop, instr_idx);
2303 }
2304 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2305 
2306 /**
2307  * nand_subop_get_data_len - Get the number of bytes to retrieve
2308  * @subop: The entire sub-operation
2309  * @instr_idx: Index of the instruction inside the sub-operation
2310  *
2311  * During driver development, one could be tempted to directly use the
2312  * ->data->len field of a data instruction. This is wrong as data instructions
2313  * might be split.
2314  *
2315  * Returns the length of the chunk of data to send/receive.
2316  */
2317 unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
2318 				     unsigned int instr_idx)
2319 {
2320 	int start_off = 0, end_off;
2321 
2322 	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2323 		    !nand_instr_is_data(&subop->instrs[instr_idx])))
2324 		return 0;
2325 
2326 	start_off = nand_subop_get_data_start_off(subop, instr_idx);
2327 
2328 	if (instr_idx == subop->ninstrs - 1 &&
2329 	    subop->last_instr_end_off)
2330 		end_off = subop->last_instr_end_off;
2331 	else
2332 		end_off = subop->instrs[instr_idx].ctx.data.len;
2333 
2334 	return end_off - start_off;
2335 }
2336 EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2337 
/**
 * nand_reset - Reset and initialize a NAND device
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Save the timings data structure, then apply SDR timings mode 0 (see
 * nand_reset_data_interface for details), do the reset operation, and
 * apply back the previous timings.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset(struct nand_chip *chip, int chipnr)
{
	/* Keep a copy so the timings can be restored after the reset. */
	struct nand_data_interface saved_data_intf = chip->data_interface;
	int ret;

	/* Go back to timings mode 0 before resetting the chip. */
	ret = nand_reset_data_interface(chip, chipnr);
	if (ret)
		return ret;

	/*
	 * The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this weird nand_select_target()
	 * nand_deselect_target() dance.
	 */
	nand_select_target(chip, chipnr);
	ret = nand_reset_op(chip);
	nand_deselect_target(chip);
	if (ret)
		return ret;

	/*
	 * nand_reset_data_interface() put both the NAND chip and the NAND
	 * controller in timings mode 0. If the default mode for this chip is
	 * also 0, no need to proceed to the change again. Plus, at probe time,
	 * nand_setup_data_interface() uses ->set/get_features() which would
	 * fail anyway as the parameter page is not available yet.
	 */
	if (!chip->onfi_timing_mode_default)
		return 0;

	/* Restore the saved timings and re-apply them to the controller. */
	chip->data_interface = saved_data_intf;
	ret = nand_setup_data_interface(chip, chipnr);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset);
2387 
2388 /**
2389  * nand_get_features - wrapper to perform a GET_FEATURE
2390  * @chip: NAND chip info structure
2391  * @addr: feature address
2392  * @subfeature_param: the subfeature parameters, a four bytes array
2393  *
2394  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2395  * operation cannot be handled.
2396  */
2397 int nand_get_features(struct nand_chip *chip, int addr,
2398 		      u8 *subfeature_param)
2399 {
2400 	if (!nand_supports_get_features(chip, addr))
2401 		return -ENOTSUPP;
2402 
2403 	if (chip->legacy.get_features)
2404 		return chip->legacy.get_features(chip, addr, subfeature_param);
2405 
2406 	return nand_get_features_op(chip, addr, subfeature_param);
2407 }
2408 
2409 /**
2410  * nand_set_features - wrapper to perform a SET_FEATURE
2411  * @chip: NAND chip info structure
2412  * @addr: feature address
2413  * @subfeature_param: the subfeature parameters, a four bytes array
2414  *
2415  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2416  * operation cannot be handled.
2417  */
2418 int nand_set_features(struct nand_chip *chip, int addr,
2419 		      u8 *subfeature_param)
2420 {
2421 	if (!nand_supports_set_features(chip, addr))
2422 		return -ENOTSUPP;
2423 
2424 	if (chip->legacy.set_features)
2425 		return chip->legacy.set_features(chip, addr, subfeature_param);
2426 
2427 	return nand_set_features_op(chip, addr, subfeature_param);
2428 }
2429 
2430 /**
2431  * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
2432  * @buf: buffer to test
2433  * @len: buffer length
2434  * @bitflips_threshold: maximum number of bitflips
2435  *
2436  * Check if a buffer contains only 0xff, which means the underlying region
2437  * has been erased and is ready to be programmed.
2438  * The bitflips_threshold specify the maximum number of bitflips before
2439  * considering the region is not erased.
2440  * Note: The logic of this function has been extracted from the memweight
2441  * implementation, except that nand_check_erased_buf function exit before
2442  * testing the whole buffer if the number of bitflips exceed the
2443  * bitflips_threshold value.
2444  *
2445  * Returns a positive number of bitflips less than or equal to
2446  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2447  * threshold.
2448  */
2449 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
2450 {
2451 	const unsigned char *bitmap = buf;
2452 	int bitflips = 0;
2453 	int weight;
2454 
2455 	for (; len && ((uintptr_t)bitmap) % sizeof(long);
2456 	     len--, bitmap++) {
2457 		weight = hweight8(*bitmap);
2458 		bitflips += BITS_PER_BYTE - weight;
2459 		if (unlikely(bitflips > bitflips_threshold))
2460 			return -EBADMSG;
2461 	}
2462 
2463 	for (; len >= sizeof(long);
2464 	     len -= sizeof(long), bitmap += sizeof(long)) {
2465 		unsigned long d = *((unsigned long *)bitmap);
2466 		if (d == ~0UL)
2467 			continue;
2468 		weight = hweight_long(d);
2469 		bitflips += BITS_PER_LONG - weight;
2470 		if (unlikely(bitflips > bitflips_threshold))
2471 			return -EBADMSG;
2472 	}
2473 
2474 	for (; len > 0; len--, bitmap++) {
2475 		weight = hweight8(*bitmap);
2476 		bitflips += BITS_PER_BYTE - weight;
2477 		if (unlikely(bitflips > bitflips_threshold))
2478 			return -EBADMSG;
2479 	}
2480 
2481 	return bitflips;
2482 }
2483 
/**
 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
 *				 0xff data
 * @data: data buffer to test
 * @datalen: data length
 * @ecc: ECC buffer
 * @ecclen: ECC length
 * @extraoob: extra OOB buffer
 * @extraooblen: extra OOB length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a data buffer and its associated ECC and OOB data contains only
 * 0xff pattern, which means the underlying region has been erased and is
 * ready to be programmed.
 * The bitflips_threshold specify the maximum number of bitflips before
 * considering the region as not erased.
 *
 * Note:
 * 1/ ECC algorithms are working on pre-defined block sizes which are usually
 *    different from the NAND page size. When fixing bitflips, ECC engines will
 *    report the number of errors per chunk, and the NAND core infrastructure
 *    expect you to return the maximum number of bitflips for the whole page.
 *    This is why you should always use this function on a single chunk and
 *    not on the whole page. After checking each chunk you should update your
 *    max_bitflips value accordingly.
 * 2/ When checking for bitflips in erased pages you should not only check
 *    the payload data but also their associated ECC data, because a user might
 *    have programmed almost all bits to 1 but a few. In this case, we
 *    shouldn't consider the chunk as erased, and checking ECC bytes prevent
 *    this case.
 * 3/ The extraoob argument is optional, and should be used if some of your OOB
 *    data are protected by the ECC engine.
 *    It could also be used if you support subpages and want to attach some
 *    extra OOB data to an ECC chunk.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold. In case of success, the passed buffers are filled with 0xff.
 */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	int data_flips, ecc_flips, extraoob_flips;

	/*
	 * Scan the three buffers in turn, lowering the remaining threshold
	 * by the flips already counted so the total never exceeds it.
	 */
	data_flips = nand_check_erased_buf(data, datalen, bitflips_threshold);
	if (data_flips < 0)
		return data_flips;

	ecc_flips = nand_check_erased_buf(ecc, ecclen,
					  bitflips_threshold - data_flips);
	if (ecc_flips < 0)
		return ecc_flips;

	extraoob_flips = nand_check_erased_buf(extraoob, extraooblen,
					       bitflips_threshold -
					       data_flips - ecc_flips);
	if (extraoob_flips < 0)
		return extraoob_flips;

	/* The chunk counts as erased: scrub the bitflips back to 0xff. */
	if (data_flips)
		memset(data, 0xff, datalen);

	if (ecc_flips)
		memset(ecc, 0xff, ecclen);

	if (extraoob_flips)
		memset(extraoob, 0xff, extraooblen);

	return data_flips + ecc_flips + extraoob_flips;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2560 
/**
 * nand_read_page_raw_notsupp - dummy read raw page function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Placeholder for controllers that cannot perform raw page accesses.
 *
 * Returns -ENOTSUPP unconditionally.
 */
int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
			       int oob_required, int page)
{
	return -ENOTSUPP;
}
2575 
2576 /**
2577  * nand_read_page_raw - [INTERN] read raw page data without ecc
2578  * @chip: nand chip info structure
2579  * @buf: buffer to store read data
2580  * @oob_required: caller requires OOB data read to chip->oob_poi
2581  * @page: page number to read
2582  *
2583  * Not for syndrome calculating ECC controllers, which use a special oob layout.
2584  */
2585 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
2586 		       int page)
2587 {
2588 	struct mtd_info *mtd = nand_to_mtd(chip);
2589 	int ret;
2590 
2591 	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2592 	if (ret)
2593 		return ret;
2594 
2595 	if (oob_required) {
2596 		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2597 					false);
2598 		if (ret)
2599 			return ret;
2600 	}
2601 
2602 	return 0;
2603 }
2604 EXPORT_SYMBOL(nand_read_page_raw);
2605 
/**
 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * We need a special oob layout and handling even when OOB isn't used.
 * Each ECC step is read as: data, then prepad, ECC bytes and postpad, so
 * data and OOB transfers are interleaved chunk by chunk.
 */
static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	/* Issue the READ PAGE command without transferring data yet. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (steps = chip->ecc.steps; steps > 0; steps--) {
		/* Payload chunk of this ECC step. */
		ret = nand_read_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* ECC bytes of this step. */
		ret = nand_read_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Read whatever OOB bytes remain after the last ECC step. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_read_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return 0;
}
2669 
2670 /**
2671  * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
2672  * @chip: nand chip info structure
2673  * @buf: buffer to store read data
2674  * @oob_required: caller requires OOB data read to chip->oob_poi
2675  * @page: page number to read
2676  */
2677 static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
2678 				int oob_required, int page)
2679 {
2680 	struct mtd_info *mtd = nand_to_mtd(chip);
2681 	int i, eccsize = chip->ecc.size, ret;
2682 	int eccbytes = chip->ecc.bytes;
2683 	int eccsteps = chip->ecc.steps;
2684 	uint8_t *p = buf;
2685 	uint8_t *ecc_calc = chip->ecc.calc_buf;
2686 	uint8_t *ecc_code = chip->ecc.code_buf;
2687 	unsigned int max_bitflips = 0;
2688 
2689 	chip->ecc.read_page_raw(chip, buf, 1, page);
2690 
2691 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2692 		chip->ecc.calculate(chip, p, &ecc_calc[i]);
2693 
2694 	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2695 					 chip->ecc.total);
2696 	if (ret)
2697 		return ret;
2698 
2699 	eccsteps = chip->ecc.steps;
2700 	p = buf;
2701 
2702 	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2703 		int stat;
2704 
2705 		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2706 		if (stat < 0) {
2707 			mtd->ecc_stats.failed++;
2708 		} else {
2709 			mtd->ecc_stats.corrected += stat;
2710 			max_bitflips = max_t(unsigned int, max_bitflips, stat);
2711 		}
2712 	}
2713 	return max_bitflips;
2714 }
2715 
/**
 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
 * @chip: nand chip info structure
 * @data_offs: offset of requested data within the page
 * @readlen: data length
 * @bufpoi: buffer to store read data
 * @page: page number to read
 *
 * Reads only the ECC steps covering [data_offs, data_offs + readlen), then
 * fetches the matching ECC bytes from the OOB area and corrects the data.
 * Returns the maximum number of bitflips in a single ECC step, or a
 * negative error code.
 */
static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
			     uint32_t readlen, uint8_t *bufpoi, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int start_step, end_step, num_steps, ret;
	uint8_t *p;
	int data_col_addr, i, gaps = 0;
	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
	int index, section = 0;
	unsigned int max_bitflips = 0;
	struct mtd_oob_region oobregion = { };

	/* Column address within the page aligned to ECC size (256bytes) */
	start_step = data_offs / chip->ecc.size;
	end_step = (data_offs + readlen - 1) / chip->ecc.size;
	num_steps = end_step - start_step + 1;
	/* Byte offset of the first relevant ECC byte in the ECC area. */
	index = start_step * chip->ecc.bytes;

	/* Data size aligned to ECC ecc.size */
	datafrag_len = num_steps * chip->ecc.size;
	eccfrag_len = num_steps * chip->ecc.bytes;

	data_col_addr = start_step * chip->ecc.size;
	/* If we read not a page aligned data */
	p = bufpoi + data_col_addr;
	ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
	if (ret)
		return ret;

	/* Calculate ECC */
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
		chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);

	/*
	 * The performance is faster if we position offsets according to
	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
	 */
	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
	if (ret)
		return ret;

	if (oobregion.length < eccfrag_len)
		gaps = 1;

	if (gaps) {
		/* ECC bytes are scattered: read the whole OOB area. */
		ret = nand_change_read_column_op(chip, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 false);
		if (ret)
			return ret;
	} else {
		/*
		 * Send the command to read the particular ECC bytes take care
		 * about buswidth alignment in read_buf.
		 */
		aligned_pos = oobregion.offset & ~(busw - 1);
		aligned_len = eccfrag_len;
		/* Widen the transfer when either end is not bus-aligned. */
		if (oobregion.offset & (busw - 1))
			aligned_len++;
		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
		    (busw - 1))
			aligned_len++;

		ret = nand_change_read_column_op(chip,
						 mtd->writesize + aligned_pos,
						 &chip->oob_poi[aligned_pos],
						 aligned_len, false);
		if (ret)
			return ret;
	}

	/* Collect the on-flash ECC bytes for the steps we read. */
	ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
					 chip->oob_poi, index, eccfrag_len);
	if (ret)
		return ret;

	p = bufpoi + data_col_addr;
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
		int stat;

		stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
					 &chip->ecc.calc_buf[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
						&chip->ecc.code_buf[i],
						chip->ecc.bytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2826 
/**
 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers which need a special oob layout.
 *
 * Returns the maximum number of bitflips in a single ECC step, or a
 * negative error code.
 */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/* Issue the READ PAGE command without transferring data yet. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the ECC engine, then let it see the data go by. */
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* Read the OOB area holding the on-flash ECC bytes. */
	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
						&ecc_code[i], eccbytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2896 
/**
 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Hardware ECC for large page chips, require OOB to be read first. For this
 * ECC mode, the write_page method is re-used from ECC_HW. These methods
 * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
 * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
 * the data area, by overwriting the NAND manufacturer bad block markings.
 *
 * Returns the maximum number of bitflips in a single ECC step, or a
 * negative error code.
 */
static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
					  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	unsigned int max_bitflips = 0;

	/* Read the OOB area first */
	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
	if (ret)
		return ret;

	/* Then start a data read without transferring anything yet. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* The on-flash ECC bytes are already available from the OOB read. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		/* Arm the ECC engine before each data chunk goes by. */
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);

		stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
						&ecc_code[i], eccbytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2966 
/**
 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling. Each ECC step is read as:
 * data, prepad, ECC bytes, postpad.
 *
 * Returns the maximum number of bitflips in a single ECC step, or a
 * negative error code.
 */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	/* Total OOB bytes consumed per step (prepad + ECC + postpad). */
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	/* Issue the READ PAGE command without transferring data yet. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		/* Arm the ECC engine for the data part of this step. */
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Switch the engine to syndrome mode for the ECC bytes. */
		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}
3058 
3059 /**
3060  * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3061  * @chip: NAND chip object
3062  * @oob: oob destination address
3063  * @ops: oob ops structure
3064  * @len: size of oob to transfer
3065  */
static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
				  struct mtd_oob_ops *ops, size_t len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Raw/placed modes: copy OOB bytes straight through */
		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		/*
		 * Auto-placement: pick out only the free (non-ECC) bytes as
		 * described by the mtd OOB layout.
		 */
		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		/* Callers bound ooboffs/len to oobavail, so this can't fail */
		BUG_ON(ret);
		return oob + len;

	default:
		/* Mode was validated at the MTD entry point; this is a bug */
		BUG();
	}
	return NULL;
}
3090 
3091 /**
3092  * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3093  * @chip: NAND chip object
3094  * @retry_mode: the retry mode to use
3095  *
3096  * Some vendors supply a special command to shift the Vt threshold, to be used
3097  * when there are too many bitflips in a page (i.e., ECC error). After setting
3098  * a new threshold, the host should retry reading the page.
3099  */
static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
	pr_debug("setting READ RETRY mode %d\n", retry_mode);

	/* Valid retry modes are numbered 0..read_retries-1 */
	if (retry_mode >= chip->read_retries)
		return -EINVAL;

	/* The chip advertises retry modes but no way to switch them */
	if (!chip->setup_read_retry)
		return -EOPNOTSUPP;

	return chip->setup_read_retry(chip, retry_mode);
}
3112 
/* Wait for R/B# after a READ on chips that require it (NAND_NEED_READRDY). */
static void nand_wait_readrdy(struct nand_chip *chip)
{
	const struct nand_sdr_timings *sdr;

	/* Most chips do not need an explicit wait after reading */
	if (!(chip->options & NAND_NEED_READRDY))
		return;

	/* Bound the wait by the chip's maximum page read time (tR_max) */
	sdr = nand_get_sdr_timings(&chip->data_interface);
	WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
}
3123 
3124 /**
3125  * nand_do_read_ops - [INTERN] Read data with ECC
3126  * @chip: NAND chip object
3127  * @from: offset to read from
3128  * @ops: oob ops structure
3129  *
3130  * Internal function. Called with chip held.
3131  */
static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);

	uint8_t *bufpoi, *oob, *buf;
	int use_bufpoi;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	/* Locate the die, page and in-page column of the start offset */
	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	while (1) {
		/* Snapshot to detect new ECC failures during this page */
		unsigned int ecc_failures = mtd->ecc_stats.failed;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/*
		 * Use the internal bounce buffer for partial-page reads, or
		 * when the caller's buffer is unsuitable for DMA.
		 */
		if (!aligned)
			use_bufpoi = 1;
		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
			use_bufpoi = !virt_addr_valid(buf) ||
				     !IS_ALIGNED((unsigned long)buf,
						 chip->buf_align);
		else
			use_bufpoi = 0;

		/* Is the current page in the buffer? */
		if (realpage != chip->pagebuf || oob) {
			bufpoi = use_bufpoi ? chip->data_buf : buf;

			if (use_bufpoi && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
						 __func__, buf);

read_retry:
			/*
			 * Now read the page into the buffer.  Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(chip, col, bytes,
							     bufpoi, page);
			else
				ret = chip->ecc.read_page(chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bufpoi)
					/* Invalidate page cache */
					chip->pagebuf = -1;
				break;
			}

			/* Transfer not aligned data */
			if (use_bufpoi) {
				/*
				 * Only cache full, clean, ECC-corrected pages;
				 * subpage reads and raw reads leave the buffer
				 * only partially valid.
				 */
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_failures) &&
				    (ops->mode != MTD_OPS_RAW)) {
					chip->pagebuf = realpage;
					chip->pagebuf_bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagebuf = -1;
				}
				memcpy(buf, chip->data_buf + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(chip, oob, ops,
								toread);
					oobreadlen -= toread;
				}
			}

			nand_wait_readrdy(chip);

			/* Any new ECC failure on this page? Try read-retry. */
			if (mtd->ecc_stats.failed - ecc_failures) {
				if (retry_mode + 1 < chip->read_retries) {
					retry_mode++;
					ret = nand_setup_read_retry(chip,
							retry_mode);
					if (ret < 0)
						break;

					/* Reset failures; retry */
					mtd->ecc_stats.failed = ecc_failures;
					goto read_retry;
				} else {
					/* No more retry modes; real failure */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Serve the read from the cached page buffer */
			memcpy(buf, chip->data_buf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagebuf_bitflips);
		}

		readlen -= bytes;

		/* Reset to retry mode 0 */
		if (retry_mode) {
			ret = nand_setup_read_retry(chip, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	return max_bitflips;
}
3298 
3299 /**
3300  * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
3301  * @chip: nand chip info structure
3302  * @page: page number to read
3303  */
3304 int nand_read_oob_std(struct nand_chip *chip, int page)
3305 {
3306 	struct mtd_info *mtd = nand_to_mtd(chip);
3307 
3308 	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3309 }
3310 EXPORT_SYMBOL(nand_read_oob_std);
3311 
3312 /**
3313  * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3314  *			    with syndromes
3315  * @chip: nand chip info structure
3316  * @page: page number to read
3317  */
3318 static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3319 {
3320 	struct mtd_info *mtd = nand_to_mtd(chip);
3321 	int length = mtd->oobsize;
3322 	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3323 	int eccsize = chip->ecc.size;
3324 	uint8_t *bufpoi = chip->oob_poi;
3325 	int i, toread, sndrnd = 0, pos, ret;
3326 
3327 	ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3328 	if (ret)
3329 		return ret;
3330 
3331 	for (i = 0; i < chip->ecc.steps; i++) {
3332 		if (sndrnd) {
3333 			int ret;
3334 
3335 			pos = eccsize + i * (eccsize + chunk);
3336 			if (mtd->writesize > 512)
3337 				ret = nand_change_read_column_op(chip, pos,
3338 								 NULL, 0,
3339 								 false);
3340 			else
3341 				ret = nand_read_page_op(chip, page, pos, NULL,
3342 							0);
3343 
3344 			if (ret)
3345 				return ret;
3346 		} else
3347 			sndrnd = 1;
3348 		toread = min_t(int, length, chunk);
3349 
3350 		ret = nand_read_data_op(chip, bufpoi, toread, false);
3351 		if (ret)
3352 			return ret;
3353 
3354 		bufpoi += toread;
3355 		length -= toread;
3356 	}
3357 	if (length > 0) {
3358 		ret = nand_read_data_op(chip, bufpoi, length, false);
3359 		if (ret)
3360 			return ret;
3361 	}
3362 
3363 	return 0;
3364 }
3365 
3366 /**
3367  * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
3368  * @chip: nand chip info structure
3369  * @page: page number to write
3370  */
3371 int nand_write_oob_std(struct nand_chip *chip, int page)
3372 {
3373 	struct mtd_info *mtd = nand_to_mtd(chip);
3374 
3375 	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3376 				 mtd->oobsize);
3377 }
3378 EXPORT_SYMBOL(nand_write_oob_std);
3379 
3380 /**
3381  * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
3382  *			     with syndrome - only for large page flash
3383  * @chip: nand chip info structure
3384  * @page: page number to write
3385  */
static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	/* OOB bytes interleaved with each data chunk */
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * data-ecc-data-ecc ... ecc-oob
	 * or
	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		/* No padding: all OOB is contiguous at the end of the page */
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < steps; i++) {
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				/*
				 * Small-page chips cannot reposition the write
				 * pointer; pad the skipped data area with 0xFF
				 * (a NOP for NAND programming).
				 */
				uint32_t fill = 0xFFFFFFFF;

				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);

					ret = nand_write_data_op(chip, &fill,
								 num, false);
					if (ret)
						return ret;

					len -= num;
				}
			} else {
				/* Large-page chips: jump to the next OOB chunk */
				pos = eccsize + i * (eccsize + chunk);
				ret = nand_change_write_column_op(chip, pos,
								  NULL, 0,
								  false);
				if (ret)
					return ret;
			}
		} else
			sndcmd = 1;
		len = min_t(int, length, chunk);

		ret = nand_write_data_op(chip, bufpoi, len, false);
		if (ret)
			return ret;

		bufpoi += len;
		length -= len;
	}
	/* Write any trailing OOB bytes after the last interleaved chunk */
	if (length > 0) {
		ret = nand_write_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3452 
3453 /**
3454  * nand_do_read_oob - [INTERN] NAND read out-of-band
3455  * @chip: NAND chip object
3456  * @from: offset to read from
3457  * @ops: oob operations description structure
3458  *
3459  * NAND read out-of-band data from the spare area.
3460  */
static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0;
	int page, realpage, chipnr;
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	pr_debug("%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);

	/* Snapshot ECC stats to detect failures caused by this read */
	stats = mtd->ecc_stats;

	/* OOB bytes available per page in the requested mode */
	len = mtd_oobavail(mtd, ops);

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	while (1) {
		/* Read one page's OOB into chip->oob_poi */
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(chip, page);
		else
			ret = chip->ecc.read_oob(chip, page);

		if (ret < 0)
			break;

		len = min(len, readlen);
		buf = nand_transfer_oob(chip, buf, ops, len);

		nand_wait_readrdy(chip);

		max_bitflips = max_t(unsigned int, max_bitflips, ret);

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	/* Any new uncorrectable ECC failure makes the whole read -EBADMSG */
	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return max_bitflips;
}
3530 
3531 /**
3532  * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3533  * @mtd: MTD device structure
3534  * @from: offset to read from
3535  * @ops: oob operation description structure
3536  *
3537  * NAND read data and/or out-of-band data.
3538  */
3539 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3540 			 struct mtd_oob_ops *ops)
3541 {
3542 	struct nand_chip *chip = mtd_to_nand(mtd);
3543 	int ret;
3544 
3545 	ops->retlen = 0;
3546 
3547 	if (ops->mode != MTD_OPS_PLACE_OOB &&
3548 	    ops->mode != MTD_OPS_AUTO_OOB &&
3549 	    ops->mode != MTD_OPS_RAW)
3550 		return -ENOTSUPP;
3551 
3552 	ret = nand_get_device(chip);
3553 	if (ret)
3554 		return ret;
3555 
3556 	if (!ops->datbuf)
3557 		ret = nand_do_read_oob(chip, from, ops);
3558 	else
3559 		ret = nand_do_read_ops(chip, from, ops);
3560 
3561 	nand_release_device(chip);
3562 	return ret;
3563 }
3564 
3565 /**
3566  * nand_write_page_raw_notsupp - dummy raw page write function
3567  * @chip: nand chip info structure
3568  * @buf: data buffer
3569  * @oob_required: must write chip->oob_poi to OOB
3570  * @page: page number to write
3571  *
3572  * Returns -ENOTSUPP unconditionally.
3573  */
int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
				int oob_required, int page)
{
	/* Stub for controllers that cannot perform raw page writes */
	return -ENOTSUPP;
}
3579 
3580 /**
3581  * nand_write_page_raw - [INTERN] raw page write function
3582  * @chip: nand chip info structure
3583  * @buf: data buffer
3584  * @oob_required: must write chip->oob_poi to OOB
3585  * @page: page number to write
3586  *
3587  * Not for syndrome calculating ECC controllers, which use a special oob layout.
3588  */
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
			int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* Begin the program operation and send the full data area */
	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	/* The OOB area directly follows the data area in the page */
	if (oob_required) {
		ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
					 false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3608 EXPORT_SYMBOL(nand_write_page_raw);
3609 
3610 /**
3611  * nand_write_page_raw_syndrome - [INTERN] raw page write function
3612  * @chip: nand chip info structure
3613  * @buf: data buffer
3614  * @oob_required: must write chip->oob_poi to OOB
3615  * @page: page number to write
3616  *
3617  * We need a special oob layout and handling even when ECC isn't checked.
3618  */
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Write the page in the interleaved syndrome layout:
	 * data, [prepad], ECC bytes, [postpad] per step. The ECC bytes come
	 * from oob_poi as-is (raw write: nothing is computed here).
	 */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_write_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Write whatever OOB bytes remain after the last interleaved chunk */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_write_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3674 /**
3675  * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
3676  * @chip: nand chip info structure
3677  * @buf: data buffer
3678  * @oob_required: must write chip->oob_poi to OOB
3679  * @page: page number to write
3680  */
static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	/* Software ECC calculation */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	/* Place the computed ECC bytes into oob_poi per the OOB layout */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* Delegate the actual programming; OOB now carries the ECC */
	return chip->ecc.write_page_raw(chip, buf, 1, page);
}
3702 
3703 /**
3704  * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
3705  * @chip: nand chip info structure
3706  * @buf: data buffer
3707  * @oob_required: must write chip->oob_poi to OOB
3708  * @page: page number to write
3709  */
static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Stream each data chunk through the HW ECC engine */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		/* Collect the ECC the hardware computed for this chunk */
		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* Place the ECC bytes into oob_poi per the OOB layout */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
3745 
3746 
3747 /**
3748  * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
3749  * @chip:	nand chip info structure
3750  * @offset:	column address of subpage within the page
3751  * @data_len:	data length
3752  * @buf:	data buffer
3753  * @oob_required: must write chip->oob_poi to OOB
3754  * @page: page number to write
3755  */
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
				    uint32_t data_len, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *oob_buf  = chip->oob_poi;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	int ecc_size      = chip->ecc.size;
	int ecc_bytes     = chip->ecc.bytes;
	int ecc_steps     = chip->ecc.steps;
	/* First and last ECC steps actually covered by [offset, offset+len) */
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
	int oob_bytes       = mtd->oobsize / ecc_steps;
	int step, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (step = 0; step < ecc_steps; step++) {
		/* configure controller for WRITE access */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		/* write data (untouched subpages already masked by 0xFF) */
		ret = nand_write_data_op(chip, buf, ecc_size, false);
		if (ret)
			return ret;

		/* mask ECC of un-touched subpages by padding 0xFF */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(chip, buf, ecc_calc);

		/* mask OOB of un-touched subpages by padding 0xFF */
		/* if oob_required, preserve OOB metadata of written subpage */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf  += oob_bytes;
	}

	/* copy calculated ECC for whole page to chip->buffer->oob */
	/* this include masked-value(0xFF) for unwritten subpages */
	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* write OOB buffer to NAND device */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
3815 
3816 
3817 /**
3818  * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
3819  * @chip: nand chip info structure
3820  * @buf: data buffer
3821  * @oob_required: must write chip->oob_poi to OOB
3822  * @page: page number to write
3823  *
3824  * The hw generator calculates the error syndrome automatically. Therefore we
3825  * need a special oob layout and handling.
3826  */
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Interleaved layout: data, [prepad], ECC, [postpad] per step */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Fetch the ECC the hardware generated for this data chunk */
		chip->ecc.calculate(chip, p, oob);

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_write_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3886 
3887 /**
3888  * nand_write_page - write one page
3889  * @chip: NAND chip descriptor
3890  * @offset: address offset within the page
3891  * @data_len: length of actual data to be written
3892  * @buf: the data to write
3893  * @oob_required: must write chip->oob_poi to OOB
3894  * @page: page number to write
3895  * @raw: use _raw version of write_page
3896  */
3897 static int nand_write_page(struct nand_chip *chip, uint32_t offset,
3898 			   int data_len, const uint8_t *buf, int oob_required,
3899 			   int page, int raw)
3900 {
3901 	struct mtd_info *mtd = nand_to_mtd(chip);
3902 	int status, subpage;
3903 
3904 	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
3905 		chip->ecc.write_subpage)
3906 		subpage = offset || (data_len < mtd->writesize);
3907 	else
3908 		subpage = 0;
3909 
3910 	if (unlikely(raw))
3911 		status = chip->ecc.write_page_raw(chip, buf, oob_required,
3912 						  page);
3913 	else if (subpage)
3914 		status = chip->ecc.write_subpage(chip, offset, data_len, buf,
3915 						 oob_required, page);
3916 	else
3917 		status = chip->ecc.write_page(chip, buf, oob_required, page);
3918 
3919 	if (status < 0)
3920 		return status;
3921 
3922 	return 0;
3923 }
3924 
/* True when x is not aligned to the chip's subpage size (a power of two) */
#define NOTALIGNED(x)	((x & (chip->subpagesize - 1)) != 0)
3926 
3927 /**
3928  * nand_do_write_ops - [INTERN] NAND write with ECC
3929  * @chip: NAND chip object
3930  * @to: offset to write to
3931  * @ops: oob operations description structure
3932  *
3933  * NAND write with ECC.
3934  */
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, realpage, page, column;
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			   __func__);
		return -EINVAL;
	}

	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
	    ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
		chip->pagebuf = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	while (1) {
		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		int use_bufpoi;
		int part_pagewr = (column || writelen < mtd->writesize);

		/*
		 * Use the internal bounce buffer for partial-page writes, or
		 * when the caller's buffer is unsuitable for DMA.
		 */
		if (part_pagewr)
			use_bufpoi = 1;
		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
			use_bufpoi = !virt_addr_valid(buf) ||
				     !IS_ALIGNED((unsigned long)buf,
						 chip->buf_align);
		else
			use_bufpoi = 0;

		/* Partial page write?, or need to use bounce buffer */
		if (use_bufpoi) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",
					 __func__, buf);
			if (part_pagewr)
				bytes = min_t(int, bytes - column, writelen);
			chip->pagebuf = -1;
			/* Pad with 0xFF: programming 0xFF leaves cells alone */
			memset(chip->data_buf, 0xff, mtd->writesize);
			memcpy(&chip->data_buf[column], buf, bytes);
			wbuf = chip->data_buf;
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(chip, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = nand_write_page(chip, column, bytes, wbuf,
				      oob_required, page,
				      (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		/* Subsequent pages are written from their start */
		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	nand_deselect_target(chip);
	return ret;
}
4053 
4054 /**
4055  * panic_nand_write - [MTD Interface] NAND write with ECC
4056  * @mtd: MTD device structure
4057  * @to: offset to write to
4058  * @len: number of bytes to write
4059  * @retlen: pointer to variable to store the number of written bytes
4060  * @buf: the data to write
4061  *
4062  * NAND write with ECC. Used when performing writes in interrupt context, this
4063  * may for example be called by mtdoops when writing an oops while in panic.
4064  */
4065 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4066 			    size_t *retlen, const uint8_t *buf)
4067 {
4068 	struct nand_chip *chip = mtd_to_nand(mtd);
4069 	int chipnr = (int)(to >> chip->chip_shift);
4070 	struct mtd_oob_ops ops;
4071 	int ret;
4072 
4073 	nand_select_target(chip, chipnr);
4074 
4075 	/* Wait for the device to get ready */
4076 	panic_nand_wait(chip, 400);
4077 
4078 	memset(&ops, 0, sizeof(ops));
4079 	ops.len = len;
4080 	ops.datbuf = (uint8_t *)buf;
4081 	ops.mode = MTD_OPS_PLACE_OOB;
4082 
4083 	ret = nand_do_write_ops(chip, to, &ops);
4084 
4085 	*retlen = ops.retlen;
4086 	return ret;
4087 }
4088 
4089 /**
4090  * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
4091  * @mtd: MTD device structure
4092  * @to: offset to write to
4093  * @ops: oob operation description structure
4094  */
4095 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4096 			  struct mtd_oob_ops *ops)
4097 {
4098 	struct nand_chip *chip = mtd_to_nand(mtd);
4099 	int ret = -ENOTSUPP;
4100 
4101 	ops->retlen = 0;
4102 
4103 	ret = nand_get_device(chip);
4104 	if (ret)
4105 		return ret;
4106 
4107 	switch (ops->mode) {
4108 	case MTD_OPS_PLACE_OOB:
4109 	case MTD_OPS_AUTO_OOB:
4110 	case MTD_OPS_RAW:
4111 		break;
4112 
4113 	default:
4114 		goto out;
4115 	}
4116 
4117 	if (!ops->datbuf)
4118 		ret = nand_do_write_oob(chip, to, ops);
4119 	else
4120 		ret = nand_do_write_ops(chip, to, ops);
4121 
4122 out:
4123 	nand_release_device(chip);
4124 	return ret;
4125 }
4126 
4127 /**
4128  * nand_erase - [MTD Interface] erase block(s)
4129  * @mtd: MTD device structure
4130  * @instr: erase instruction
4131  *
4132  * Erase one ore more blocks.
4133  */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* MTD entry point: never allow erasing the BBT area (allowbbt = 0) */
	return nand_erase_nand(chip, instr, 0);
}
4138 
4139 /**
4140  * nand_erase_nand - [INTERN] erase block(s)
4141  * @chip: NAND chip object
4142  * @instr: erase instruction
4143  * @allowbbt: allow erasing the bbt area
4144  *
 * Erase one or more blocks.
4146  */
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
		    int allowbbt)
{
	int page, pages_per_block, ret, chipnr;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
			__func__, (unsigned long long)instr->addr,
			(unsigned long long)instr->len);

	/* Reject ranges that are not block-aligned or out of bounds */
	if (check_offs_len(chip, instr->addr, instr->len))
		return -EINVAL;

	/* Grab the lock and see if the device is available */
	ret = nand_get_device(chip);
	if (ret)
		return ret;

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		pr_debug("%s: device is write protected!\n",
				__func__);
		ret = -EIO;
		goto erase_exit;
	}

	/* Loop through the pages */
	len = instr->len;

	while (len) {
		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(chip, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
				    __func__, page);
			ret = -EIO;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagebuf && chip->pagebuf <
		    (page + pages_per_block))
			chip->pagebuf = -1;

		/*
		 * Mask the page to the current die's address space and
		 * convert it to an eraseblock index for the erase op.
		 */
		ret = nand_erase_op(chip, (page & chip->pagemask) >>
				    (chip->phys_erase_shift - chip->page_shift));
		if (ret) {
			pr_debug("%s: failed erase, page 0x%08x\n",
					__func__, page);
			instr->fail_addr =
				((loff_t)page << chip->page_shift);
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ret = 0;
erase_exit:

	/* Deselect and wake up anyone waiting on the device */
	nand_deselect_target(chip);
	nand_release_device(chip);

	/* Return more or less happy */
	return ret;
}
4236 
4237 /**
4238  * nand_sync - [MTD Interface] sync
4239  * @mtd: MTD device structure
4240  *
4241  * Sync is actually a wait for chip ready function.
4242  */
static void nand_sync(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret;

	pr_debug("%s: called\n", __func__);

	/*
	 * Taking the device lock waits for any pending operation to finish;
	 * there is nothing else to flush, so drop it right away.
	 */
	ret = nand_get_device(chip);
	WARN_ON(ret);
	nand_release_device(chip);
}
4254 
4255 /**
4256  * nand_block_isbad - [MTD Interface] Check if block at offset is bad
4257  * @mtd: MTD device structure
4258  * @offs: offset relative to mtd start
4259  */
4260 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4261 {
4262 	struct nand_chip *chip = mtd_to_nand(mtd);
4263 	int chipnr = (int)(offs >> chip->chip_shift);
4264 	int ret;
4265 
4266 	/* Select the NAND device */
4267 	ret = nand_get_device(chip);
4268 	if (ret)
4269 		return ret;
4270 
4271 	nand_select_target(chip, chipnr);
4272 
4273 	ret = nand_block_checkbad(chip, offs, 0);
4274 
4275 	nand_deselect_target(chip);
4276 	nand_release_device(chip);
4277 
4278 	return ret;
4279 }
4280 
4281 /**
4282  * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
4283  * @mtd: MTD device structure
4284  * @ofs: offset relative to mtd start
4285  */
4286 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4287 {
4288 	int ret;
4289 
4290 	ret = nand_block_isbad(mtd, ofs);
4291 	if (ret) {
4292 		/* If it was bad already, return success and do nothing */
4293 		if (ret > 0)
4294 			return 0;
4295 		return ret;
4296 	}
4297 
4298 	return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4299 }
4300 
4301 /**
4302  * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
4303  * @mtd: MTD device structure
4304  * @ofs: offset relative to mtd start
4305  * @len: length of mtd
4306  */
4307 static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
4308 {
4309 	struct nand_chip *chip = mtd_to_nand(mtd);
4310 	u32 part_start_block;
4311 	u32 part_end_block;
4312 	u32 part_start_die;
4313 	u32 part_end_die;
4314 
4315 	/*
4316 	 * max_bb_per_die and blocks_per_die used to determine
4317 	 * the maximum bad block count.
4318 	 */
4319 	if (!chip->max_bb_per_die || !chip->blocks_per_die)
4320 		return -ENOTSUPP;
4321 
4322 	/* Get the start and end of the partition in erase blocks. */
4323 	part_start_block = mtd_div_by_eb(ofs, mtd);
4324 	part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
4325 
4326 	/* Get the start and end LUNs of the partition. */
4327 	part_start_die = part_start_block / chip->blocks_per_die;
4328 	part_end_die = part_end_block / chip->blocks_per_die;
4329 
4330 	/*
4331 	 * Look up the bad blocks per unit and multiply by the number of units
4332 	 * that the partition spans.
4333 	 */
4334 	return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
4335 }
4336 
4337 /**
4338  * nand_suspend - [MTD Interface] Suspend the NAND flash
4339  * @mtd: MTD device structure
4340  */
static int nand_suspend(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/*
	 * Flag the chip as suspended under the chip lock; presumably the
	 * operation-start path checks this flag so no new access can begin
	 * until nand_resume() clears it — confirm against nand_get_device().
	 */
	mutex_lock(&chip->lock);
	chip->suspended = 1;
	mutex_unlock(&chip->lock);

	return 0;
}
4351 
4352 /**
4353  * nand_resume - [MTD Interface] Resume the NAND flash
4354  * @mtd: MTD device structure
4355  */
4356 static void nand_resume(struct mtd_info *mtd)
4357 {
4358 	struct nand_chip *chip = mtd_to_nand(mtd);
4359 
4360 	mutex_lock(&chip->lock);
4361 	if (chip->suspended)
4362 		chip->suspended = 0;
4363 	else
4364 		pr_err("%s called for a chip which is not in suspended state\n",
4365 			__func__);
4366 	mutex_unlock(&chip->lock);
4367 }
4368 
4369 /**
4370  * nand_shutdown - [MTD Interface] Finish the current NAND operation and
4371  *                 prevent further operations
4372  * @mtd: MTD device structure
4373  */
static void nand_shutdown(struct mtd_info *mtd)
{
	/* Suspending the chip is enough to block any further operation */
	nand_suspend(mtd);
}
4378 
4379 /* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
	/* If no controller is provided, use the dummy, legacy one. */
	if (!chip->controller) {
		chip->controller = &chip->legacy.dummy_controller;
		nand_controller_init(chip->controller);
	}

	/* Fill in the legacy hooks the driver did not provide */
	nand_legacy_set_defaults(chip);

	/* Buffers need no special alignment unless the driver says so */
	if (!chip->buf_align)
		chip->buf_align = 1;
}
4393 
4394 /* Sanitize ONFI strings so we can safely print them */
4395 void sanitize_string(uint8_t *s, size_t len)
4396 {
4397 	ssize_t i;
4398 
4399 	/* Null terminate */
4400 	s[len - 1] = 0;
4401 
4402 	/* Remove non printable chars */
4403 	for (i = 0; i < len - 1; i++) {
4404 		if (s[i] < ' ' || s[i] > 127)
4405 			s[i] = '?';
4406 	}
4407 
4408 	/* Remove trailing spaces */
4409 	strim(s);
4410 }
4411 
4412 /*
4413  * nand_id_has_period - Check if an ID string has a given wraparound period
4414  * @id_data: the ID string
4415  * @arrlen: the length of the @id_data array
 * @period: the period of repetition
4417  *
4418  * Check if an ID string is repeated within a given sequence of bytes at
4419  * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
4420  * period of 3). This is a helper function for nand_id_len(). Returns non-zero
4421  * if the repetition has a period of @period; otherwise, returns zero.
4422  */
/*
 * nand_id_has_period - Check if an ID string has a given wraparound period
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 * @period: the candidate repetition period
 *
 * Returns non-zero when every byte equals the byte @period positions before
 * it (e.g. {0x20,0x01,0x7F,0x20} repeats with period 3), zero otherwise.
 */
static int nand_id_has_period(u8 *id_data, int arrlen, int period)
{
	int offset, pos;

	for (offset = 0; offset < period; offset++)
		for (pos = offset + period; pos < arrlen; pos += period)
			if (id_data[offset] != id_data[pos])
				return 0;

	return 1;
}

/*
 * nand_id_len - Get the length of an ID string returned by CMD_READID
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 *
 * Returns the length of the ID string, according to known wraparound/trailing
 * zero patterns. If no pattern exists, returns the length of the array.
 */
static int nand_id_len(u8 *id_data, int arrlen)
{
	int nonzero_end, period;

	/* Locate the end of the non-zero prefix */
	nonzero_end = arrlen;
	while (nonzero_end > 0 && !id_data[nonzero_end - 1])
		nonzero_end--;

	/* The whole array is zero */
	if (!nonzero_end)
		return 0;

	/* A wraparound pattern: the shortest period is the ID length */
	for (period = 1; period < arrlen; period++)
		if (nand_id_has_period(id_data, arrlen, period))
			return period;

	/* No repetition, but the string ends with zero padding */
	if (nonzero_end < arrlen)
		return nonzero_end;

	/* No pattern detected */
	return arrlen;
}
4470 
4471 /* Extract the bits of per cell from the 3rd byte of the extended ID */
4472 static int nand_get_bits_per_cell(u8 cellinfo)
4473 {
4474 	int bits;
4475 
4476 	bits = cellinfo & NAND_CI_CELLTYPE_MSK;
4477 	bits >>= NAND_CI_CELLTYPE_SHIFT;
4478 	return bits + 1;
4479 }
4480 
4481 /*
4482  * Many new NAND share similar device ID codes, which represent the size of the
4483  * chip. The rest of the parameters must be decoded according to generic or
4484  * manufacturer-specific "extended ID" decoding patterns.
4485  */
4486 void nand_decode_ext_id(struct nand_chip *chip)
4487 {
4488 	struct mtd_info *mtd = nand_to_mtd(chip);
4489 	int extid;
4490 	u8 *id_data = chip->id.data;
4491 	/* The 3rd id byte holds MLC / multichip data */
4492 	chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4493 	/* The 4th id byte is the important one */
4494 	extid = id_data[3];
4495 
4496 	/* Calc pagesize */
4497 	mtd->writesize = 1024 << (extid & 0x03);
4498 	extid >>= 2;
4499 	/* Calc oobsize */
4500 	mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
4501 	extid >>= 2;
4502 	/* Calc blocksize. Blocksize is multiples of 64KiB */
4503 	mtd->erasesize = (64 * 1024) << (extid & 0x03);
4504 	extid >>= 2;
4505 	/* Get buswidth information */
4506 	if (extid & 0x1)
4507 		chip->options |= NAND_BUSWIDTH_16;
4508 }
4509 EXPORT_SYMBOL_GPL(nand_decode_ext_id);
4510 
4511 /*
4512  * Old devices have chip data hardcoded in the device ID table. nand_decode_id
4513  * decodes a matching ID table entry and assigns the MTD size parameters for
4514  * the chip.
4515  */
4516 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
4517 {
4518 	struct mtd_info *mtd = nand_to_mtd(chip);
4519 
4520 	mtd->erasesize = type->erasesize;
4521 	mtd->writesize = type->pagesize;
4522 	mtd->oobsize = mtd->writesize / 32;
4523 
4524 	/* All legacy ID NAND are small-page, SLC */
4525 	chip->bits_per_cell = 1;
4526 }
4527 
4528 /*
4529  * Set the bad block marker/indicator (BBM/BBI) patterns according to some
4530  * heuristic patterns using various detected parameters (e.g., manufacturer,
4531  * page size, cell-type information).
4532  */
4533 static void nand_decode_bbm_options(struct nand_chip *chip)
4534 {
4535 	struct mtd_info *mtd = nand_to_mtd(chip);
4536 
4537 	/* Set the bad block position */
4538 	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4539 		chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
4540 	else
4541 		chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
4542 }
4543 
4544 static inline bool is_full_id_nand(struct nand_flash_dev *type)
4545 {
4546 	return type->id_len;
4547 }
4548 
4549 static bool find_full_id_nand(struct nand_chip *chip,
4550 			      struct nand_flash_dev *type)
4551 {
4552 	struct mtd_info *mtd = nand_to_mtd(chip);
4553 	u8 *id_data = chip->id.data;
4554 
4555 	if (!strncmp(type->id, id_data, type->id_len)) {
4556 		mtd->writesize = type->pagesize;
4557 		mtd->erasesize = type->erasesize;
4558 		mtd->oobsize = type->oobsize;
4559 
4560 		chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4561 		chip->chipsize = (uint64_t)type->chipsize << 20;
4562 		chip->options |= type->options;
4563 		chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
4564 		chip->ecc_step_ds = NAND_ECC_STEP(type);
4565 		chip->onfi_timing_mode_default =
4566 					type->onfi_timing_mode_default;
4567 
4568 		chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4569 		if (!chip->parameters.model)
4570 			return false;
4571 
4572 		return true;
4573 	}
4574 	return false;
4575 }
4576 
4577 /*
4578  * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
4579  * compliant and does not have a full-id or legacy-id entry in the nand_ids
4580  * table.
4581  */
4582 static void nand_manufacturer_detect(struct nand_chip *chip)
4583 {
4584 	/*
4585 	 * Try manufacturer detection if available and use
4586 	 * nand_decode_ext_id() otherwise.
4587 	 */
4588 	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4589 	    chip->manufacturer.desc->ops->detect) {
4590 		/* The 3rd id byte holds MLC / multichip data */
4591 		chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
4592 		chip->manufacturer.desc->ops->detect(chip);
4593 	} else {
4594 		nand_decode_ext_id(chip);
4595 	}
4596 }
4597 
4598 /*
4599  * Manufacturer initialization. This function is called for all NANDs including
4600  * ONFI and JEDEC compliant ones.
4601  * Manufacturer drivers should put all their specific initialization code in
4602  * their ->init() hook.
4603  */
4604 static int nand_manufacturer_init(struct nand_chip *chip)
4605 {
4606 	if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4607 	    !chip->manufacturer.desc->ops->init)
4608 		return 0;
4609 
4610 	return chip->manufacturer.desc->ops->init(chip);
4611 }
4612 
4613 /*
4614  * Manufacturer cleanup. This function is called for all NANDs including
4615  * ONFI and JEDEC compliant ones.
4616  * Manufacturer drivers should put all their specific cleanup code in their
4617  * ->cleanup() hook.
4618  */
4619 static void nand_manufacturer_cleanup(struct nand_chip *chip)
4620 {
4621 	/* Release manufacturer private data */
4622 	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4623 	    chip->manufacturer.desc->ops->cleanup)
4624 		chip->manufacturer.desc->ops->cleanup(chip);
4625 }
4626 
4627 static const char *
4628 nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
4629 {
4630 	return manufacturer ? manufacturer->name : "Unknown";
4631 }
4632 
4633 /*
4634  * Get the flash and manufacturer id and lookup if the type is supported.
4635  */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer *manufacturer;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int busw, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	ret = nand_reset(chip, 0);
	if (ret)
		return ret;

	/* Select the device */
	nand_select_target(chip, 0);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);
	if (ret)
		return ret;

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read entire ID string */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
	if (ret)
		return ret;

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer = nand_get_manufacturer(maf_id);
	chip->manufacturer.desc = manufacturer;

	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	/* First try a full-ID match, then fall back on the device ID alone */
	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			break;
		}
	}

	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_onfi_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		ret = nand_jedec_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)
		return -ENOMEM;

	chip->chipsize = (uint64_t)type->chipsize << 20;

	/* Table entries without a page size need extended-ID decoding */
	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

ident_done:
	if (!mtd->name)
		mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
		ret = -EINVAL;

		goto free_detect_allocation;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	if (chip->chipsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	/* More than 16 bits of row address requires a 3rd row address cycle */
	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;

	nand_legacy_adjust_cmdfunc(chip);

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
	return 0;

free_detect_allocation:
	kfree(chip->parameters.model);

	return ret;
}
4808 
/* DT "nand-ecc-mode" property strings, indexed by nand_ecc_modes_t value */
static const char * const nand_ecc_modes[] = {
	[NAND_ECC_NONE]		= "none",
	[NAND_ECC_SOFT]		= "soft",
	[NAND_ECC_HW]		= "hw",
	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
	[NAND_ECC_ON_DIE]	= "on-die",
};
4817 
4818 static int of_get_nand_ecc_mode(struct device_node *np)
4819 {
4820 	const char *pm;
4821 	int err, i;
4822 
4823 	err = of_property_read_string(np, "nand-ecc-mode", &pm);
4824 	if (err < 0)
4825 		return err;
4826 
4827 	for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4828 		if (!strcasecmp(pm, nand_ecc_modes[i]))
4829 			return i;
4830 
4831 	/*
4832 	 * For backward compatibility we support few obsoleted values that don't
4833 	 * have their mappings into nand_ecc_modes_t anymore (they were merged
4834 	 * with other enums).
4835 	 */
4836 	if (!strcasecmp(pm, "soft_bch"))
4837 		return NAND_ECC_SOFT;
4838 
4839 	return -ENODEV;
4840 }
4841 
/* DT "nand-ecc-algo" property strings, indexed by ECC algorithm value */
static const char * const nand_ecc_algos[] = {
	[NAND_ECC_HAMMING]	= "hamming",
	[NAND_ECC_BCH]		= "bch",
	[NAND_ECC_RS]		= "rs",
};
4847 
4848 static int of_get_nand_ecc_algo(struct device_node *np)
4849 {
4850 	const char *pm;
4851 	int err, i;
4852 
4853 	err = of_property_read_string(np, "nand-ecc-algo", &pm);
4854 	if (!err) {
4855 		for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4856 			if (!strcasecmp(pm, nand_ecc_algos[i]))
4857 				return i;
4858 		return -ENODEV;
4859 	}
4860 
4861 	/*
4862 	 * For backward compatibility we also read "nand-ecc-mode" checking
4863 	 * for some obsoleted values that were specifying ECC algorithm.
4864 	 */
4865 	err = of_property_read_string(np, "nand-ecc-mode", &pm);
4866 	if (err < 0)
4867 		return err;
4868 
4869 	if (!strcasecmp(pm, "soft"))
4870 		return NAND_ECC_HAMMING;
4871 	else if (!strcasecmp(pm, "soft_bch"))
4872 		return NAND_ECC_BCH;
4873 
4874 	return -ENODEV;
4875 }
4876 
4877 static int of_get_nand_ecc_step_size(struct device_node *np)
4878 {
4879 	int ret;
4880 	u32 val;
4881 
4882 	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4883 	return ret ? ret : val;
4884 }
4885 
4886 static int of_get_nand_ecc_strength(struct device_node *np)
4887 {
4888 	int ret;
4889 	u32 val;
4890 
4891 	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4892 	return ret ? ret : val;
4893 }
4894 
4895 static int of_get_nand_bus_width(struct device_node *np)
4896 {
4897 	u32 val;
4898 
4899 	if (of_property_read_u32(np, "nand-bus-width", &val))
4900 		return 8;
4901 
4902 	switch (val) {
4903 	case 8:
4904 	case 16:
4905 		return val;
4906 	default:
4907 		return -EIO;
4908 	}
4909 }
4910 
/* True when the DT asks for the bad block table to be stored on flash */
static bool of_get_nand_on_flash_bbt(struct device_node *np)
{
	return of_property_read_bool(np, "nand-on-flash-bbt");
}
4915 
4916 static int nand_dt_init(struct nand_chip *chip)
4917 {
4918 	struct device_node *dn = nand_get_flash_node(chip);
4919 	int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4920 
4921 	if (!dn)
4922 		return 0;
4923 
4924 	if (of_get_nand_bus_width(dn) == 16)
4925 		chip->options |= NAND_BUSWIDTH_16;
4926 
4927 	if (of_property_read_bool(dn, "nand-is-boot-medium"))
4928 		chip->options |= NAND_IS_BOOT_MEDIUM;
4929 
4930 	if (of_get_nand_on_flash_bbt(dn))
4931 		chip->bbt_options |= NAND_BBT_USE_FLASH;
4932 
4933 	ecc_mode = of_get_nand_ecc_mode(dn);
4934 	ecc_algo = of_get_nand_ecc_algo(dn);
4935 	ecc_strength = of_get_nand_ecc_strength(dn);
4936 	ecc_step = of_get_nand_ecc_step_size(dn);
4937 
4938 	if (ecc_mode >= 0)
4939 		chip->ecc.mode = ecc_mode;
4940 
4941 	if (ecc_algo >= 0)
4942 		chip->ecc.algo = ecc_algo;
4943 
4944 	if (ecc_strength >= 0)
4945 		chip->ecc.strength = ecc_strength;
4946 
4947 	if (ecc_step > 0)
4948 		chip->ecc.size = ecc_step;
4949 
4950 	if (of_property_read_bool(dn, "nand-ecc-maximize"))
4951 		chip->ecc.options |= NAND_ECC_MAXIMIZE;
4952 
4953 	return 0;
4954 }
4955 
4956 /**
4957  * nand_scan_ident - Scan for the NAND device
4958  * @chip: NAND chip object
4959  * @maxchips: number of chips to scan for
4960  * @table: alternative NAND ID table
4961  *
4962  * This is the first phase of the normal nand_scan() function. It reads the
4963  * flash ID and sets up MTD fields accordingly.
4964  *
 * This helper used to be called directly from controller drivers that needed
 * to tweak some ECC-related parameters before nand_scan_tail(). This separation
 * prevented dynamic allocations during this phase, which was inconvenient and
 * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
4969  */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	/* Assume all dies are deselected when we enter nand_scan_ident(). */
	chip->cur_cs = -1;

	mutex_init(&chip->lock);

	/* Enforce the right timings for reset/detection */
	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);

	/* Parse DT properties (bus width, ECC settings, BBT options) */
	ret = nand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/*
	 * Start with chips->numchips = maxchips to let nand_select_target() do
	 * its job. chip->numchips will be adjusted after.
	 */
	chip->numchips = maxchips;

	/* Set the default functions */
	nand_set_defaults(chip);

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		nand_deselect_target(chip);
		return ret;
	}

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	nand_deselect_target(chip);

	/*
	 * Check for a chip array: additional dies belong to the array only
	 * if they report the same manufacturer/device ID as the first one.
	 */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See comment in nand_get_flash_type for reset */
		ret = nand_reset(chip, i);
		if (ret)
			break;

		nand_select_target(chip, i);
		/* Send the command for reading device ID */
		ret = nand_readid_op(chip, 0, id, sizeof(id));
		if (ret)
			break;
		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			nand_deselect_target(chip);
			break;
		}
		nand_deselect_target(chip);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	chip->numchips = i;
	mtd->size = i * chip->chipsize;

	return 0;
}
5050 
static void nand_scan_ident_cleanup(struct nand_chip *chip)
{
	/* Free the identification data allocated during nand_scan_ident() */
	kfree(chip->parameters.model);
	kfree(chip->parameters.onfi);
}
5056 
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	/* Only meaningful for software ECC */
	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_HAMMING:
		ecc->calculate = nand_calculate_ecc;
		ecc->correct = nand_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		/* Hamming: 3 ECC bytes per 256-byte step, 1 bit correctable */
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		return 0;
	case NAND_ECC_BCH:
		if (!mtd_nand_has_bch()) {
			WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = nand_bch_calculate_ecc;
		ecc->correct = nand_bch_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		* Board driver should supply ecc.size and ecc.strength
		* values to select how many bits are correctable.
		* Otherwise, default to 4 bits for large page devices.
		*/
		if (!ecc->size && (mtd->oobsize >= 64)) {
			ecc->size = 512;
			ecc->strength = 4;
		}

		/*
		 * if no ecc placement scheme was provided pickup the default
		 * large page one.
		 */
		if (!mtd->ooblayout) {
			/* handle large page devices only */
			if (mtd->oobsize < 64) {
				WARN(1, "OOB layout is required when using software BCH on small pages\n");
				return -EINVAL;
			}

			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);

		}

		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
		    ecc->options & NAND_ECC_MAXIMIZE) {
			int steps, bytes;

			/* Always prefer 1k blocks over 512bytes ones */
			ecc->size = 1024;
			steps = mtd->writesize / ecc->size;

			/* Reserve 2 bytes for the BBM */
			bytes = (mtd->oobsize - 2) / steps;
			ecc->strength = bytes * 8 / fls(8 * ecc->size);
		}

		/* See nand_bch_init() for details. */
		ecc->bytes = 0;
		ecc->priv = nand_bch_init(mtd);
		if (!ecc->priv) {
			WARN(1, "BCH ECC initialization failed!\n");
			return -EINVAL;
		}
		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}
5156 
5157 /**
5158  * nand_check_ecc_caps - check the sanity of preset ECC settings
5159  * @chip: nand chip info structure
5160  * @caps: ECC caps info structure
5161  * @oobavail: OOB size that the ECC engine can use
5162  *
5163  * When ECC step size and strength are already set, check if they are supported
5164  * by the controller and the calculated ECC bytes fit within the chip's OOB.
5165  * On success, the calculated ECC bytes is set.
5166  */
static int
nand_check_ecc_caps(struct nand_chip *chip,
		    const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int preset_step = chip->ecc.size;
	int preset_strength = chip->ecc.strength;
	int ecc_bytes, nsteps = mtd->writesize / preset_step;
	int i, j;

	/* Look for a (step size, strength) pair the controller supports */
	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];

		if (stepinfo->stepsize != preset_step)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			if (stepinfo->strengths[j] != preset_strength)
				continue;

			ecc_bytes = caps->calc_ecc_bytes(preset_step,
							 preset_strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				return ecc_bytes;

			/* The whole page's ECC bytes must fit in the OOB */
			if (ecc_bytes * nsteps > oobavail) {
				pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
				       preset_step, preset_strength);
				return -ENOSPC;
			}

			chip->ecc.bytes = ecc_bytes;

			return 0;
		}
	}

	pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
	       preset_step, preset_strength);

	return -ENOTSUPP;
}
5210 
5211 /**
5212  * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
5213  * @chip: nand chip info structure
5214  * @caps: ECC engine caps info structure
5215  * @oobavail: OOB size that the ECC engine can use
5216  *
5217  * If a chip's ECC requirement is provided, try to meet it with the least
5218  * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
5219  * On success, the chosen ECC settings are set.
5220  */
5221 static int
5222 nand_match_ecc_req(struct nand_chip *chip,
5223 		   const struct nand_ecc_caps *caps, int oobavail)
5224 {
5225 	struct mtd_info *mtd = nand_to_mtd(chip);
5226 	const struct nand_ecc_step_info *stepinfo;
5227 	int req_step = chip->ecc_step_ds;
5228 	int req_strength = chip->ecc_strength_ds;
5229 	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
5230 	int best_step, best_strength, best_ecc_bytes;
5231 	int best_ecc_bytes_total = INT_MAX;
5232 	int i, j;
5233 
5234 	/* No information provided by the NAND chip */
5235 	if (!req_step || !req_strength)
5236 		return -ENOTSUPP;
5237 
5238 	/* number of correctable bits the chip requires in a page */
5239 	req_corr = mtd->writesize / req_step * req_strength;
5240 
5241 	for (i = 0; i < caps->nstepinfos; i++) {
5242 		stepinfo = &caps->stepinfos[i];
5243 		step_size = stepinfo->stepsize;
5244 
5245 		for (j = 0; j < stepinfo->nstrengths; j++) {
5246 			strength = stepinfo->strengths[j];
5247 
5248 			/*
5249 			 * If both step size and strength are smaller than the
5250 			 * chip's requirement, it is not easy to compare the
5251 			 * resulted reliability.
5252 			 */
5253 			if (step_size < req_step && strength < req_strength)
5254 				continue;
5255 
5256 			if (mtd->writesize % step_size)
5257 				continue;
5258 
5259 			nsteps = mtd->writesize / step_size;
5260 
5261 			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5262 			if (WARN_ON_ONCE(ecc_bytes < 0))
5263 				continue;
5264 			ecc_bytes_total = ecc_bytes * nsteps;
5265 
5266 			if (ecc_bytes_total > oobavail ||
5267 			    strength * nsteps < req_corr)
5268 				continue;
5269 
5270 			/*
5271 			 * We assume the best is to meet the chip's requrement
5272 			 * with the least number of ECC bytes.
5273 			 */
5274 			if (ecc_bytes_total < best_ecc_bytes_total) {
5275 				best_ecc_bytes_total = ecc_bytes_total;
5276 				best_step = step_size;
5277 				best_strength = strength;
5278 				best_ecc_bytes = ecc_bytes;
5279 			}
5280 		}
5281 	}
5282 
5283 	if (best_ecc_bytes_total == INT_MAX)
5284 		return -ENOTSUPP;
5285 
5286 	chip->ecc.size = best_step;
5287 	chip->ecc.strength = best_strength;
5288 	chip->ecc.bytes = best_ecc_bytes;
5289 
5290 	return 0;
5291 }
5292 
5293 /**
5294  * nand_maximize_ecc - choose the max ECC strength available
5295  * @chip: nand chip info structure
5296  * @caps: ECC engine caps info structure
5297  * @oobavail: OOB size that the ECC engine can use
5298  *
5299  * Choose the max ECC strength that is supported on the controller, and can fit
5300  * within the chip's OOB.  On success, the chosen ECC settings are set.
5301  */
5302 static int
5303 nand_maximize_ecc(struct nand_chip *chip,
5304 		  const struct nand_ecc_caps *caps, int oobavail)
5305 {
5306 	struct mtd_info *mtd = nand_to_mtd(chip);
5307 	const struct nand_ecc_step_info *stepinfo;
5308 	int step_size, strength, nsteps, ecc_bytes, corr;
5309 	int best_corr = 0;
5310 	int best_step = 0;
5311 	int best_strength, best_ecc_bytes;
5312 	int i, j;
5313 
5314 	for (i = 0; i < caps->nstepinfos; i++) {
5315 		stepinfo = &caps->stepinfos[i];
5316 		step_size = stepinfo->stepsize;
5317 
5318 		/* If chip->ecc.size is already set, respect it */
5319 		if (chip->ecc.size && step_size != chip->ecc.size)
5320 			continue;
5321 
5322 		for (j = 0; j < stepinfo->nstrengths; j++) {
5323 			strength = stepinfo->strengths[j];
5324 
5325 			if (mtd->writesize % step_size)
5326 				continue;
5327 
5328 			nsteps = mtd->writesize / step_size;
5329 
5330 			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5331 			if (WARN_ON_ONCE(ecc_bytes < 0))
5332 				continue;
5333 
5334 			if (ecc_bytes * nsteps > oobavail)
5335 				continue;
5336 
5337 			corr = strength * nsteps;
5338 
5339 			/*
5340 			 * If the number of correctable bits is the same,
5341 			 * bigger step_size has more reliability.
5342 			 */
5343 			if (corr > best_corr ||
5344 			    (corr == best_corr && step_size > best_step)) {
5345 				best_corr = corr;
5346 				best_step = step_size;
5347 				best_strength = strength;
5348 				best_ecc_bytes = ecc_bytes;
5349 			}
5350 		}
5351 	}
5352 
5353 	if (!best_corr)
5354 		return -ENOTSUPP;
5355 
5356 	chip->ecc.size = best_step;
5357 	chip->ecc.strength = best_strength;
5358 	chip->ecc.bytes = best_ecc_bytes;
5359 
5360 	return 0;
5361 }
5362 
5363 /**
5364  * nand_ecc_choose_conf - Set the ECC strength and ECC step size
5365  * @chip: nand chip info structure
5366  * @caps: ECC engine caps info structure
5367  * @oobavail: OOB size that the ECC engine can use
5368  *
5369  * Choose the ECC configuration according to following logic
5370  *
5371  * 1. If both ECC step size and ECC strength are already set (usually by DT)
5372  *    then check if it is supported by this controller.
5373  * 2. If NAND_ECC_MAXIMIZE is set, then select maximum ECC strength.
5374  * 3. Otherwise, try to match the ECC step size and ECC strength closest
5375  *    to the chip's requirement. If available OOB size can't fit the chip
5376  *    requirement then fallback to the maximum ECC step size and ECC strength.
5377  *
5378  * On success, the chosen ECC settings are set.
5379  */
5380 int nand_ecc_choose_conf(struct nand_chip *chip,
5381 			 const struct nand_ecc_caps *caps, int oobavail)
5382 {
5383 	struct mtd_info *mtd = nand_to_mtd(chip);
5384 
5385 	if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
5386 		return -EINVAL;
5387 
5388 	if (chip->ecc.size && chip->ecc.strength)
5389 		return nand_check_ecc_caps(chip, caps, oobavail);
5390 
5391 	if (chip->ecc.options & NAND_ECC_MAXIMIZE)
5392 		return nand_maximize_ecc(chip, caps, oobavail);
5393 
5394 	if (!nand_match_ecc_req(chip, caps, oobavail))
5395 		return 0;
5396 
5397 	return nand_maximize_ecc(chip, caps, oobavail);
5398 }
5399 EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
5400 
5401 /*
5402  * Check if the chip configuration meet the datasheet requirements.
5403 
5404  * If our configuration corrects A bits per B bytes and the minimum
5405  * required correction level is X bits per Y bytes, then we must ensure
5406  * both of the following are true:
5407  *
5408  * (1) A / B >= X / Y
5409  * (2) A >= X
5410  *
5411  * Requirement (1) ensures we can correct for the required bitflip density.
5412  * Requirement (2) ensures we can correct even when all bitflips are clumped
5413  * in the same sector.
5414  */
5415 static bool nand_ecc_strength_good(struct nand_chip *chip)
5416 {
5417 	struct mtd_info *mtd = nand_to_mtd(chip);
5418 	struct nand_ecc_ctrl *ecc = &chip->ecc;
5419 	int corr, ds_corr;
5420 
5421 	if (ecc->size == 0 || chip->ecc_step_ds == 0)
5422 		/* Not enough information */
5423 		return true;
5424 
5425 	/*
5426 	 * We get the number of corrected bits per page to compare
5427 	 * the correction density.
5428 	 */
5429 	corr = (mtd->writesize * ecc->strength) / ecc->size;
5430 	ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
5431 
5432 	return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
5433 }
5434 
5435 /**
5436  * nand_scan_tail - Scan for the NAND device
5437  * @chip: NAND chip object
5438  *
5439  * This is the second phase of the normal nand_scan() function. It fills out
5440  * all the uninitialized function pointers with the defaults and scans for a
5441  * bad block table if appropriate.
5442  */
5443 static int nand_scan_tail(struct nand_chip *chip)
5444 {
5445 	struct mtd_info *mtd = nand_to_mtd(chip);
5446 	struct nand_ecc_ctrl *ecc = &chip->ecc;
5447 	int ret, i;
5448 
5449 	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
5450 	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
5451 		   !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
5452 		return -EINVAL;
5453 	}
5454 
5455 	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
5456 	if (!chip->data_buf)
5457 		return -ENOMEM;
5458 
5459 	/*
5460 	 * FIXME: some NAND manufacturer drivers expect the first die to be
5461 	 * selected when manufacturer->init() is called. They should be fixed
5462 	 * to explictly select the relevant die when interacting with the NAND
5463 	 * chip.
5464 	 */
5465 	nand_select_target(chip, 0);
5466 	ret = nand_manufacturer_init(chip);
5467 	nand_deselect_target(chip);
5468 	if (ret)
5469 		goto err_free_buf;
5470 
5471 	/* Set the internal oob buffer location, just after the page data */
5472 	chip->oob_poi = chip->data_buf + mtd->writesize;
5473 
5474 	/*
5475 	 * If no default placement scheme is given, select an appropriate one.
5476 	 */
5477 	if (!mtd->ooblayout &&
5478 	    !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
5479 		switch (mtd->oobsize) {
5480 		case 8:
5481 		case 16:
5482 			mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
5483 			break;
5484 		case 64:
5485 		case 128:
5486 			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
5487 			break;
5488 		default:
5489 			/*
5490 			 * Expose the whole OOB area to users if ECC_NONE
5491 			 * is passed. We could do that for all kind of
5492 			 * ->oobsize, but we must keep the old large/small
5493 			 * page with ECC layout when ->oobsize <= 128 for
5494 			 * compatibility reasons.
5495 			 */
5496 			if (ecc->mode == NAND_ECC_NONE) {
5497 				mtd_set_ooblayout(mtd,
5498 						&nand_ooblayout_lp_ops);
5499 				break;
5500 			}
5501 
5502 			WARN(1, "No oob scheme defined for oobsize %d\n",
5503 				mtd->oobsize);
5504 			ret = -EINVAL;
5505 			goto err_nand_manuf_cleanup;
5506 		}
5507 	}
5508 
5509 	/*
5510 	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
5511 	 * selected and we have 256 byte pagesize fallback to software ECC
5512 	 */
5513 
5514 	switch (ecc->mode) {
5515 	case NAND_ECC_HW_OOB_FIRST:
5516 		/* Similar to NAND_ECC_HW, but a separate read_page handle */
5517 		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
5518 			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
5519 			ret = -EINVAL;
5520 			goto err_nand_manuf_cleanup;
5521 		}
5522 		if (!ecc->read_page)
5523 			ecc->read_page = nand_read_page_hwecc_oob_first;
5524 		/* fall through */
5525 
5526 	case NAND_ECC_HW:
5527 		/* Use standard hwecc read page function? */
5528 		if (!ecc->read_page)
5529 			ecc->read_page = nand_read_page_hwecc;
5530 		if (!ecc->write_page)
5531 			ecc->write_page = nand_write_page_hwecc;
5532 		if (!ecc->read_page_raw)
5533 			ecc->read_page_raw = nand_read_page_raw;
5534 		if (!ecc->write_page_raw)
5535 			ecc->write_page_raw = nand_write_page_raw;
5536 		if (!ecc->read_oob)
5537 			ecc->read_oob = nand_read_oob_std;
5538 		if (!ecc->write_oob)
5539 			ecc->write_oob = nand_write_oob_std;
5540 		if (!ecc->read_subpage)
5541 			ecc->read_subpage = nand_read_subpage;
5542 		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
5543 			ecc->write_subpage = nand_write_subpage_hwecc;
5544 		/* fall through */
5545 
5546 	case NAND_ECC_HW_SYNDROME:
5547 		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
5548 		    (!ecc->read_page ||
5549 		     ecc->read_page == nand_read_page_hwecc ||
5550 		     !ecc->write_page ||
5551 		     ecc->write_page == nand_write_page_hwecc)) {
5552 			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
5553 			ret = -EINVAL;
5554 			goto err_nand_manuf_cleanup;
5555 		}
5556 		/* Use standard syndrome read/write page function? */
5557 		if (!ecc->read_page)
5558 			ecc->read_page = nand_read_page_syndrome;
5559 		if (!ecc->write_page)
5560 			ecc->write_page = nand_write_page_syndrome;
5561 		if (!ecc->read_page_raw)
5562 			ecc->read_page_raw = nand_read_page_raw_syndrome;
5563 		if (!ecc->write_page_raw)
5564 			ecc->write_page_raw = nand_write_page_raw_syndrome;
5565 		if (!ecc->read_oob)
5566 			ecc->read_oob = nand_read_oob_syndrome;
5567 		if (!ecc->write_oob)
5568 			ecc->write_oob = nand_write_oob_syndrome;
5569 
5570 		if (mtd->writesize >= ecc->size) {
5571 			if (!ecc->strength) {
5572 				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
5573 				ret = -EINVAL;
5574 				goto err_nand_manuf_cleanup;
5575 			}
5576 			break;
5577 		}
5578 		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
5579 			ecc->size, mtd->writesize);
5580 		ecc->mode = NAND_ECC_SOFT;
5581 		ecc->algo = NAND_ECC_HAMMING;
5582 		/* fall through */
5583 
5584 	case NAND_ECC_SOFT:
5585 		ret = nand_set_ecc_soft_ops(chip);
5586 		if (ret) {
5587 			ret = -EINVAL;
5588 			goto err_nand_manuf_cleanup;
5589 		}
5590 		break;
5591 
5592 	case NAND_ECC_ON_DIE:
5593 		if (!ecc->read_page || !ecc->write_page) {
5594 			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
5595 			ret = -EINVAL;
5596 			goto err_nand_manuf_cleanup;
5597 		}
5598 		if (!ecc->read_oob)
5599 			ecc->read_oob = nand_read_oob_std;
5600 		if (!ecc->write_oob)
5601 			ecc->write_oob = nand_write_oob_std;
5602 		break;
5603 
5604 	case NAND_ECC_NONE:
5605 		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
5606 		ecc->read_page = nand_read_page_raw;
5607 		ecc->write_page = nand_write_page_raw;
5608 		ecc->read_oob = nand_read_oob_std;
5609 		ecc->read_page_raw = nand_read_page_raw;
5610 		ecc->write_page_raw = nand_write_page_raw;
5611 		ecc->write_oob = nand_write_oob_std;
5612 		ecc->size = mtd->writesize;
5613 		ecc->bytes = 0;
5614 		ecc->strength = 0;
5615 		break;
5616 
5617 	default:
5618 		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
5619 		ret = -EINVAL;
5620 		goto err_nand_manuf_cleanup;
5621 	}
5622 
5623 	if (ecc->correct || ecc->calculate) {
5624 		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5625 		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5626 		if (!ecc->calc_buf || !ecc->code_buf) {
5627 			ret = -ENOMEM;
5628 			goto err_nand_manuf_cleanup;
5629 		}
5630 	}
5631 
5632 	/* For many systems, the standard OOB write also works for raw */
5633 	if (!ecc->read_oob_raw)
5634 		ecc->read_oob_raw = ecc->read_oob;
5635 	if (!ecc->write_oob_raw)
5636 		ecc->write_oob_raw = ecc->write_oob;
5637 
5638 	/* propagate ecc info to mtd_info */
5639 	mtd->ecc_strength = ecc->strength;
5640 	mtd->ecc_step_size = ecc->size;
5641 
5642 	/*
5643 	 * Set the number of read / write steps for one page depending on ECC
5644 	 * mode.
5645 	 */
5646 	ecc->steps = mtd->writesize / ecc->size;
5647 	if (ecc->steps * ecc->size != mtd->writesize) {
5648 		WARN(1, "Invalid ECC parameters\n");
5649 		ret = -EINVAL;
5650 		goto err_nand_manuf_cleanup;
5651 	}
5652 	ecc->total = ecc->steps * ecc->bytes;
5653 	if (ecc->total > mtd->oobsize) {
5654 		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
5655 		ret = -EINVAL;
5656 		goto err_nand_manuf_cleanup;
5657 	}
5658 
5659 	/*
5660 	 * The number of bytes available for a client to place data into
5661 	 * the out of band area.
5662 	 */
5663 	ret = mtd_ooblayout_count_freebytes(mtd);
5664 	if (ret < 0)
5665 		ret = 0;
5666 
5667 	mtd->oobavail = ret;
5668 
5669 	/* ECC sanity check: warn if it's too weak */
5670 	if (!nand_ecc_strength_good(chip))
5671 		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
5672 			mtd->name);
5673 
5674 	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
5675 	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
5676 		switch (ecc->steps) {
5677 		case 2:
5678 			mtd->subpage_sft = 1;
5679 			break;
5680 		case 4:
5681 		case 8:
5682 		case 16:
5683 			mtd->subpage_sft = 2;
5684 			break;
5685 		}
5686 	}
5687 	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
5688 
5689 	/* Invalidate the pagebuffer reference */
5690 	chip->pagebuf = -1;
5691 
5692 	/* Large page NAND with SOFT_ECC should support subpage reads */
5693 	switch (ecc->mode) {
5694 	case NAND_ECC_SOFT:
5695 		if (chip->page_shift > 9)
5696 			chip->options |= NAND_SUBPAGE_READ;
5697 		break;
5698 
5699 	default:
5700 		break;
5701 	}
5702 
5703 	/* Fill in remaining MTD driver data */
5704 	mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
5705 	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
5706 						MTD_CAP_NANDFLASH;
5707 	mtd->_erase = nand_erase;
5708 	mtd->_point = NULL;
5709 	mtd->_unpoint = NULL;
5710 	mtd->_panic_write = panic_nand_write;
5711 	mtd->_read_oob = nand_read_oob;
5712 	mtd->_write_oob = nand_write_oob;
5713 	mtd->_sync = nand_sync;
5714 	mtd->_lock = NULL;
5715 	mtd->_unlock = NULL;
5716 	mtd->_suspend = nand_suspend;
5717 	mtd->_resume = nand_resume;
5718 	mtd->_reboot = nand_shutdown;
5719 	mtd->_block_isreserved = nand_block_isreserved;
5720 	mtd->_block_isbad = nand_block_isbad;
5721 	mtd->_block_markbad = nand_block_markbad;
5722 	mtd->_max_bad_blocks = nand_max_bad_blocks;
5723 	mtd->writebufsize = mtd->writesize;
5724 
5725 	/*
5726 	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
5727 	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
5728 	 * properly set.
5729 	 */
5730 	if (!mtd->bitflip_threshold)
5731 		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
5732 
5733 	/* Initialize the ->data_interface field. */
5734 	ret = nand_init_data_interface(chip);
5735 	if (ret)
5736 		goto err_nand_manuf_cleanup;
5737 
5738 	/* Enter fastest possible mode on all dies. */
5739 	for (i = 0; i < chip->numchips; i++) {
5740 		ret = nand_setup_data_interface(chip, i);
5741 		if (ret)
5742 			goto err_nand_manuf_cleanup;
5743 	}
5744 
5745 	/* Check, if we should skip the bad block table scan */
5746 	if (chip->options & NAND_SKIP_BBTSCAN)
5747 		return 0;
5748 
5749 	/* Build bad block table */
5750 	ret = nand_create_bbt(chip);
5751 	if (ret)
5752 		goto err_nand_manuf_cleanup;
5753 
5754 	return 0;
5755 
5756 
5757 err_nand_manuf_cleanup:
5758 	nand_manufacturer_cleanup(chip);
5759 
5760 err_free_buf:
5761 	kfree(chip->data_buf);
5762 	kfree(ecc->code_buf);
5763 	kfree(ecc->calc_buf);
5764 
5765 	return ret;
5766 }
5767 
5768 static int nand_attach(struct nand_chip *chip)
5769 {
5770 	if (chip->controller->ops && chip->controller->ops->attach_chip)
5771 		return chip->controller->ops->attach_chip(chip);
5772 
5773 	return 0;
5774 }
5775 
5776 static void nand_detach(struct nand_chip *chip)
5777 {
5778 	if (chip->controller->ops && chip->controller->ops->detach_chip)
5779 		chip->controller->ops->detach_chip(chip);
5780 }
5781 
5782 /**
5783  * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
5784  * @chip: NAND chip object
5785  * @maxchips: number of chips to scan for.
5786  * @ids: optional flash IDs table
5787  *
5788  * This fills out all the uninitialized function pointers with the defaults.
5789  * The flash ID is read and the mtd/chip structures are filled with the
5790  * appropriate values.
5791  */
5792 int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
5793 		       struct nand_flash_dev *ids)
5794 {
5795 	int ret;
5796 
5797 	if (!maxchips)
5798 		return -EINVAL;
5799 
5800 	ret = nand_scan_ident(chip, maxchips, ids);
5801 	if (ret)
5802 		return ret;
5803 
5804 	ret = nand_attach(chip);
5805 	if (ret)
5806 		goto cleanup_ident;
5807 
5808 	ret = nand_scan_tail(chip);
5809 	if (ret)
5810 		goto detach_chip;
5811 
5812 	return 0;
5813 
5814 detach_chip:
5815 	nand_detach(chip);
5816 cleanup_ident:
5817 	nand_scan_ident_cleanup(chip);
5818 
5819 	return ret;
5820 }
5821 EXPORT_SYMBOL(nand_scan_with_ids);
5822 
5823 /**
5824  * nand_cleanup - [NAND Interface] Free resources held by the NAND device
5825  * @chip: NAND chip object
5826  */
5827 void nand_cleanup(struct nand_chip *chip)
5828 {
5829 	if (chip->ecc.mode == NAND_ECC_SOFT &&
5830 	    chip->ecc.algo == NAND_ECC_BCH)
5831 		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
5832 
5833 	/* Free bad block table memory */
5834 	kfree(chip->bbt);
5835 	kfree(chip->data_buf);
5836 	kfree(chip->ecc.code_buf);
5837 	kfree(chip->ecc.calc_buf);
5838 
5839 	/* Free bad block descriptor memory */
5840 	if (chip->badblock_pattern && chip->badblock_pattern->options
5841 			& NAND_BBT_DYNAMICSTRUCT)
5842 		kfree(chip->badblock_pattern);
5843 
5844 	/* Free manufacturer priv data. */
5845 	nand_manufacturer_cleanup(chip);
5846 
5847 	/* Free controller specific allocations after chip identification */
5848 	nand_detach(chip);
5849 
5850 	/* Free identification phase allocations */
5851 	nand_scan_ident_cleanup(chip);
5852 }
5853 
5854 EXPORT_SYMBOL_GPL(nand_cleanup);
5855 
5856 /**
5857  * nand_release - [NAND Interface] Unregister the MTD device and free resources
5858  *		  held by the NAND device
5859  * @chip: NAND chip object
5860  */
5861 void nand_release(struct nand_chip *chip)
5862 {
5863 	mtd_device_unregister(nand_to_mtd(chip));
5864 	nand_cleanup(chip);
5865 }
5866 EXPORT_SYMBOL_GPL(nand_release);
5867 
5868 MODULE_LICENSE("GPL");
5869 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
5870 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
5871 MODULE_DESCRIPTION("Generic NAND flash driver code");
5872