xref: /openbmc/linux/drivers/mtd/nand/raw/nand_base.c (revision e149ca29)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Overview:
4  *   This is the generic MTD driver for NAND flash devices. It should be
5  *   capable of working with almost all NAND chips currently available.
6  *
7  *	Additional technical information is available on
8  *	http://www.linux-mtd.infradead.org/doc/nand.html
9  *
10  *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
11  *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
12  *
13  *  Credits:
14  *	David Woodhouse for adding multichip support
15  *
16  *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
17  *	rework for 2K page size chips
18  *
19  *  TODO:
20  *	Enable cached programming for 2k page size chips
21  *	Check, if mtd->ecctype should be set to MTD_ECC_HW
22  *	if we have HW ECC support.
23  *	BBT table is not serialized, has to be fixed
24  */
25 
26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27 
28 #include <linux/module.h>
29 #include <linux/delay.h>
30 #include <linux/errno.h>
31 #include <linux/err.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/mm.h>
35 #include <linux/types.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/nand_ecc.h>
38 #include <linux/mtd/nand_bch.h>
39 #include <linux/interrupt.h>
40 #include <linux/bitops.h>
41 #include <linux/io.h>
42 #include <linux/mtd/partitions.h>
43 #include <linux/of.h>
44 #include <linux/gpio/consumer.h>
45 
46 #include "internals.h"
47 
48 /* Define default oob placement schemes for large and small page devices */
49 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
50 				 struct mtd_oob_region *oobregion)
51 {
52 	struct nand_chip *chip = mtd_to_nand(mtd);
53 	struct nand_ecc_ctrl *ecc = &chip->ecc;
54 
55 	if (section > 1)
56 		return -ERANGE;
57 
58 	if (!section) {
59 		oobregion->offset = 0;
60 		if (mtd->oobsize == 16)
61 			oobregion->length = 4;
62 		else
63 			oobregion->length = 3;
64 	} else {
65 		if (mtd->oobsize == 8)
66 			return -ERANGE;
67 
68 		oobregion->offset = 6;
69 		oobregion->length = ecc->total - 4;
70 	}
71 
72 	return 0;
73 }
74 
75 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
76 				  struct mtd_oob_region *oobregion)
77 {
78 	if (section > 1)
79 		return -ERANGE;
80 
81 	if (mtd->oobsize == 16) {
82 		if (section)
83 			return -ERANGE;
84 
85 		oobregion->length = 8;
86 		oobregion->offset = 8;
87 	} else {
88 		oobregion->length = 2;
89 		if (!section)
90 			oobregion->offset = 3;
91 		else
92 			oobregion->offset = 6;
93 	}
94 
95 	return 0;
96 }
97 
/* Default OOB layout used by small-page NAND devices (<= 16 bytes of OOB). */
const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
	.ecc = nand_ooblayout_ecc_sp,
	.free = nand_ooblayout_free_sp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
103 
104 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
105 				 struct mtd_oob_region *oobregion)
106 {
107 	struct nand_chip *chip = mtd_to_nand(mtd);
108 	struct nand_ecc_ctrl *ecc = &chip->ecc;
109 
110 	if (section || !ecc->total)
111 		return -ERANGE;
112 
113 	oobregion->length = ecc->total;
114 	oobregion->offset = mtd->oobsize - oobregion->length;
115 
116 	return 0;
117 }
118 
119 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
120 				  struct mtd_oob_region *oobregion)
121 {
122 	struct nand_chip *chip = mtd_to_nand(mtd);
123 	struct nand_ecc_ctrl *ecc = &chip->ecc;
124 
125 	if (section)
126 		return -ERANGE;
127 
128 	oobregion->length = mtd->oobsize - ecc->total - 2;
129 	oobregion->offset = 2;
130 
131 	return 0;
132 }
133 
/* Default OOB layout used by large-page NAND devices (> 512 byte pages). */
const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.free = nand_ooblayout_free_lp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
139 
140 /*
141  * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
142  * are placed at a fixed offset.
143  */
144 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
145 					 struct mtd_oob_region *oobregion)
146 {
147 	struct nand_chip *chip = mtd_to_nand(mtd);
148 	struct nand_ecc_ctrl *ecc = &chip->ecc;
149 
150 	if (section)
151 		return -ERANGE;
152 
153 	switch (mtd->oobsize) {
154 	case 64:
155 		oobregion->offset = 40;
156 		break;
157 	case 128:
158 		oobregion->offset = 80;
159 		break;
160 	default:
161 		return -EINVAL;
162 	}
163 
164 	oobregion->length = ecc->total;
165 	if (oobregion->offset + oobregion->length > mtd->oobsize)
166 		return -ERANGE;
167 
168 	return 0;
169 }
170 
171 static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
172 					  struct mtd_oob_region *oobregion)
173 {
174 	struct nand_chip *chip = mtd_to_nand(mtd);
175 	struct nand_ecc_ctrl *ecc = &chip->ecc;
176 	int ecc_offset = 0;
177 
178 	if (section < 0 || section > 1)
179 		return -ERANGE;
180 
181 	switch (mtd->oobsize) {
182 	case 64:
183 		ecc_offset = 40;
184 		break;
185 	case 128:
186 		ecc_offset = 80;
187 		break;
188 	default:
189 		return -EINVAL;
190 	}
191 
192 	if (section == 0) {
193 		oobregion->offset = 2;
194 		oobregion->length = ecc_offset - 2;
195 	} else {
196 		oobregion->offset = ecc_offset + ecc->total;
197 		oobregion->length = mtd->oobsize - oobregion->offset;
198 	}
199 
200 	return 0;
201 }
202 
/* Legacy large-page layout for 1-bit Hamming ECC at a fixed OOB offset. */
static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};
207 
208 static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
209 {
210 	int ret = 0;
211 
212 	/* Start address must align on block boundary */
213 	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
214 		pr_debug("%s: unaligned address\n", __func__);
215 		ret = -EINVAL;
216 	}
217 
218 	/* Length must align on block boundary */
219 	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
220 		pr_debug("%s: length not block aligned\n", __func__);
221 		ret = -EINVAL;
222 	}
223 
224 	return ret;
225 }
226 
227 /**
228  * nand_select_target() - Select a NAND target (A.K.A. die)
229  * @chip: NAND chip object
230  * @cs: the CS line to select. Note that this CS id is always from the chip
231  *	PoV, not the controller one
232  *
233  * Select a NAND target so that further operations executed on @chip go to the
234  * selected NAND target.
235  */
236 void nand_select_target(struct nand_chip *chip, unsigned int cs)
237 {
238 	/*
239 	 * cs should always lie between 0 and nanddev_ntargets(), when that's
240 	 * not the case it's a bug and the caller should be fixed.
241 	 */
242 	if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
243 		return;
244 
245 	chip->cur_cs = cs;
246 
247 	if (chip->legacy.select_chip)
248 		chip->legacy.select_chip(chip, cs);
249 }
250 EXPORT_SYMBOL_GPL(nand_select_target);
251 
/**
 * nand_deselect_target() - Deselect the currently selected target
 * @chip: NAND chip object
 *
 * Deselect the currently selected NAND target. The result of operations
 * executed on @chip after the target has been deselected is undefined.
 */
void nand_deselect_target(struct nand_chip *chip)
{
	/* Tell the legacy hook (when present) that no die is selected. */
	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, -1);

	/* -1 flags "no target currently selected". */
	chip->cur_cs = -1;
}
EXPORT_SYMBOL_GPL(nand_deselect_target);
267 
/**
 * nand_release_device - [GENERIC] release chip
 * @chip: NAND chip object
 *
 * Release chip lock and wake up anyone waiting on the device.
 */
static void nand_release_device(struct nand_chip *chip)
{
	/*
	 * Release the controller and the chip. Unlock order is the reverse
	 * of the locking order used in nand_get_device().
	 */
	mutex_unlock(&chip->controller->lock);
	mutex_unlock(&chip->lock);
}
280 
281 /**
282  * nand_bbm_get_next_page - Get the next page for bad block markers
283  * @chip: NAND chip object
284  * @page: First page to start checking for bad block marker usage
285  *
286  * Returns an integer that corresponds to the page offset within a block, for
287  * a page that is used to store bad block markers. If no more pages are
288  * available, -EINVAL is returned.
289  */
290 int nand_bbm_get_next_page(struct nand_chip *chip, int page)
291 {
292 	struct mtd_info *mtd = nand_to_mtd(chip);
293 	int last_page = ((mtd->erasesize - mtd->writesize) >>
294 			 chip->page_shift) & chip->pagemask;
295 	unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
296 		| NAND_BBM_LASTPAGE;
297 
298 	if (page == 0 && !(chip->options & bbm_flags))
299 		return 0;
300 	if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
301 		return 0;
302 	if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
303 		return 1;
304 	if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
305 		return last_page;
306 
307 	return -EINVAL;
308 }
309 
/**
 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * Check, if the block is bad.
 *
 * Returns 0 if the block is good, a positive value if it is bad, or a
 * negative error code if reading the OOB area failed.
 */
static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
	int first_page, page_offset;
	int res;
	u8 bad;

	/* Page index of the first page of the block containing @ofs. */
	first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	/* First in-block page expected to carry a bad block marker. */
	page_offset = nand_bbm_get_next_page(chip, 0);

	while (page_offset >= 0) {
		res = chip->ecc.read_oob(chip, first_page + page_offset);
		if (res < 0)
			return res;

		bad = chip->oob_poi[chip->badblockpos];

		/*
		 * With 8 significant badblockbits, anything other than 0xFF
		 * marks the block bad; otherwise count the set bits and
		 * compare against the threshold.
		 */
		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			res = hweight8(bad) < chip->badblockbits;
		if (res)
			return res;

		/* Move on to the next page holding a BBM, if any. */
		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return 0;
}
345 
346 static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
347 {
348 	if (chip->legacy.block_bad)
349 		return chip->legacy.block_bad(chip, ofs);
350 
351 	return nand_block_bad(chip, ofs);
352 }
353 
/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @chip: NAND chip structure
 *
 * Lock the device and its controller for exclusive access
 *
 * Return: -EBUSY if the chip has been suspended, 0 otherwise
 */
static int nand_get_device(struct nand_chip *chip)
{
	/*
	 * Take the chip lock first, then the controller lock; the reverse
	 * order is used when releasing in nand_release_device().
	 */
	mutex_lock(&chip->lock);
	if (chip->suspended) {
		/* A suspended chip must not be accessed; back out. */
		mutex_unlock(&chip->lock);
		return -EBUSY;
	}
	mutex_lock(&chip->controller->lock);

	return 0;
}
373 
374 /**
375  * nand_check_wp - [GENERIC] check if the chip is write protected
376  * @chip: NAND chip object
377  *
378  * Check, if the device is write protected. The function expects, that the
379  * device is already selected.
380  */
381 static int nand_check_wp(struct nand_chip *chip)
382 {
383 	u8 status;
384 	int ret;
385 
386 	/* Broken xD cards report WP despite being writable */
387 	if (chip->options & NAND_BROKEN_XD)
388 		return 0;
389 
390 	/* Check the WP bit */
391 	ret = nand_status_op(chip, &status);
392 	if (ret)
393 		return ret;
394 
395 	return status & NAND_STATUS_WP ? 0 : 1;
396 }
397 
398 /**
399  * nand_fill_oob - [INTERN] Transfer client buffer to oob
400  * @chip: NAND chip object
401  * @oob: oob data buffer
402  * @len: oob data write length
403  * @ops: oob ops structure
404  */
405 static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
406 			      struct mtd_oob_ops *ops)
407 {
408 	struct mtd_info *mtd = nand_to_mtd(chip);
409 	int ret;
410 
411 	/*
412 	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
413 	 * data from a previous OOB read.
414 	 */
415 	memset(chip->oob_poi, 0xff, mtd->oobsize);
416 
417 	switch (ops->mode) {
418 
419 	case MTD_OPS_PLACE_OOB:
420 	case MTD_OPS_RAW:
421 		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
422 		return oob + len;
423 
424 	case MTD_OPS_AUTO_OOB:
425 		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
426 						  ops->ooboffs, len);
427 		BUG_ON(ret);
428 		return oob + len;
429 
430 	default:
431 		BUG();
432 	}
433 	return NULL;
434 }
435 
/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band.
 *
 * Returns 0 on success or a negative error code (-EINVAL for an
 * out-of-bounds request, -EROFS when the device is write protected).
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
			 __func__, (unsigned int)to, (int)ops->ooblen);

	/* Number of OOB bytes available per page for this access mode. */
	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
				__func__);
		return -EINVAL;
	}

	/* Die index the target page lives on. */
	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999.  dwmw2.
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagecache.page)
		chip->pagecache.page = -1;

	/* Stage the client OOB bytes into chip->oob_poi. */
	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	/* Raw mode bypasses the ECC engine entirely. */
	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}
505 
/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver. It provides the details for writing a bad block marker to a
 * block.
 *
 * Returns 0 on success, or the first error encountered while writing the
 * marker to any of the BBM pages.
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, page_offset;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* On 16-bit buses the marker must be word-aligned, 2 bytes. */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	/* First in-block page that carries a bad block marker. */
	page_offset = nand_bbm_get_next_page(chip, 0);

	/* Write the marker to every BBM page; keep the first error seen. */
	while (page_offset >= 0) {
		res = nand_do_write_oob(chip,
					ofs + (page_offset * mtd->writesize),
					&ops);

		if (!ret)
			ret = res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return ret;
}
548 
549 /**
550  * nand_markbad_bbm - mark a block by updating the BBM
551  * @chip: NAND chip object
552  * @ofs: offset of the block to mark bad
553  */
554 int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
555 {
556 	if (chip->legacy.block_markbad)
557 		return chip->legacy.block_markbad(chip, ofs);
558 
559 	return nand_default_block_markbad(chip, ofs);
560 }
561 
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 *
 * We try operations in the following order:
 *
 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 *  (2) write bad block marker to OOB area of affected block (unless flag
 *      NAND_BBT_NO_OOB_BBM is present)
 *  (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
*/
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		/* Erase result is deliberately ignored: best effort only. */
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		ret = nand_get_device(chip);
		if (ret)
			return ret;

		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		/* Keep the first error; still attempt all the steps. */
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
616 
617 /**
618  * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
619  * @mtd: MTD device structure
620  * @ofs: offset from device start
621  *
622  * Check if the block is marked as reserved.
623  */
624 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
625 {
626 	struct nand_chip *chip = mtd_to_nand(mtd);
627 
628 	if (!chip->bbt)
629 		return 0;
630 	/* Return info from the table */
631 	return nand_isreserved_bbt(chip, ofs);
632 }
633 
634 /**
635  * nand_block_checkbad - [GENERIC] Check if a block is marked bad
636  * @chip: NAND chip object
637  * @ofs: offset from device start
638  * @allowbbt: 1, if its allowed to access the bbt area
639  *
640  * Check, if the block is bad. Either by reading the bad block table or
641  * calling of the scan function.
642  */
643 static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
644 {
645 	/* Return info from the table */
646 	if (chip->bbt)
647 		return nand_isbad_bbt(chip, ofs, allowbbt);
648 
649 	return nand_isbad_bbm(chip, ofs);
650 }
651 
652 /**
653  * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
654  * @chip: NAND chip structure
655  * @timeout_ms: Timeout in ms
656  *
657  * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
658  * If that does not happen whitin the specified timeout, -ETIMEDOUT is
659  * returned.
660  *
661  * This helper is intended to be used when the controller does not have access
662  * to the NAND R/B pin.
663  *
664  * Be aware that calling this helper from an ->exec_op() implementation means
665  * ->exec_op() must be re-entrant.
666  *
667  * Return 0 if the NAND chip is ready, a negative error otherwise.
668  */
669 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
670 {
671 	const struct nand_sdr_timings *timings;
672 	u8 status = 0;
673 	int ret;
674 
675 	if (!nand_has_exec_op(chip))
676 		return -ENOTSUPP;
677 
678 	/* Wait tWB before polling the STATUS reg. */
679 	timings = nand_get_sdr_timings(&chip->data_interface);
680 	ndelay(PSEC_TO_NSEC(timings->tWB_max));
681 
682 	ret = nand_status_op(chip, NULL);
683 	if (ret)
684 		return ret;
685 
686 	/*
687 	 * +1 below is necessary because if we are now in the last fraction
688 	 * of jiffy and msecs_to_jiffies is 1 then we will wait only that
689 	 * small jiffy fraction - possibly leading to false timeout
690 	 */
691 	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
692 	do {
693 		ret = nand_read_data_op(chip, &status, sizeof(status), true);
694 		if (ret)
695 			break;
696 
697 		if (status & NAND_STATUS_READY)
698 			break;
699 
700 		/*
701 		 * Typical lowest execution time for a tR on most NANDs is 10us,
702 		 * use this as polling delay before doing something smarter (ie.
703 		 * deriving a delay from the timeout value, timeout_ms/ratio).
704 		 */
705 		udelay(10);
706 	} while	(time_before(jiffies, timeout_ms));
707 
708 	/*
709 	 * We have to exit READ_STATUS mode in order to read real data on the
710 	 * bus in case the WAITRDY instruction is preceding a DATA_IN
711 	 * instruction.
712 	 */
713 	nand_exit_status_op(chip);
714 
715 	if (ret)
716 		return ret;
717 
718 	return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
719 };
720 EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
721 
722 /**
723  * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
724  * @chip: NAND chip structure
725  * @gpiod: GPIO descriptor of R/B pin
726  * @timeout_ms: Timeout in ms
727  *
728  * Poll the R/B GPIO pin until it becomes ready. If that does not happen
729  * whitin the specified timeout, -ETIMEDOUT is returned.
730  *
731  * This helper is intended to be used when the controller has access to the
732  * NAND R/B pin over GPIO.
733  *
734  * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
735  */
736 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
737 		      unsigned long timeout_ms)
738 {
739 	/* Wait until R/B pin indicates chip is ready or timeout occurs */
740 	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
741 	do {
742 		if (gpiod_get_value_cansleep(gpiod))
743 			return 0;
744 
745 		cond_resched();
746 	} while	(time_before(jiffies, timeout_ms));
747 
748 	return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
749 };
750 EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
751 
/**
 * panic_nand_wait - [GENERIC] wait until the command is done
 * @chip: NAND chip structure
 * @timeo: timeout
 *
 * Wait for command done. This is a helper function for nand_wait used when
 * we are in interrupt context. May happen when in panic and trying to write
 * an oops through mtdoops.
 */
void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
{
	int i;
	for (i = 0; i < timeo; i++) {
		/* Prefer the legacy dev_ready() hook when available. */
		if (chip->legacy.dev_ready) {
			if (chip->legacy.dev_ready(chip))
				break;
		} else {
			int ret;
			u8 status;

			/* Otherwise read the STATUS byte off the bus. */
			ret = nand_read_data_op(chip, &status, sizeof(status),
						true);
			if (ret)
				return;

			if (status & NAND_STATUS_READY)
				break;
		}
		/* Busy-wait: sleeping is not allowed in this context. */
		mdelay(1);
	}
}
783 
784 static bool nand_supports_get_features(struct nand_chip *chip, int addr)
785 {
786 	return (chip->parameters.supports_set_get_features &&
787 		test_bit(addr, chip->parameters.get_feature_list));
788 }
789 
790 static bool nand_supports_set_features(struct nand_chip *chip, int addr)
791 {
792 	return (chip->parameters.supports_set_get_features &&
793 		test_bit(addr, chip->parameters.set_feature_list));
794 }
795 
/**
 * nand_reset_data_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the Data interface and timings to ONFI mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
	int ret;

	/* Nothing to do if the controller has no timing setup hook. */
	if (!nand_has_setup_data_iface(chip))
		return 0;

	/*
	 * The ONFI specification says:
	 * "
	 * To transition from NV-DDR or NV-DDR2 to the SDR data
	 * interface, the host shall use the Reset (FFh) command
	 * using SDR timing mode 0. A device in any timing mode is
	 * required to recognize Reset (FFh) command issued in SDR
	 * timing mode 0.
	 * "
	 *
	 * Configure the data interface in SDR mode and set the
	 * timings to timing mode 0.
	 */

	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
	ret = chip->controller->ops->setup_data_interface(chip, chipnr,
							&chip->data_interface);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}
834 
/**
 * nand_setup_data_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Find and configure the best data interface and NAND timings supported by
 * the chip and the driver.
 * First tries to retrieve supported timing modes from ONFI information,
 * and if the NAND chip does not support ONFI, relies on the
 * ->onfi_timing_mode_default specified in the nand_ids table.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
{
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
		chip->onfi_timing_mode_default,
	};
	int ret;

	/* Nothing to do if the controller has no timing setup hook. */
	if (!nand_has_setup_data_iface(chip))
		return 0;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		nand_select_target(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		nand_deselect_target(chip);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = chip->controller->ops->setup_data_interface(chip, chipnr,
							&chip->data_interface);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		return 0;

	/* Read back the timing mode to confirm the chip applied it. */
	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	nand_select_target(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	nand_deselect_target(chip);
	if (ret)
		goto err_reset_chip;

	if (tmode_param[0] != chip->onfi_timing_mode_default) {
		pr_warn("timing mode %d not acknowledged by the NAND chip\n",
			chip->onfi_timing_mode_default);
		goto err_reset_chip;
	}

	return 0;

err_reset_chip:
	/*
	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
	 * timing mode.
	 */
	nand_reset_data_interface(chip, chipnr);
	nand_select_target(chip, chipnr);
	nand_reset_op(chip);
	nand_deselect_target(chip);

	return ret;
}
906 
/**
 * nand_init_data_interface - find the best data interface and timings
 * @chip: The NAND chip
 *
 * Find the best data interface and NAND timings supported by the chip
 * and the driver.
 * First tries to retrieve supported timing modes from ONFI information,
 * and if the NAND chip does not support ONFI, relies on the
 * ->onfi_timing_mode_default specified in the nand_ids table. After this
 * function nand_chip->data_interface is initialized with the best timing mode
 * available.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_init_data_interface(struct nand_chip *chip)
{
	int modes, mode, ret;

	/* Nothing to negotiate if the controller has no timing setup hook. */
	if (!nand_has_setup_data_iface(chip))
		return 0;

	/*
	 * First try to identify the best timings from ONFI parameters and
	 * if the NAND does not support ONFI, fallback to the default ONFI
	 * timing mode.
	 */
	if (chip->parameters.onfi) {
		modes = chip->parameters.onfi->async_timing_mode;
	} else {
		if (!chip->onfi_timing_mode_default)
			return 0;

		/* Treat all modes up to the default as candidates. */
		modes = GENMASK(chip->onfi_timing_mode_default, 0);
	}

	/* Try the fastest advertised mode first, then fall back. */
	for (mode = fls(modes) - 1; mode >= 0; mode--) {
		ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
		if (ret)
			continue;

		/*
		 * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
		 * controller supports the requested timings.
		 */
		ret = chip->controller->ops->setup_data_interface(chip,
						 NAND_DATA_IFACE_CHECK_ONLY,
						 &chip->data_interface);
		if (!ret) {
			chip->onfi_timing_mode_default = mode;
			break;
		}
	}

	return 0;
}
962 
/**
 * nand_fill_column_cycles - fill the column cycles of an address
 * @chip: The NAND chip
 * @addrs: Array of address cycles to fill
 * @offset_in_page: The offset in the page
 *
 * Fills the first or the first two bytes of the @addrs field depending
 * on the NAND bus width and the page size.
 *
 * Returns the number of cycles needed to encode the column, or a negative
 * error code in case one of the arguments is invalid.
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Make sure the offset is less than the actual page size. */
	if (offset_in_page > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/*
	 * On small page NANDs, there's a dedicated command to access the OOB
	 * area, and the column address is relative to the start of the OOB
	 * area, not the start of the page. Adjust the address accordingly.
	 */
	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
		offset_in_page -= mtd->writesize;

	/*
	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
	 * wide, then it must be divided by 2.
	 */
	if (chip->options & NAND_BUSWIDTH_16) {
		/* A 16-bit bus cannot address an odd byte offset. */
		if (WARN_ON(offset_in_page % 2))
			return -EINVAL;

		offset_in_page /= 2;
	}

	addrs[0] = offset_in_page;

	/*
	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
	 * need 2
	 */
	if (mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}
1016 
/*
 * Build and execute a READ PAGE operation for small page (<= 512 byte)
 * chips using the ->exec_op() interface. The CS line must already be
 * selected by the caller.
 */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/*
	 * Small page chips use dedicated opcodes depending on where the
	 * read starts: READOOB for the OOB area, READ1 for the second
	 * half of the page on 8-bit chips.
	 */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row address: one column cycle, then two (or three) page cycles. */
	addrs[1] = page;
	addrs[2] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1059 
/*
 * Build and execute a READ PAGE operation for large page (> 512 byte)
 * chips using the ->exec_op() interface. Large page chips need the
 * READ0 + READSTART command sequence and two column cycles. The CS line
 * must already be selected by the caller.
 */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row address: two column cycles, then two (or three) page cycles. */
	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1096 
1097 /**
1098  * nand_read_page_op - Do a READ PAGE operation
1099  * @chip: The NAND chip
1100  * @page: page to read
1101  * @offset_in_page: offset within the page
1102  * @buf: buffer used to store the data
1103  * @len: length of the buffer
1104  *
1105  * This function issues a READ PAGE operation.
1106  * This function does not select/unselect the CS line.
1107  *
1108  * Returns 0 on success, a negative error code otherwise.
1109  */
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* A buffer is mandatory as soon as data must be transferred. */
	if (len && !buf)
		return -EINVAL;

	/* The transfer must not run past the end of the OOB area. */
	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		/* Large and small page chips need different sequences. */
		if (mtd->writesize > 512)
			return nand_lp_exec_read_page_op(chip, page,
							 offset_in_page, buf,
							 len);

		return nand_sp_exec_read_page_op(chip, page, offset_in_page,
						 buf, len);
	}

	/* Legacy path: the driver's cmdfunc() handles the sequencing. */
	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_page_op);
1138 
1139 /**
1140  * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
1141  * @chip: The NAND chip
1142  * @page: parameter page to read
1143  * @buf: buffer used to store the data
1144  * @len: length of the buffer
1145  *
1146  * This function issues a READ PARAMETER PAGE operation.
1147  * This function does not select/unselect the CS line.
1148  *
1149  * Returns 0 on success, a negative error code otherwise.
1150  */
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
			    unsigned int len)
{
	unsigned int i;
	u8 *p = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PARAM, 0),
			NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			/* Parameter data is transferred with forced 8-bit access. */
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: byte-wise reads to keep the access 8-bit wide. */
	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
	for (i = 0; i < len; i++)
		p[i] = chip->legacy.read_byte(chip);

	return 0;
}
1185 
1186 /**
1187  * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
1188  * @chip: The NAND chip
1189  * @offset_in_page: offset within the page
1190  * @buf: buffer used to store the data
1191  * @len: length of the buffer
1192  * @force_8bit: force 8-bit bus access
1193  *
1194  * This function issues a CHANGE READ COLUMN operation.
1195  * This function does not select/unselect the CS line.
1196  *
1197  * Returns 0 on success, a negative error code otherwise.
1198  */
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	/* The transfer must not run past the end of the OOB area. */
	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		u8 addrs[2] = {};
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
			NAND_OP_ADDR(2, addrs, 0),
			/* tCCS: change-column setup time before data out. */
			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
				    PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* Encode the new column into the two address cycles. */
		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		/* Harmless when DATA_IN was dropped above. */
		instrs[3].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	/* NOTE(review): force_8bit is not honoured on the legacy path. */
	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1249 
1250 /**
1251  * nand_read_oob_op - Do a READ OOB operation
1252  * @chip: The NAND chip
1253  * @page: page to read
1254  * @offset_in_oob: offset within the OOB area
1255  * @buf: buffer used to store the data
1256  * @len: length of the buffer
1257  *
1258  * This function issues a READ OOB operation.
1259  * This function does not select/unselect the CS line.
1260  *
1261  * Returns 0 on success, a negative error code otherwise.
1262  */
int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
		     unsigned int offset_in_oob, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	/* The transfer must stay within the OOB area. */
	if (offset_in_oob + len > mtd->oobsize)
		return -EINVAL;

	/*
	 * With exec_op the OOB area is simply the tail of the page:
	 * issue a regular page read at writesize + offset_in_oob.
	 */
	if (nand_has_exec_op(chip))
		return nand_read_page_op(chip, page,
					 mtd->writesize + offset_in_oob,
					 buf, len);

	chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_oob_op);
1286 
/*
 * Core of the PROG PAGE sequence (exec_op only): an optional
 * READ0/READ1/READOOB "pointer" command on small page chips, then
 * SEQIN + address cycles + data out, and — when @prog is true —
 * PAGEPROG + tPROG wait followed by a STATUS read.
 *
 * Returns the chip status byte (>= 0) when @prog is true, 0 when only
 * loading data, or a negative error code.
 */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	/* Column cycles first; naddrs then counts total address cycles. */
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
	int ret;
	u8 status;

	if (naddrs < 0)
		return naddrs;

	/* Append the row (page) address cycles after the column ones. */
	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	ret = nand_exec_op(chip, &op);
	if (!prog || ret)
		return ret;

	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status;
}
1361 
1362 /**
1363  * nand_prog_page_begin_op - starts a PROG PAGE operation
1364  * @chip: The NAND chip
1365  * @page: page to write
1366  * @offset_in_page: offset within the page
1367  * @buf: buffer containing the data to write to the page
1368  * @len: length of the buffer
1369  *
1370  * This function issues the first half of a PROG PAGE operation.
1371  * This function does not select/unselect the CS line.
1372  *
1373  * Returns 0 on success, a negative error code otherwise.
1374  */
int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
			    unsigned int offset_in_page, const void *buf,
			    unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	/* The transfer must not run past the end of the OOB area. */
	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* prog = false: load the data but do not issue PAGEPROG yet. */
	if (nand_has_exec_op(chip))
		return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					      len, false);

	chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);

	if (buf)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1399 
1400 /**
1401  * nand_prog_page_end_op - ends a PROG PAGE operation
1402  * @chip: The NAND chip
1403  *
1404  * This function issues the second half of a PROG PAGE operation.
1405  * This function does not select/unselect the CS line.
1406  *
1407  * Returns 0 on success, a negative error code otherwise.
1408  */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		/* PAGEPROG then wait up to tPROG for completion. */
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Fetch the chip status to learn the programming outcome. */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		/* waitfunc() returns the status byte or a negative errno. */
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1446 
1447 /**
1448  * nand_prog_page_op - Do a full PROG PAGE operation
1449  * @chip: The NAND chip
1450  * @page: page to write
1451  * @offset_in_page: offset within the page
1452  * @buf: buffer containing the data to write to the page
1453  * @len: length of the buffer
1454  *
1455  * This function issues a full PROG PAGE operation.
1456  * This function does not select/unselect the CS line.
1457  *
1458  * Returns 0 on success, a negative error code otherwise.
1459  */
1460 int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1461 		      unsigned int offset_in_page, const void *buf,
1462 		      unsigned int len)
1463 {
1464 	struct mtd_info *mtd = nand_to_mtd(chip);
1465 	int status;
1466 
1467 	if (!len || !buf)
1468 		return -EINVAL;
1469 
1470 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1471 		return -EINVAL;
1472 
1473 	if (nand_has_exec_op(chip)) {
1474 		status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1475 						len, true);
1476 	} else {
1477 		chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
1478 				     page);
1479 		chip->legacy.write_buf(chip, buf, len);
1480 		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1481 		status = chip->legacy.waitfunc(chip);
1482 	}
1483 
1484 	if (status & NAND_STATUS_FAIL)
1485 		return -EIO;
1486 
1487 	return 0;
1488 }
1489 EXPORT_SYMBOL_GPL(nand_prog_page_op);
1490 
1491 /**
1492  * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
1493  * @chip: The NAND chip
1494  * @offset_in_page: offset within the page
1495  * @buf: buffer containing the data to send to the NAND
1496  * @len: length of the buffer
1497  * @force_8bit: force 8-bit bus access
1498  *
1499  * This function issues a CHANGE WRITE COLUMN operation.
1500  * This function does not select/unselect the CS line.
1501  *
1502  * Returns 0 on success, a negative error code otherwise.
1503  */
int nand_change_write_column_op(struct nand_chip *chip,
				unsigned int offset_in_page,
				const void *buf, unsigned int len,
				bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	/* The transfer must not run past the end of the OOB area. */
	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		u8 addrs[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			/* tCCS: change-column setup time before data out. */
			NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* Encode the new column into the two address cycles. */
		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* NOTE(review): force_8bit is not honoured on the legacy path. */
	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1553 
1554 /**
1555  * nand_readid_op - Do a READID operation
1556  * @chip: The NAND chip
1557  * @addr: address cycle to pass after the READID command
1558  * @buf: buffer used to store the ID
1559  * @len: length of the buffer
1560  *
1561  * This function sends a READID command and reads back the ID returned by the
1562  * NAND.
1563  * This function does not select/unselect the CS line.
1564  *
1565  * Returns 0 on success, a negative error code otherwise.
1566  */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	unsigned int i;
	u8 *id = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
			/* ID bytes are transferred with forced 8-bit access. */
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);

	/* Legacy path: byte-wise reads to keep the access 8-bit wide. */
	for (i = 0; i < len; i++)
		id[i] = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);
1601 
1602 /**
1603  * nand_status_op - Do a STATUS operation
1604  * @chip: The NAND chip
1605  * @status: out variable to store the NAND status
1606  *
1607  * This function sends a STATUS command and reads back the status returned by
1608  * the NAND.
1609  * This function does not select/unselect the CS line.
1610  *
1611  * Returns 0 on success, a negative error code otherwise.
1612  */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    PSEC_TO_NSEC(sdr->tADL_min)),
			/* The status register is a single byte, 8-bit access. */
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Callers may pass status == NULL to only send the command. */
		if (!status)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);
1638 
1639 /**
1640  * nand_exit_status_op - Exit a STATUS operation
1641  * @chip: The NAND chip
1642  *
1643  * This function sends a READ0 command to cancel the effect of the STATUS
1644  * command to avoid reading only the status until a new read command is sent.
1645  *
1646  * This function does not select/unselect the CS line.
1647  *
1648  * Returns 0 on success, a negative error code otherwise.
1649  */
1650 int nand_exit_status_op(struct nand_chip *chip)
1651 {
1652 	if (nand_has_exec_op(chip)) {
1653 		struct nand_op_instr instrs[] = {
1654 			NAND_OP_CMD(NAND_CMD_READ0, 0),
1655 		};
1656 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1657 
1658 		return nand_exec_op(chip, &op);
1659 	}
1660 
1661 	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
1662 
1663 	return 0;
1664 }
1665 
1666 /**
1667  * nand_erase_op - Do an erase operation
1668  * @chip: The NAND chip
1669  * @eraseblock: block to erase
1670  *
1671  * This function sends an ERASE command and waits for the NAND to be ready
1672  * before returning.
1673  * This function does not select/unselect the CS line.
1674  *
1675  * Returns 0 on success, a negative error code otherwise.
1676  */
1677 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
1678 {
1679 	unsigned int page = eraseblock <<
1680 			    (chip->phys_erase_shift - chip->page_shift);
1681 	int ret;
1682 	u8 status;
1683 
1684 	if (nand_has_exec_op(chip)) {
1685 		const struct nand_sdr_timings *sdr =
1686 			nand_get_sdr_timings(&chip->data_interface);
1687 		u8 addrs[3] = {	page, page >> 8, page >> 16 };
1688 		struct nand_op_instr instrs[] = {
1689 			NAND_OP_CMD(NAND_CMD_ERASE1, 0),
1690 			NAND_OP_ADDR(2, addrs, 0),
1691 			NAND_OP_CMD(NAND_CMD_ERASE2,
1692 				    PSEC_TO_MSEC(sdr->tWB_max)),
1693 			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
1694 		};
1695 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1696 
1697 		if (chip->options & NAND_ROW_ADDR_3)
1698 			instrs[1].ctx.addr.naddrs++;
1699 
1700 		ret = nand_exec_op(chip, &op);
1701 		if (ret)
1702 			return ret;
1703 
1704 		ret = nand_status_op(chip, &status);
1705 		if (ret)
1706 			return ret;
1707 	} else {
1708 		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
1709 		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
1710 
1711 		ret = chip->legacy.waitfunc(chip);
1712 		if (ret < 0)
1713 			return ret;
1714 
1715 		status = ret;
1716 	}
1717 
1718 	if (status & NAND_STATUS_FAIL)
1719 		return -EIO;
1720 
1721 	return 0;
1722 }
1723 EXPORT_SYMBOL_GPL(nand_erase_op);
1724 
1725 /**
1726  * nand_set_features_op - Do a SET FEATURES operation
1727  * @chip: The NAND chip
1728  * @feature: feature id
1729  * @data: 4 bytes of data
1730  *
1731  * This function sends a SET FEATURES command and waits for the NAND to be
1732  * ready before returning.
1733  * This function does not select/unselect the CS line.
1734  *
1735  * Returns 0 on success, a negative error code otherwise.
1736  */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	const u8 *params = data;
	int i, ret;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
			/* Feature parameters use forced 8-bit access. */
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: byte-wise writes to keep the access 8-bit wide. */
	chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		chip->legacy.write_byte(chip, params[i]);

	/* waitfunc() returns the status byte or a negative errno. */
	ret = chip->legacy.waitfunc(chip);
	if (ret < 0)
		return ret;

	if (ret & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
1771 
1772 /**
1773  * nand_get_features_op - Do a GET FEATURES operation
1774  * @chip: The NAND chip
1775  * @feature: feature id
1776  * @data: 4 bytes of data
1777  *
1778  * This function sends a GET FEATURES command and waits for the NAND to be
1779  * ready before returning.
1780  * This function does not select/unselect the CS line.
1781  *
1782  * Returns 0 on success, a negative error code otherwise.
1783  */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	u8 *params = data;
	int i;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			/* Feature parameters use forced 8-bit access. */
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/*
	 * Legacy path: byte-wise reads; cmdfunc() is presumed to have
	 * waited for the chip to become ready again.
	 */
	chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->legacy.read_byte(chip);

	return 0;
}
1812 
/*
 * Wait for the chip to be ready again: a WAIT_RDY instruction through
 * exec_op, or a fixed delay / R/B# poll on the legacy path.
 *
 * NOTE(review): timeout_ms and delay_ns are already expressed in
 * ms/ns, yet they are run through PSEC_TO_MSEC()/PSEC_TO_NSEC(), which
 * convert from picoseconds. This looks like a double conversion unless
 * callers actually pass picosecond values — verify against the callers.
 */
static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
			    unsigned int delay_ns)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
					 PSEC_TO_NSEC(delay_ns)),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Apply delay or wait for ready/busy pin */
	if (!chip->legacy.dev_ready)
		udelay(chip->legacy.chip_delay);
	else
		nand_wait_ready(chip);

	return 0;
}
1834 
1835 /**
1836  * nand_reset_op - Do a reset operation
1837  * @chip: The NAND chip
1838  *
1839  * This function sends a RESET command and waits for the NAND to be ready
1840  * before returning.
1841  * This function does not select/unselect the CS line.
1842  *
1843  * Returns 0 on success, a negative error code otherwise.
1844  */
int nand_reset_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		/* RESET then wait up to tRST for the chip to recover. */
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset_op);
1864 
1865 /**
1866  * nand_read_data_op - Read data from the NAND
1867  * @chip: The NAND chip
1868  * @buf: buffer used to store the data
1869  * @len: length of the buffer
1870  * @force_8bit: force 8-bit bus access
1871  *
1872  * This function does a raw data read on the bus. Usually used after launching
1873  * another NAND operation like nand_read_page_op().
1874  * This function does not select/unselect the CS line.
1875  *
1876  * Returns 0 on success, a negative error code otherwise.
1877  */
1878 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1879 		      bool force_8bit)
1880 {
1881 	if (!len || !buf)
1882 		return -EINVAL;
1883 
1884 	if (nand_has_exec_op(chip)) {
1885 		struct nand_op_instr instrs[] = {
1886 			NAND_OP_DATA_IN(len, buf, 0),
1887 		};
1888 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1889 
1890 		instrs[0].ctx.data.force_8bit = force_8bit;
1891 
1892 		return nand_exec_op(chip, &op);
1893 	}
1894 
1895 	if (force_8bit) {
1896 		u8 *p = buf;
1897 		unsigned int i;
1898 
1899 		for (i = 0; i < len; i++)
1900 			p[i] = chip->legacy.read_byte(chip);
1901 	} else {
1902 		chip->legacy.read_buf(chip, buf, len);
1903 	}
1904 
1905 	return 0;
1906 }
1907 EXPORT_SYMBOL_GPL(nand_read_data_op);
1908 
1909 /**
 * nand_write_data_op - Write data to the NAND
1911  * @chip: The NAND chip
1912  * @buf: buffer containing the data to send on the bus
1913  * @len: length of the buffer
1914  * @force_8bit: force 8-bit bus access
1915  *
1916  * This function does a raw data write on the bus. Usually used after launching
 * another NAND operation like nand_prog_page_begin_op().
1918  * This function does not select/unselect the CS line.
1919  *
1920  * Returns 0 on success, a negative error code otherwise.
1921  */
1922 int nand_write_data_op(struct nand_chip *chip, const void *buf,
1923 		       unsigned int len, bool force_8bit)
1924 {
1925 	if (!len || !buf)
1926 		return -EINVAL;
1927 
1928 	if (nand_has_exec_op(chip)) {
1929 		struct nand_op_instr instrs[] = {
1930 			NAND_OP_DATA_OUT(len, buf, 0),
1931 		};
1932 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1933 
1934 		instrs[0].ctx.data.force_8bit = force_8bit;
1935 
1936 		return nand_exec_op(chip, &op);
1937 	}
1938 
1939 	if (force_8bit) {
1940 		const u8 *p = buf;
1941 		unsigned int i;
1942 
1943 		for (i = 0; i < len; i++)
1944 			chip->legacy.write_byte(chip, p[i]);
1945 	} else {
1946 		chip->legacy.write_buf(chip, buf, len);
1947 	}
1948 
1949 	return 0;
1950 }
1951 EXPORT_SYMBOL_GPL(nand_write_data_op);
1952 
1953 /**
1954  * struct nand_op_parser_ctx - Context used by the parser
1955  * @instrs: array of all the instructions that must be addressed
1956  * @ninstrs: length of the @instrs array
1957  * @subop: Sub-operation to be passed to the NAND controller
1958  *
1959  * This structure is used by the core to split NAND operations into
1960  * sub-operations that can be handled by the NAND controller.
1961  */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;	/* full instruction list of the op */
	unsigned int ninstrs;			/* number of entries in @instrs */
	struct nand_subop subop;		/* current chunk handed to the controller */
};
1967 
1968 /**
1969  * nand_op_parser_must_split_instr - Checks if an instruction must be split
1970  * @pat: the parser pattern element that matches @instr
1971  * @instr: pointer to the instruction to check
1972  * @start_offset: this is an in/out parameter. If @instr has already been
1973  *		  split, then @start_offset is the offset from which to start
1974  *		  (either an address cycle or an offset in the data buffer).
1975  *		  Conversely, if the function returns true (ie. instr must be
1976  *		  split), this parameter is updated to point to the first
1977  *		  data/address cycle that has not been taken care of.
1978  *
1979  * Some NAND controllers are limited and cannot send X address cycles with a
1980  * unique operation, or cannot read/write more than Y bytes at the same time.
1981  * In this case, split the instruction that does not fit in a single
1982  * controller-operation into two or more chunks.
1983  *
1984  * Returns true if the instruction must be split, false otherwise.
1985  * The @start_offset parameter is also updated to the offset at which the next
1986  * bundle of instruction must start (if an address or a data instruction).
1987  */
static bool
nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
				const struct nand_op_instr *instr,
				unsigned int *start_offset)
{
	switch (pat->type) {
	case NAND_OP_ADDR_INSTR:
		/* maxcycles == 0 means "no limit": never split. */
		if (!pat->ctx.addr.maxcycles)
			break;

		/*
		 * More address cycles remain than the pattern can take in
		 * one go: consume maxcycles now and ask for a split.
		 */
		if (instr->ctx.addr.naddrs - *start_offset >
		    pat->ctx.addr.maxcycles) {
			*start_offset += pat->ctx.addr.maxcycles;
			return true;
		}
		break;

	case NAND_OP_DATA_IN_INSTR:
	case NAND_OP_DATA_OUT_INSTR:
		/* maxlen == 0 means "no limit": never split. */
		if (!pat->ctx.data.maxlen)
			break;

		/* Same logic as above, but counted in data bytes. */
		if (instr->ctx.data.len - *start_offset >
		    pat->ctx.data.maxlen) {
			*start_offset += pat->ctx.data.maxlen;
			return true;
		}
		break;

	default:
		/* CMD and WAIT_RDY instructions are never splittable. */
		break;
	}

	return false;
}
2023 
2024 /**
2025  * nand_op_parser_match_pat - Checks if a pattern matches the instructions
2026  *			      remaining in the parser context
2027  * @pat: the pattern to test
2028  * @ctx: the parser context structure to match with the pattern @pat
2029  *
2030  * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
 * Returns true if this is the case, false otherwise. When true is returned,
2032  * @ctx->subop is updated with the set of instructions to be passed to the
2033  * controller driver.
2034  */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	/* Resume point inside the first instruction (if it was split). */
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and continue
		 * to the next one. If the element is mandatory, there's no
		 * match and we can return false directly.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * The last_instr_end_off value comes back updated to point to
		 * the position where we have to split the instruction (the
		 * start of the next subop chunk).
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			/* Split: this instr is partially consumed, stop here. */
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		/* The next instruction starts from its beginning. */
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}
2107 
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/*
 * Dump the whole operation through pr_debug(), marking with a "->" prefix
 * the instructions that belong to the sub-operation currently selected in
 * @ctx.
 */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = "      ";
	unsigned int i;

	pr_debug("executing subop:\n");

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Switch to the arrow prefix on the first subop instruction. */
		if (instr == &ctx->subop.instrs[0])
			prefix = "    ->";

		nand_op_trace(prefix, instr);

		/* Revert to the plain prefix after the last subop instruction. */
		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = "      ";
	}
}
#else
/* Tracing is compiled out when dynamic debug and DEBUG are both disabled. */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif
2135 
2136 static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
2137 				  const struct nand_op_parser_ctx *b)
2138 {
2139 	if (a->subop.ninstrs < b->subop.ninstrs)
2140 		return -1;
2141 	else if (a->subop.ninstrs > b->subop.ninstrs)
2142 		return 1;
2143 
2144 	if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
2145 		return -1;
2146 	else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
2147 		return 1;
2148 
2149 	return 0;
2150 }
2151 
2152 /**
2153  * nand_op_parser_exec_op - exec_op parser
2154  * @chip: the NAND chip
2155  * @parser: patterns description provided by the controller driver
2156  * @op: the NAND operation to address
2157  * @check_only: when true, the function only checks if @op can be handled but
2158  *		does not execute the operation
2159  *
2160  * Helper function designed to ease integration of NAND controller drivers that
2161  * only support a limited set of instruction sequences. The supported sequences
2162  * are described in @parser, and the framework takes care of splitting @op into
2163  * multiple sub-operations (if required) and pass them back to the ->exec()
2164  * callback of the matching pattern if @check_only is set to false.
2165  *
2166  * NAND controller drivers should call this function from their own ->exec_op()
2167  * implementation.
2168  *
2169  * Returns 0 on success, a negative error code otherwise. A failure can be
2170  * caused by an unsupported operation (none of the supported patterns is able
2171  * to handle the requested operation), or an error returned by one of the
2172  * matching pattern->exec() hook.
2173  */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	/* Loop until every instruction of the operation has been consumed. */
	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		const struct nand_op_parser_pattern *pattern;
		struct nand_op_parser_ctx best_ctx;
		int ret, best_pattern = -1;

		/*
		 * Try all patterns and keep the one consuming the biggest
		 * chunk of the remaining instructions (greedy selection, see
		 * nand_op_parser_cmp_ctx()).
		 */
		for (i = 0; i < parser->npatterns; i++) {
			struct nand_op_parser_ctx test_ctx = ctx;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &test_ctx))
				continue;

			if (best_pattern >= 0 &&
			    nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
				continue;

			best_pattern = i;
			best_ctx = test_ctx;
		}

		if (best_pattern < 0) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		ctx = best_ctx;
		nand_op_parser_trace(&ctx);

		if (!check_only) {
			pattern = &parser->patterns[best_pattern];
			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		/*
		 * A non-zero last_instr_end_off means the last instruction
		 * was split: the next subop must restart on that very same
		 * instruction, at the recorded offset.
		 */
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2233 EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2234 
2235 static bool nand_instr_is_data(const struct nand_op_instr *instr)
2236 {
2237 	return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2238 			 instr->type == NAND_OP_DATA_OUT_INSTR);
2239 }
2240 
2241 static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2242 				      unsigned int instr_idx)
2243 {
2244 	return subop && instr_idx < subop->ninstrs;
2245 }
2246 
2247 static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2248 					     unsigned int instr_idx)
2249 {
2250 	if (instr_idx)
2251 		return 0;
2252 
2253 	return subop->first_instr_start_off;
2254 }
2255 
2256 /**
2257  * nand_subop_get_addr_start_off - Get the start offset in an address array
2258  * @subop: The entire sub-operation
2259  * @instr_idx: Index of the instruction inside the sub-operation
2260  *
2261  * During driver development, one could be tempted to directly use the
2262  * ->addr.addrs field of address instructions. This is wrong as address
2263  * instructions might be split.
2264  *
2265  * Given an address instruction, returns the offset of the first cycle to issue.
2266  */
2267 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2268 					   unsigned int instr_idx)
2269 {
2270 	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2271 		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2272 		return 0;
2273 
2274 	return nand_subop_get_start_off(subop, instr_idx);
2275 }
2276 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2277 
2278 /**
2279  * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2280  * @subop: The entire sub-operation
2281  * @instr_idx: Index of the instruction inside the sub-operation
2282  *
 * During driver development, one could be tempted to directly use the
 * ->addr->naddrs field of an address instruction. This is wrong as
 * instructions might be split.
2286  *
2287  * Given an address instruction, returns the number of address cycle to issue.
2288  */
2289 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2290 					 unsigned int instr_idx)
2291 {
2292 	int start_off, end_off;
2293 
2294 	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2295 		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2296 		return 0;
2297 
2298 	start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2299 
2300 	if (instr_idx == subop->ninstrs - 1 &&
2301 	    subop->last_instr_end_off)
2302 		end_off = subop->last_instr_end_off;
2303 	else
2304 		end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2305 
2306 	return end_off - start_off;
2307 }
2308 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2309 
2310 /**
2311  * nand_subop_get_data_start_off - Get the start offset in a data array
2312  * @subop: The entire sub-operation
2313  * @instr_idx: Index of the instruction inside the sub-operation
2314  *
2315  * During driver development, one could be tempted to directly use the
2316  * ->data->buf.{in,out} field of data instructions. This is wrong as data
2317  * instructions might be split.
2318  *
2319  * Given a data instruction, returns the offset to start from.
2320  */
2321 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2322 					   unsigned int instr_idx)
2323 {
2324 	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2325 		    !nand_instr_is_data(&subop->instrs[instr_idx])))
2326 		return 0;
2327 
2328 	return nand_subop_get_start_off(subop, instr_idx);
2329 }
2330 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2331 
2332 /**
2333  * nand_subop_get_data_len - Get the number of bytes to retrieve
2334  * @subop: The entire sub-operation
2335  * @instr_idx: Index of the instruction inside the sub-operation
2336  *
2337  * During driver development, one could be tempted to directly use the
2338  * ->data->len field of a data instruction. This is wrong as data instructions
2339  * might be split.
2340  *
2341  * Returns the length of the chunk of data to send/receive.
2342  */
2343 unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
2344 				     unsigned int instr_idx)
2345 {
2346 	int start_off = 0, end_off;
2347 
2348 	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2349 		    !nand_instr_is_data(&subop->instrs[instr_idx])))
2350 		return 0;
2351 
2352 	start_off = nand_subop_get_data_start_off(subop, instr_idx);
2353 
2354 	if (instr_idx == subop->ninstrs - 1 &&
2355 	    subop->last_instr_end_off)
2356 		end_off = subop->last_instr_end_off;
2357 	else
2358 		end_off = subop->instrs[instr_idx].ctx.data.len;
2359 
2360 	return end_off - start_off;
2361 }
2362 EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2363 
2364 /**
2365  * nand_reset - Reset and initialize a NAND device
2366  * @chip: The NAND chip
2367  * @chipnr: Internal die id
2368  *
2369  * Save the timings data structure, then apply SDR timings mode 0 (see
2370  * nand_reset_data_interface for details), do the reset operation, and
2371  * apply back the previous timings.
2372  *
2373  * Returns 0 on success, a negative error code otherwise.
2374  */
int nand_reset(struct nand_chip *chip, int chipnr)
{
	struct nand_data_interface saved_data_intf = chip->data_interface;
	int ret;

	/* Bring both the controller and the chip back to SDR timing mode 0. */
	ret = nand_reset_data_interface(chip, chipnr);
	if (ret)
		return ret;

	/*
	 * The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this weird nand_select_target()
	 * nand_deselect_target() dance.
	 */
	nand_select_target(chip, chipnr);
	ret = nand_reset_op(chip);
	nand_deselect_target(chip);
	if (ret)
		return ret;

	/*
	 * A nand_reset_data_interface() put both the NAND chip and the NAND
	 * controller in timings mode 0. If the default mode for this chip is
	 * also 0, no need to proceed to the change again. Plus, at probe time,
	 * nand_setup_data_interface() uses ->set/get_features() which would
	 * fail anyway as the parameter page is not available yet.
	 */
	if (!chip->onfi_timing_mode_default)
		return 0;

	/* Restore the timings saved on entry and re-apply them. */
	chip->data_interface = saved_data_intf;
	ret = nand_setup_data_interface(chip, chipnr);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset);
2413 
2414 /**
2415  * nand_get_features - wrapper to perform a GET_FEATURE
2416  * @chip: NAND chip info structure
2417  * @addr: feature address
2418  * @subfeature_param: the subfeature parameters, a four bytes array
2419  *
2420  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2421  * operation cannot be handled.
2422  */
2423 int nand_get_features(struct nand_chip *chip, int addr,
2424 		      u8 *subfeature_param)
2425 {
2426 	if (!nand_supports_get_features(chip, addr))
2427 		return -ENOTSUPP;
2428 
2429 	if (chip->legacy.get_features)
2430 		return chip->legacy.get_features(chip, addr, subfeature_param);
2431 
2432 	return nand_get_features_op(chip, addr, subfeature_param);
2433 }
2434 
2435 /**
2436  * nand_set_features - wrapper to perform a SET_FEATURE
2437  * @chip: NAND chip info structure
2438  * @addr: feature address
2439  * @subfeature_param: the subfeature parameters, a four bytes array
2440  *
2441  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2442  * operation cannot be handled.
2443  */
2444 int nand_set_features(struct nand_chip *chip, int addr,
2445 		      u8 *subfeature_param)
2446 {
2447 	if (!nand_supports_set_features(chip, addr))
2448 		return -ENOTSUPP;
2449 
2450 	if (chip->legacy.set_features)
2451 		return chip->legacy.set_features(chip, addr, subfeature_param);
2452 
2453 	return nand_set_features_op(chip, addr, subfeature_param);
2454 }
2455 
2456 /**
2457  * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
2458  * @buf: buffer to test
2459  * @len: buffer length
2460  * @bitflips_threshold: maximum number of bitflips
2461  *
2462  * Check if a buffer contains only 0xff, which means the underlying region
2463  * has been erased and is ready to be programmed.
2464  * The bitflips_threshold specify the maximum number of bitflips before
2465  * considering the region is not erased.
2466  * Note: The logic of this function has been extracted from the memweight
2467  * implementation, except that nand_check_erased_buf function exit before
2468  * testing the whole buffer if the number of bitflips exceed the
2469  * bitflips_threshold value.
2470  *
2471  * Returns a positive number of bitflips less than or equal to
2472  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2473  * threshold.
2474  */
2475 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
2476 {
2477 	const unsigned char *bitmap = buf;
2478 	int bitflips = 0;
2479 	int weight;
2480 
2481 	for (; len && ((uintptr_t)bitmap) % sizeof(long);
2482 	     len--, bitmap++) {
2483 		weight = hweight8(*bitmap);
2484 		bitflips += BITS_PER_BYTE - weight;
2485 		if (unlikely(bitflips > bitflips_threshold))
2486 			return -EBADMSG;
2487 	}
2488 
2489 	for (; len >= sizeof(long);
2490 	     len -= sizeof(long), bitmap += sizeof(long)) {
2491 		unsigned long d = *((unsigned long *)bitmap);
2492 		if (d == ~0UL)
2493 			continue;
2494 		weight = hweight_long(d);
2495 		bitflips += BITS_PER_LONG - weight;
2496 		if (unlikely(bitflips > bitflips_threshold))
2497 			return -EBADMSG;
2498 	}
2499 
2500 	for (; len > 0; len--, bitmap++) {
2501 		weight = hweight8(*bitmap);
2502 		bitflips += BITS_PER_BYTE - weight;
2503 		if (unlikely(bitflips > bitflips_threshold))
2504 			return -EBADMSG;
2505 	}
2506 
2507 	return bitflips;
2508 }
2509 
2510 /**
2511  * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
2512  *				 0xff data
2513  * @data: data buffer to test
2514  * @datalen: data length
2515  * @ecc: ECC buffer
2516  * @ecclen: ECC length
2517  * @extraoob: extra OOB buffer
2518  * @extraooblen: extra OOB length
2519  * @bitflips_threshold: maximum number of bitflips
2520  *
2521  * Check if a data buffer and its associated ECC and OOB data contains only
2522  * 0xff pattern, which means the underlying region has been erased and is
2523  * ready to be programmed.
2524  * The bitflips_threshold specify the maximum number of bitflips before
2525  * considering the region as not erased.
2526  *
2527  * Note:
2528  * 1/ ECC algorithms are working on pre-defined block sizes which are usually
2529  *    different from the NAND page size. When fixing bitflips, ECC engines will
2530  *    report the number of errors per chunk, and the NAND core infrastructure
2531  *    expect you to return the maximum number of bitflips for the whole page.
2532  *    This is why you should always use this function on a single chunk and
2533  *    not on the whole page. After checking each chunk you should update your
2534  *    max_bitflips value accordingly.
2535  * 2/ When checking for bitflips in erased pages you should not only check
2536  *    the payload data but also their associated ECC data, because a user might
2537  *    have programmed almost all bits to 1 but a few. In this case, we
2538  *    shouldn't consider the chunk as erased, and checking ECC bytes prevent
2539  *    this case.
2540  * 3/ The extraoob argument is optional, and should be used if some of your OOB
2541  *    data are protected by the ECC engine.
2542  *    It could also be used if you support subpages and want to attach some
2543  *    extra OOB data to an ECC chunk.
2544  *
2545  * Returns a positive number of bitflips less than or equal to
2546  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2547  * threshold. In case of success, the passed buffers are filled with 0xff.
2548  */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	int data_bitflips, ecc_bitflips, extraoob_bitflips;

	/*
	 * Check the three buffers in sequence, shrinking the remaining
	 * bitflip budget each time, so that the cumulated count never
	 * exceeds the initial threshold.
	 */
	data_bitflips = nand_check_erased_buf(data, datalen,
					      bitflips_threshold);
	if (data_bitflips < 0)
		return data_bitflips;

	ecc_bitflips = nand_check_erased_buf(ecc, ecclen,
					     bitflips_threshold -
					     data_bitflips);
	if (ecc_bitflips < 0)
		return ecc_bitflips;

	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
						  bitflips_threshold -
						  data_bitflips - ecc_bitflips);
	if (extraoob_bitflips < 0)
		return extraoob_bitflips;

	/* The chunk counts as erased: wipe every touched buffer to all-ones. */
	if (data_bitflips)
		memset(data, 0xff, datalen);

	if (ecc_bitflips)
		memset(ecc, 0xff, ecclen);

	if (extraoob_bitflips)
		memset(extraoob, 0xff, extraooblen);

	return data_bitflips + ecc_bitflips + extraoob_bitflips;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2586 
2587 /**
2588  * nand_read_page_raw_notsupp - dummy read raw page function
2589  * @chip: nand chip info structure
2590  * @buf: buffer to store read data
2591  * @oob_required: caller requires OOB data read to chip->oob_poi
2592  * @page: page number to read
2593  *
2594  * Returns -ENOTSUPP unconditionally.
2595  */
int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
			       int oob_required, int page)
{
	/* Stub for controllers that cannot read pages in raw mode. */
	return -ENOTSUPP;
}
2601 
2602 /**
2603  * nand_read_page_raw - [INTERN] read raw page data without ecc
2604  * @chip: nand chip info structure
2605  * @buf: buffer to store read data
2606  * @oob_required: caller requires OOB data read to chip->oob_poi
2607  * @page: page number to read
2608  *
2609  * Not for syndrome calculating ECC controllers, which use a special oob layout.
2610  */
2611 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
2612 		       int page)
2613 {
2614 	struct mtd_info *mtd = nand_to_mtd(chip);
2615 	int ret;
2616 
2617 	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2618 	if (ret)
2619 		return ret;
2620 
2621 	if (oob_required) {
2622 		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2623 					false);
2624 		if (ret)
2625 			return ret;
2626 	}
2627 
2628 	return 0;
2629 }
2630 EXPORT_SYMBOL(nand_read_page_raw);
2631 
2632 /**
2633  * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
2634  * @chip: nand chip info structure
2635  * @buf: buffer to store read data
2636  * @oob_required: caller requires OOB data read to chip->oob_poi
2637  * @page: page number to read
2638  *
2639  * We need a special oob layout and handling even when OOB isn't used.
2640  */
static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * The page is laid out as consecutive ECC steps of
	 * [data | prepad | ECC | postpad]: read each piece in order, data
	 * going to @buf and everything else to chip->oob_poi.
	 */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_read_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_read_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Read whatever OOB bytes remain after the last ECC step. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_read_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return 0;
}
2695 
2696 /**
2697  * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
2698  * @chip: nand chip info structure
2699  * @buf: buffer to store read data
2700  * @oob_required: caller requires OOB data read to chip->oob_poi
2701  * @page: page number to read
2702  */
static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/*
	 * Read the raw page, data + OOB.
	 * NOTE(review): the return value is ignored, so a failed raw read
	 * proceeds straight to ECC correction — confirm whether this is
	 * intentional before changing it.
	 */
	chip->ecc.read_page_raw(chip, buf, 1, page);

	/* Compute the ECC of the data that was actually read... */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	/* ...and extract the ECC stored on flash from the OOB area. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	/* Correct each ECC step in place and accumulate the statistics. */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2741 
2742 /**
2743  * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
2744  * @chip: nand chip info structure
2745  * @data_offs: offset of requested data within the page
2746  * @readlen: data length
2747  * @bufpoi: buffer to store read data
2748  * @page: page number to read
2749  */
static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
			     uint32_t readlen, uint8_t *bufpoi, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int start_step, end_step, num_steps, ret;
	uint8_t *p;
	int data_col_addr, i, gaps = 0;
	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
	int index, section = 0;
	unsigned int max_bitflips = 0;
	struct mtd_oob_region oobregion = { };

	/* Column address within the page aligned to ECC size (256bytes) */
	start_step = data_offs / chip->ecc.size;
	end_step = (data_offs + readlen - 1) / chip->ecc.size;
	num_steps = end_step - start_step + 1;
	index = start_step * chip->ecc.bytes;

	/* Data size aligned to ECC ecc.size */
	datafrag_len = num_steps * chip->ecc.size;
	eccfrag_len = num_steps * chip->ecc.bytes;

	data_col_addr = start_step * chip->ecc.size;
	/* If we read not a page aligned data */
	p = bufpoi + data_col_addr;
	ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
	if (ret)
		return ret;

	/* Calculate ECC */
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
		chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);

	/*
	 * The performance is faster if we position offsets according to
	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
	 */
	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
	if (ret)
		return ret;

	if (oobregion.length < eccfrag_len)
		gaps = 1;

	if (gaps) {
		/* ECC bytes are scattered: fetch the whole OOB area. */
		ret = nand_change_read_column_op(chip, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 false);
		if (ret)
			return ret;
	} else {
		/*
		 * Send the command to read the particular ECC bytes take care
		 * about buswidth alignment in read_buf.
		 */
		aligned_pos = oobregion.offset & ~(busw - 1);
		aligned_len = eccfrag_len;
		if (oobregion.offset & (busw - 1))
			aligned_len++;
		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
		    (busw - 1))
			aligned_len++;

		ret = nand_change_read_column_op(chip,
						 mtd->writesize + aligned_pos,
						 &chip->oob_poi[aligned_pos],
						 aligned_len, false);
		if (ret)
			return ret;
	}

	/* Pull the on-flash ECC of the fragment out of the OOB buffer. */
	ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
					 chip->oob_poi, index, eccfrag_len);
	if (ret)
		return ret;

	/* Correct each step of the fragment and accumulate the statistics. */
	p = bufpoi + data_col_addr;
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
		int stat;

		stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
					 &chip->ecc.calc_buf[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
						&chip->ecc.code_buf[i],
						chip->ecc.bytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2852 
2853 /**
2854  * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
2855  * @chip: nand chip info structure
2856  * @buf: buffer to store read data
2857  * @oob_required: caller requires OOB data read to chip->oob_poi
2858  * @page: page number to read
2859  *
2860  * Not for syndrome calculating ECC controllers which need a special oob layout.
2861  */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Read each ECC step while the HW engine computes its ECC. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the hardware ECC engine before the data transfer. */
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	/* Extract the ECC stored on flash from the OOB buffer. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	/* Correct each ECC step in place and accumulate the statistics. */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
						&ecc_code[i], eccbytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2922 
2923 /**
2924  * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
2925  * @chip: nand chip info structure
2926  * @buf: buffer to store read data
2927  * @oob_required: caller requires OOB data read to chip->oob_poi
2928  * @page: page number to read
2929  *
2930  * Hardware ECC for large page chips, require OOB to be read first. For this
2931  * ECC mode, the write_page method is re-used from ECC_HW. These methods
2932  * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
2933  * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
2934  * the data area, by overwriting the NAND manufacturer bad block markings.
2935  */
static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
					  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	unsigned int max_bitflips = 0;

	/* Read the OOB area first */
	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
	if (ret)
		return ret;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Extract the on-flash ECC from the OOB data read above. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* Read, compute ECC and correct one step at a time. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		/* Arm the hardware ECC engine before the data transfer. */
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);

		stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
						&ecc_code[i], eccbytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2992 
2993 /**
2994  * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
2995  * @chip: nand chip info structure
2996  * @buf: buffer to store read data
2997  * @oob_required: caller requires OOB data read to chip->oob_poi
2998  * @page: page number to read
2999  *
3000  * The hw generator calculates the error syndrome automatically. Therefore we
3001  * need a special oob layout and handling.
3002  */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Each ECC step is laid out as [data | prepad | ECC | postpad];
	 * read the pieces in order and let the HW engine build the syndrome.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		/* Arm the hardware ECC engine before the data transfer. */
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Switch the engine to syndrome mode before reading the ECC. */
		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}
3084 
3085 /**
3086  * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3087  * @chip: NAND chip object
3088  * @oob: oob destination address
3089  * @ops: oob ops structure
3090  * @len: size of oob to transfer
3091  */
3092 static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
3093 				  struct mtd_oob_ops *ops, size_t len)
3094 {
3095 	struct mtd_info *mtd = nand_to_mtd(chip);
3096 	int ret;
3097 
3098 	switch (ops->mode) {
3099 
3100 	case MTD_OPS_PLACE_OOB:
3101 	case MTD_OPS_RAW:
3102 		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
3103 		return oob + len;
3104 
3105 	case MTD_OPS_AUTO_OOB:
3106 		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3107 						  ops->ooboffs, len);
3108 		BUG_ON(ret);
3109 		return oob + len;
3110 
3111 	default:
3112 		BUG();
3113 	}
3114 	return NULL;
3115 }
3116 
3117 /**
3118  * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3119  * @chip: NAND chip object
3120  * @retry_mode: the retry mode to use
3121  *
3122  * Some vendors supply a special command to shift the Vt threshold, to be used
3123  * when there are too many bitflips in a page (i.e., ECC error). After setting
3124  * a new threshold, the host should retry reading the page.
3125  */
3126 static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
3127 {
3128 	pr_debug("setting READ RETRY mode %d\n", retry_mode);
3129 
3130 	if (retry_mode >= chip->read_retries)
3131 		return -EINVAL;
3132 
3133 	if (!chip->setup_read_retry)
3134 		return -EOPNOTSUPP;
3135 
3136 	return chip->setup_read_retry(chip, retry_mode);
3137 }
3138 
3139 static void nand_wait_readrdy(struct nand_chip *chip)
3140 {
3141 	const struct nand_sdr_timings *sdr;
3142 
3143 	if (!(chip->options & NAND_NEED_READRDY))
3144 		return;
3145 
3146 	sdr = nand_get_sdr_timings(&chip->data_interface);
3147 	WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
3148 }
3149 
3150 /**
3151  * nand_do_read_ops - [INTERN] Read data with ECC
3152  * @chip: NAND chip object
3153  * @from: offset to read from
3154  * @ops: oob ops structure
3155  *
3156  * Internal function. Called with chip held.
3157  */
3158 static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
3159 			    struct mtd_oob_ops *ops)
3160 {
3161 	int chipnr, page, realpage, col, bytes, aligned, oob_required;
3162 	struct mtd_info *mtd = nand_to_mtd(chip);
3163 	int ret = 0;
3164 	uint32_t readlen = ops->len;
3165 	uint32_t oobreadlen = ops->ooblen;
3166 	uint32_t max_oobsize = mtd_oobavail(mtd, ops);
3167 
3168 	uint8_t *bufpoi, *oob, *buf;
3169 	int use_bufpoi;
3170 	unsigned int max_bitflips = 0;
3171 	int retry_mode = 0;
3172 	bool ecc_fail = false;
3173 
3174 	chipnr = (int)(from >> chip->chip_shift);
3175 	nand_select_target(chip, chipnr);
3176 
3177 	realpage = (int)(from >> chip->page_shift);
3178 	page = realpage & chip->pagemask;
3179 
3180 	col = (int)(from & (mtd->writesize - 1));
3181 
3182 	buf = ops->datbuf;
3183 	oob = ops->oobbuf;
3184 	oob_required = oob ? 1 : 0;
3185 
3186 	while (1) {
3187 		unsigned int ecc_failures = mtd->ecc_stats.failed;
3188 
3189 		bytes = min(mtd->writesize - col, readlen);
3190 		aligned = (bytes == mtd->writesize);
3191 
3192 		if (!aligned)
3193 			use_bufpoi = 1;
3194 		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
3195 			use_bufpoi = !virt_addr_valid(buf) ||
3196 				     !IS_ALIGNED((unsigned long)buf,
3197 						 chip->buf_align);
3198 		else
3199 			use_bufpoi = 0;
3200 
3201 		/* Is the current page in the buffer? */
3202 		if (realpage != chip->pagecache.page || oob) {
3203 			bufpoi = use_bufpoi ? chip->data_buf : buf;
3204 
3205 			if (use_bufpoi && aligned)
3206 				pr_debug("%s: using read bounce buffer for buf@%p\n",
3207 						 __func__, buf);
3208 
3209 read_retry:
3210 			/*
3211 			 * Now read the page into the buffer.  Absent an error,
3212 			 * the read methods return max bitflips per ecc step.
3213 			 */
3214 			if (unlikely(ops->mode == MTD_OPS_RAW))
3215 				ret = chip->ecc.read_page_raw(chip, bufpoi,
3216 							      oob_required,
3217 							      page);
3218 			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
3219 				 !oob)
3220 				ret = chip->ecc.read_subpage(chip, col, bytes,
3221 							     bufpoi, page);
3222 			else
3223 				ret = chip->ecc.read_page(chip, bufpoi,
3224 							  oob_required, page);
3225 			if (ret < 0) {
3226 				if (use_bufpoi)
3227 					/* Invalidate page cache */
3228 					chip->pagecache.page = -1;
3229 				break;
3230 			}
3231 
3232 			/* Transfer not aligned data */
3233 			if (use_bufpoi) {
3234 				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
3235 				    !(mtd->ecc_stats.failed - ecc_failures) &&
3236 				    (ops->mode != MTD_OPS_RAW)) {
3237 					chip->pagecache.page = realpage;
3238 					chip->pagecache.bitflips = ret;
3239 				} else {
3240 					/* Invalidate page cache */
3241 					chip->pagecache.page = -1;
3242 				}
3243 				memcpy(buf, chip->data_buf + col, bytes);
3244 			}
3245 
3246 			if (unlikely(oob)) {
3247 				int toread = min(oobreadlen, max_oobsize);
3248 
3249 				if (toread) {
3250 					oob = nand_transfer_oob(chip, oob, ops,
3251 								toread);
3252 					oobreadlen -= toread;
3253 				}
3254 			}
3255 
3256 			nand_wait_readrdy(chip);
3257 
3258 			if (mtd->ecc_stats.failed - ecc_failures) {
3259 				if (retry_mode + 1 < chip->read_retries) {
3260 					retry_mode++;
3261 					ret = nand_setup_read_retry(chip,
3262 							retry_mode);
3263 					if (ret < 0)
3264 						break;
3265 
3266 					/* Reset failures; retry */
3267 					mtd->ecc_stats.failed = ecc_failures;
3268 					goto read_retry;
3269 				} else {
3270 					/* No more retry modes; real failure */
3271 					ecc_fail = true;
3272 				}
3273 			}
3274 
3275 			buf += bytes;
3276 			max_bitflips = max_t(unsigned int, max_bitflips, ret);
3277 		} else {
3278 			memcpy(buf, chip->data_buf + col, bytes);
3279 			buf += bytes;
3280 			max_bitflips = max_t(unsigned int, max_bitflips,
3281 					     chip->pagecache.bitflips);
3282 		}
3283 
3284 		readlen -= bytes;
3285 
3286 		/* Reset to retry mode 0 */
3287 		if (retry_mode) {
3288 			ret = nand_setup_read_retry(chip, 0);
3289 			if (ret < 0)
3290 				break;
3291 			retry_mode = 0;
3292 		}
3293 
3294 		if (!readlen)
3295 			break;
3296 
3297 		/* For subsequent reads align to page boundary */
3298 		col = 0;
3299 		/* Increment page address */
3300 		realpage++;
3301 
3302 		page = realpage & chip->pagemask;
3303 		/* Check, if we cross a chip boundary */
3304 		if (!page) {
3305 			chipnr++;
3306 			nand_deselect_target(chip);
3307 			nand_select_target(chip, chipnr);
3308 		}
3309 	}
3310 	nand_deselect_target(chip);
3311 
3312 	ops->retlen = ops->len - (size_t) readlen;
3313 	if (oob)
3314 		ops->oobretlen = ops->ooblen - oobreadlen;
3315 
3316 	if (ret < 0)
3317 		return ret;
3318 
3319 	if (ecc_fail)
3320 		return -EBADMSG;
3321 
3322 	return max_bitflips;
3323 }
3324 
3325 /**
3326  * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
3327  * @chip: nand chip info structure
3328  * @page: page number to read
3329  */
3330 int nand_read_oob_std(struct nand_chip *chip, int page)
3331 {
3332 	struct mtd_info *mtd = nand_to_mtd(chip);
3333 
3334 	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3335 }
3336 EXPORT_SYMBOL(nand_read_oob_std);
3337 
3338 /**
3339  * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3340  *			    with syndromes
3341  * @chip: nand chip info structure
3342  * @page: page number to read
3343  */
3344 static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3345 {
3346 	struct mtd_info *mtd = nand_to_mtd(chip);
3347 	int length = mtd->oobsize;
3348 	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3349 	int eccsize = chip->ecc.size;
3350 	uint8_t *bufpoi = chip->oob_poi;
3351 	int i, toread, sndrnd = 0, pos, ret;
3352 
3353 	ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3354 	if (ret)
3355 		return ret;
3356 
3357 	for (i = 0; i < chip->ecc.steps; i++) {
3358 		if (sndrnd) {
3359 			int ret;
3360 
3361 			pos = eccsize + i * (eccsize + chunk);
3362 			if (mtd->writesize > 512)
3363 				ret = nand_change_read_column_op(chip, pos,
3364 								 NULL, 0,
3365 								 false);
3366 			else
3367 				ret = nand_read_page_op(chip, page, pos, NULL,
3368 							0);
3369 
3370 			if (ret)
3371 				return ret;
3372 		} else
3373 			sndrnd = 1;
3374 		toread = min_t(int, length, chunk);
3375 
3376 		ret = nand_read_data_op(chip, bufpoi, toread, false);
3377 		if (ret)
3378 			return ret;
3379 
3380 		bufpoi += toread;
3381 		length -= toread;
3382 	}
3383 	if (length > 0) {
3384 		ret = nand_read_data_op(chip, bufpoi, length, false);
3385 		if (ret)
3386 			return ret;
3387 	}
3388 
3389 	return 0;
3390 }
3391 
3392 /**
3393  * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
3394  * @chip: nand chip info structure
3395  * @page: page number to write
3396  */
3397 int nand_write_oob_std(struct nand_chip *chip, int page)
3398 {
3399 	struct mtd_info *mtd = nand_to_mtd(chip);
3400 
3401 	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3402 				 mtd->oobsize);
3403 }
3404 EXPORT_SYMBOL(nand_write_oob_std);
3405 
3406 /**
3407  * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
3408  *			     with syndrome - only for large page flash
3409  * @chip: nand chip info structure
3410  * @page: page number to write
3411  */
3412 static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
3413 {
3414 	struct mtd_info *mtd = nand_to_mtd(chip);
3415 	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3416 	int eccsize = chip->ecc.size, length = mtd->oobsize;
3417 	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
3418 	const uint8_t *bufpoi = chip->oob_poi;
3419 
3420 	/*
3421 	 * data-ecc-data-ecc ... ecc-oob
3422 	 * or
3423 	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
3424 	 */
3425 	if (!chip->ecc.prepad && !chip->ecc.postpad) {
3426 		pos = steps * (eccsize + chunk);
3427 		steps = 0;
3428 	} else
3429 		pos = eccsize;
3430 
3431 	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
3432 	if (ret)
3433 		return ret;
3434 
3435 	for (i = 0; i < steps; i++) {
3436 		if (sndcmd) {
3437 			if (mtd->writesize <= 512) {
3438 				uint32_t fill = 0xFFFFFFFF;
3439 
3440 				len = eccsize;
3441 				while (len > 0) {
3442 					int num = min_t(int, len, 4);
3443 
3444 					ret = nand_write_data_op(chip, &fill,
3445 								 num, false);
3446 					if (ret)
3447 						return ret;
3448 
3449 					len -= num;
3450 				}
3451 			} else {
3452 				pos = eccsize + i * (eccsize + chunk);
3453 				ret = nand_change_write_column_op(chip, pos,
3454 								  NULL, 0,
3455 								  false);
3456 				if (ret)
3457 					return ret;
3458 			}
3459 		} else
3460 			sndcmd = 1;
3461 		len = min_t(int, length, chunk);
3462 
3463 		ret = nand_write_data_op(chip, bufpoi, len, false);
3464 		if (ret)
3465 			return ret;
3466 
3467 		bufpoi += len;
3468 		length -= len;
3469 	}
3470 	if (length > 0) {
3471 		ret = nand_write_data_op(chip, bufpoi, length, false);
3472 		if (ret)
3473 			return ret;
3474 	}
3475 
3476 	return nand_prog_page_end_op(chip);
3477 }
3478 
3479 /**
3480  * nand_do_read_oob - [INTERN] NAND read out-of-band
3481  * @chip: NAND chip object
3482  * @from: offset to read from
3483  * @ops: oob operations description structure
3484  *
3485  * NAND read out-of-band data from the spare area.
3486  */
3487 static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
3488 			    struct mtd_oob_ops *ops)
3489 {
3490 	struct mtd_info *mtd = nand_to_mtd(chip);
3491 	unsigned int max_bitflips = 0;
3492 	int page, realpage, chipnr;
3493 	struct mtd_ecc_stats stats;
3494 	int readlen = ops->ooblen;
3495 	int len;
3496 	uint8_t *buf = ops->oobbuf;
3497 	int ret = 0;
3498 
3499 	pr_debug("%s: from = 0x%08Lx, len = %i\n",
3500 			__func__, (unsigned long long)from, readlen);
3501 
3502 	stats = mtd->ecc_stats;
3503 
3504 	len = mtd_oobavail(mtd, ops);
3505 
3506 	chipnr = (int)(from >> chip->chip_shift);
3507 	nand_select_target(chip, chipnr);
3508 
3509 	/* Shift to get page */
3510 	realpage = (int)(from >> chip->page_shift);
3511 	page = realpage & chip->pagemask;
3512 
3513 	while (1) {
3514 		if (ops->mode == MTD_OPS_RAW)
3515 			ret = chip->ecc.read_oob_raw(chip, page);
3516 		else
3517 			ret = chip->ecc.read_oob(chip, page);
3518 
3519 		if (ret < 0)
3520 			break;
3521 
3522 		len = min(len, readlen);
3523 		buf = nand_transfer_oob(chip, buf, ops, len);
3524 
3525 		nand_wait_readrdy(chip);
3526 
3527 		max_bitflips = max_t(unsigned int, max_bitflips, ret);
3528 
3529 		readlen -= len;
3530 		if (!readlen)
3531 			break;
3532 
3533 		/* Increment page address */
3534 		realpage++;
3535 
3536 		page = realpage & chip->pagemask;
3537 		/* Check, if we cross a chip boundary */
3538 		if (!page) {
3539 			chipnr++;
3540 			nand_deselect_target(chip);
3541 			nand_select_target(chip, chipnr);
3542 		}
3543 	}
3544 	nand_deselect_target(chip);
3545 
3546 	ops->oobretlen = ops->ooblen - readlen;
3547 
3548 	if (ret < 0)
3549 		return ret;
3550 
3551 	if (mtd->ecc_stats.failed - stats.failed)
3552 		return -EBADMSG;
3553 
3554 	return max_bitflips;
3555 }
3556 
3557 /**
3558  * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3559  * @mtd: MTD device structure
3560  * @from: offset to read from
3561  * @ops: oob operation description structure
3562  *
3563  * NAND read data and/or out-of-band data.
3564  */
3565 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3566 			 struct mtd_oob_ops *ops)
3567 {
3568 	struct nand_chip *chip = mtd_to_nand(mtd);
3569 	int ret;
3570 
3571 	ops->retlen = 0;
3572 
3573 	if (ops->mode != MTD_OPS_PLACE_OOB &&
3574 	    ops->mode != MTD_OPS_AUTO_OOB &&
3575 	    ops->mode != MTD_OPS_RAW)
3576 		return -ENOTSUPP;
3577 
3578 	ret = nand_get_device(chip);
3579 	if (ret)
3580 		return ret;
3581 
3582 	if (!ops->datbuf)
3583 		ret = nand_do_read_oob(chip, from, ops);
3584 	else
3585 		ret = nand_do_read_ops(chip, from, ops);
3586 
3587 	nand_release_device(chip);
3588 	return ret;
3589 }
3590 
3591 /**
3592  * nand_write_page_raw_notsupp - dummy raw page write function
3593  * @chip: nand chip info structure
3594  * @buf: data buffer
3595  * @oob_required: must write chip->oob_poi to OOB
3596  * @page: page number to write
3597  *
3598  * Returns -ENOTSUPP unconditionally.
3599  */
3600 int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
3601 				int oob_required, int page)
3602 {
3603 	return -ENOTSUPP;
3604 }
3605 
3606 /**
3607  * nand_write_page_raw - [INTERN] raw page write function
3608  * @chip: nand chip info structure
3609  * @buf: data buffer
3610  * @oob_required: must write chip->oob_poi to OOB
3611  * @page: page number to write
3612  *
3613  * Not for syndrome calculating ECC controllers, which use a special oob layout.
3614  */
3615 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
3616 			int oob_required, int page)
3617 {
3618 	struct mtd_info *mtd = nand_to_mtd(chip);
3619 	int ret;
3620 
3621 	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3622 	if (ret)
3623 		return ret;
3624 
3625 	if (oob_required) {
3626 		ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3627 					 false);
3628 		if (ret)
3629 			return ret;
3630 	}
3631 
3632 	return nand_prog_page_end_op(chip);
3633 }
3634 EXPORT_SYMBOL(nand_write_page_raw);
3635 
3636 /**
3637  * nand_write_page_raw_syndrome - [INTERN] raw page write function
3638  * @chip: nand chip info structure
3639  * @buf: data buffer
3640  * @oob_required: must write chip->oob_poi to OOB
3641  * @page: page number to write
3642  *
3643  * We need a special oob layout and handling even when ECC isn't checked.
3644  */
3645 static int nand_write_page_raw_syndrome(struct nand_chip *chip,
3646 					const uint8_t *buf, int oob_required,
3647 					int page)
3648 {
3649 	struct mtd_info *mtd = nand_to_mtd(chip);
3650 	int eccsize = chip->ecc.size;
3651 	int eccbytes = chip->ecc.bytes;
3652 	uint8_t *oob = chip->oob_poi;
3653 	int steps, size, ret;
3654 
3655 	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3656 	if (ret)
3657 		return ret;
3658 
3659 	for (steps = chip->ecc.steps; steps > 0; steps--) {
3660 		ret = nand_write_data_op(chip, buf, eccsize, false);
3661 		if (ret)
3662 			return ret;
3663 
3664 		buf += eccsize;
3665 
3666 		if (chip->ecc.prepad) {
3667 			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3668 						 false);
3669 			if (ret)
3670 				return ret;
3671 
3672 			oob += chip->ecc.prepad;
3673 		}
3674 
3675 		ret = nand_write_data_op(chip, oob, eccbytes, false);
3676 		if (ret)
3677 			return ret;
3678 
3679 		oob += eccbytes;
3680 
3681 		if (chip->ecc.postpad) {
3682 			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3683 						 false);
3684 			if (ret)
3685 				return ret;
3686 
3687 			oob += chip->ecc.postpad;
3688 		}
3689 	}
3690 
3691 	size = mtd->oobsize - (oob - chip->oob_poi);
3692 	if (size) {
3693 		ret = nand_write_data_op(chip, oob, size, false);
3694 		if (ret)
3695 			return ret;
3696 	}
3697 
3698 	return nand_prog_page_end_op(chip);
3699 }
3700 /**
3701  * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
3702  * @chip: nand chip info structure
3703  * @buf: data buffer
3704  * @oob_required: must write chip->oob_poi to OOB
3705  * @page: page number to write
3706  */
3707 static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
3708 				 int oob_required, int page)
3709 {
3710 	struct mtd_info *mtd = nand_to_mtd(chip);
3711 	int i, eccsize = chip->ecc.size, ret;
3712 	int eccbytes = chip->ecc.bytes;
3713 	int eccsteps = chip->ecc.steps;
3714 	uint8_t *ecc_calc = chip->ecc.calc_buf;
3715 	const uint8_t *p = buf;
3716 
3717 	/* Software ECC calculation */
3718 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
3719 		chip->ecc.calculate(chip, p, &ecc_calc[i]);
3720 
3721 	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3722 					 chip->ecc.total);
3723 	if (ret)
3724 		return ret;
3725 
3726 	return chip->ecc.write_page_raw(chip, buf, 1, page);
3727 }
3728 
3729 /**
3730  * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
3731  * @chip: nand chip info structure
3732  * @buf: data buffer
3733  * @oob_required: must write chip->oob_poi to OOB
3734  * @page: page number to write
3735  */
3736 static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
3737 				 int oob_required, int page)
3738 {
3739 	struct mtd_info *mtd = nand_to_mtd(chip);
3740 	int i, eccsize = chip->ecc.size, ret;
3741 	int eccbytes = chip->ecc.bytes;
3742 	int eccsteps = chip->ecc.steps;
3743 	uint8_t *ecc_calc = chip->ecc.calc_buf;
3744 	const uint8_t *p = buf;
3745 
3746 	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3747 	if (ret)
3748 		return ret;
3749 
3750 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3751 		chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3752 
3753 		ret = nand_write_data_op(chip, p, eccsize, false);
3754 		if (ret)
3755 			return ret;
3756 
3757 		chip->ecc.calculate(chip, p, &ecc_calc[i]);
3758 	}
3759 
3760 	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3761 					 chip->ecc.total);
3762 	if (ret)
3763 		return ret;
3764 
3765 	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3766 	if (ret)
3767 		return ret;
3768 
3769 	return nand_prog_page_end_op(chip);
3770 }
3771 
3772 
3773 /**
3774  * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
3775  * @chip:	nand chip info structure
3776  * @offset:	column address of subpage within the page
3777  * @data_len:	data length
3778  * @buf:	data buffer
3779  * @oob_required: must write chip->oob_poi to OOB
3780  * @page: page number to write
3781  */
3782 static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
3783 				    uint32_t data_len, const uint8_t *buf,
3784 				    int oob_required, int page)
3785 {
3786 	struct mtd_info *mtd = nand_to_mtd(chip);
3787 	uint8_t *oob_buf  = chip->oob_poi;
3788 	uint8_t *ecc_calc = chip->ecc.calc_buf;
3789 	int ecc_size      = chip->ecc.size;
3790 	int ecc_bytes     = chip->ecc.bytes;
3791 	int ecc_steps     = chip->ecc.steps;
3792 	uint32_t start_step = offset / ecc_size;
3793 	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
3794 	int oob_bytes       = mtd->oobsize / ecc_steps;
3795 	int step, ret;
3796 
3797 	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3798 	if (ret)
3799 		return ret;
3800 
3801 	for (step = 0; step < ecc_steps; step++) {
3802 		/* configure controller for WRITE access */
3803 		chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3804 
3805 		/* write data (untouched subpages already masked by 0xFF) */
3806 		ret = nand_write_data_op(chip, buf, ecc_size, false);
3807 		if (ret)
3808 			return ret;
3809 
3810 		/* mask ECC of un-touched subpages by padding 0xFF */
3811 		if ((step < start_step) || (step > end_step))
3812 			memset(ecc_calc, 0xff, ecc_bytes);
3813 		else
3814 			chip->ecc.calculate(chip, buf, ecc_calc);
3815 
3816 		/* mask OOB of un-touched subpages by padding 0xFF */
3817 		/* if oob_required, preserve OOB metadata of written subpage */
3818 		if (!oob_required || (step < start_step) || (step > end_step))
3819 			memset(oob_buf, 0xff, oob_bytes);
3820 
3821 		buf += ecc_size;
3822 		ecc_calc += ecc_bytes;
3823 		oob_buf  += oob_bytes;
3824 	}
3825 
3826 	/* copy calculated ECC for whole page to chip->buffer->oob */
3827 	/* this include masked-value(0xFF) for unwritten subpages */
3828 	ecc_calc = chip->ecc.calc_buf;
3829 	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3830 					 chip->ecc.total);
3831 	if (ret)
3832 		return ret;
3833 
3834 	/* write OOB buffer to NAND device */
3835 	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3836 	if (ret)
3837 		return ret;
3838 
3839 	return nand_prog_page_end_op(chip);
3840 }
3841 
3842 
3843 /**
3844  * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
3845  * @chip: nand chip info structure
3846  * @buf: data buffer
3847  * @oob_required: must write chip->oob_poi to OOB
3848  * @page: page number to write
3849  *
3850  * The hw generator calculates the error syndrome automatically. Therefore we
3851  * need a special oob layout and handling.
3852  */
3853 static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
3854 				    int oob_required, int page)
3855 {
3856 	struct mtd_info *mtd = nand_to_mtd(chip);
3857 	int i, eccsize = chip->ecc.size;
3858 	int eccbytes = chip->ecc.bytes;
3859 	int eccsteps = chip->ecc.steps;
3860 	const uint8_t *p = buf;
3861 	uint8_t *oob = chip->oob_poi;
3862 	int ret;
3863 
3864 	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3865 	if (ret)
3866 		return ret;
3867 
3868 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3869 		chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3870 
3871 		ret = nand_write_data_op(chip, p, eccsize, false);
3872 		if (ret)
3873 			return ret;
3874 
3875 		if (chip->ecc.prepad) {
3876 			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3877 						 false);
3878 			if (ret)
3879 				return ret;
3880 
3881 			oob += chip->ecc.prepad;
3882 		}
3883 
3884 		chip->ecc.calculate(chip, p, oob);
3885 
3886 		ret = nand_write_data_op(chip, oob, eccbytes, false);
3887 		if (ret)
3888 			return ret;
3889 
3890 		oob += eccbytes;
3891 
3892 		if (chip->ecc.postpad) {
3893 			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3894 						 false);
3895 			if (ret)
3896 				return ret;
3897 
3898 			oob += chip->ecc.postpad;
3899 		}
3900 	}
3901 
3902 	/* Calculate remaining oob bytes */
3903 	i = mtd->oobsize - (oob - chip->oob_poi);
3904 	if (i) {
3905 		ret = nand_write_data_op(chip, oob, i, false);
3906 		if (ret)
3907 			return ret;
3908 	}
3909 
3910 	return nand_prog_page_end_op(chip);
3911 }
3912 
3913 /**
3914  * nand_write_page - write one page
3915  * @chip: NAND chip descriptor
3916  * @offset: address offset within the page
3917  * @data_len: length of actual data to be written
3918  * @buf: the data to write
3919  * @oob_required: must write chip->oob_poi to OOB
3920  * @page: page number to write
3921  * @raw: use _raw version of write_page
3922  */
3923 static int nand_write_page(struct nand_chip *chip, uint32_t offset,
3924 			   int data_len, const uint8_t *buf, int oob_required,
3925 			   int page, int raw)
3926 {
3927 	struct mtd_info *mtd = nand_to_mtd(chip);
3928 	int status, subpage;
3929 
3930 	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
3931 		chip->ecc.write_subpage)
3932 		subpage = offset || (data_len < mtd->writesize);
3933 	else
3934 		subpage = 0;
3935 
3936 	if (unlikely(raw))
3937 		status = chip->ecc.write_page_raw(chip, buf, oob_required,
3938 						  page);
3939 	else if (subpage)
3940 		status = chip->ecc.write_subpage(chip, offset, data_len, buf,
3941 						 oob_required, page);
3942 	else
3943 		status = chip->ecc.write_page(chip, buf, oob_required, page);
3944 
3945 	if (status < 0)
3946 		return status;
3947 
3948 	return 0;
3949 }
3950 
/*
 * True if @x is not aligned to the chip's subpage size. The macro captures
 * 'chip' from the enclosing scope; the argument is parenthesized so that
 * expression arguments (e.g. NOTALIGNED(a + b)) expand correctly.
 */
#define NOTALIGNED(x)	(((x) & (chip->subpagesize - 1)) != 0)
3952 
3953 /**
3954  * nand_do_write_ops - [INTERN] NAND write with ECC
3955  * @chip: NAND chip object
3956  * @to: offset to write to
3957  * @ops: oob operations description structure
3958  *
3959  * NAND write with ECC.
3960  */
3961 static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
3962 			     struct mtd_oob_ops *ops)
3963 {
3964 	struct mtd_info *mtd = nand_to_mtd(chip);
3965 	int chipnr, realpage, page, column;
3966 	uint32_t writelen = ops->len;
3967 
3968 	uint32_t oobwritelen = ops->ooblen;
3969 	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
3970 
3971 	uint8_t *oob = ops->oobbuf;
3972 	uint8_t *buf = ops->datbuf;
3973 	int ret;
3974 	int oob_required = oob ? 1 : 0;
3975 
3976 	ops->retlen = 0;
3977 	if (!writelen)
3978 		return 0;
3979 
3980 	/* Reject writes, which are not page aligned */
3981 	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
3982 		pr_notice("%s: attempt to write non page aligned data\n",
3983 			   __func__);
3984 		return -EINVAL;
3985 	}
3986 
3987 	column = to & (mtd->writesize - 1);
3988 
3989 	chipnr = (int)(to >> chip->chip_shift);
3990 	nand_select_target(chip, chipnr);
3991 
3992 	/* Check, if it is write protected */
3993 	if (nand_check_wp(chip)) {
3994 		ret = -EIO;
3995 		goto err_out;
3996 	}
3997 
3998 	realpage = (int)(to >> chip->page_shift);
3999 	page = realpage & chip->pagemask;
4000 
4001 	/* Invalidate the page cache, when we write to the cached page */
4002 	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
4003 	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
4004 		chip->pagecache.page = -1;
4005 
4006 	/* Don't allow multipage oob writes with offset */
4007 	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
4008 		ret = -EINVAL;
4009 		goto err_out;
4010 	}
4011 
4012 	while (1) {
4013 		int bytes = mtd->writesize;
4014 		uint8_t *wbuf = buf;
4015 		int use_bufpoi;
4016 		int part_pagewr = (column || writelen < mtd->writesize);
4017 
4018 		if (part_pagewr)
4019 			use_bufpoi = 1;
4020 		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
4021 			use_bufpoi = !virt_addr_valid(buf) ||
4022 				     !IS_ALIGNED((unsigned long)buf,
4023 						 chip->buf_align);
4024 		else
4025 			use_bufpoi = 0;
4026 
4027 		/* Partial page write?, or need to use bounce buffer */
4028 		if (use_bufpoi) {
4029 			pr_debug("%s: using write bounce buffer for buf@%p\n",
4030 					 __func__, buf);
4031 			if (part_pagewr)
4032 				bytes = min_t(int, bytes - column, writelen);
4033 			wbuf = nand_get_data_buf(chip);
4034 			memset(wbuf, 0xff, mtd->writesize);
4035 			memcpy(&wbuf[column], buf, bytes);
4036 		}
4037 
4038 		if (unlikely(oob)) {
4039 			size_t len = min(oobwritelen, oobmaxlen);
4040 			oob = nand_fill_oob(chip, oob, len, ops);
4041 			oobwritelen -= len;
4042 		} else {
4043 			/* We still need to erase leftover OOB data */
4044 			memset(chip->oob_poi, 0xff, mtd->oobsize);
4045 		}
4046 
4047 		ret = nand_write_page(chip, column, bytes, wbuf,
4048 				      oob_required, page,
4049 				      (ops->mode == MTD_OPS_RAW));
4050 		if (ret)
4051 			break;
4052 
4053 		writelen -= bytes;
4054 		if (!writelen)
4055 			break;
4056 
4057 		column = 0;
4058 		buf += bytes;
4059 		realpage++;
4060 
4061 		page = realpage & chip->pagemask;
4062 		/* Check, if we cross a chip boundary */
4063 		if (!page) {
4064 			chipnr++;
4065 			nand_deselect_target(chip);
4066 			nand_select_target(chip, chipnr);
4067 		}
4068 	}
4069 
4070 	ops->retlen = ops->len - writelen;
4071 	if (unlikely(oob))
4072 		ops->oobretlen = ops->ooblen;
4073 
4074 err_out:
4075 	nand_deselect_target(chip);
4076 	return ret;
4077 }
4078 
4079 /**
4080  * panic_nand_write - [MTD Interface] NAND write with ECC
4081  * @mtd: MTD device structure
4082  * @to: offset to write to
4083  * @len: number of bytes to write
4084  * @retlen: pointer to variable to store the number of written bytes
4085  * @buf: the data to write
4086  *
4087  * NAND write with ECC. Used when performing writes in interrupt context, this
4088  * may for example be called by mtdoops when writing an oops while in panic.
4089  */
4090 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4091 			    size_t *retlen, const uint8_t *buf)
4092 {
4093 	struct nand_chip *chip = mtd_to_nand(mtd);
4094 	int chipnr = (int)(to >> chip->chip_shift);
4095 	struct mtd_oob_ops ops;
4096 	int ret;
4097 
4098 	nand_select_target(chip, chipnr);
4099 
4100 	/* Wait for the device to get ready */
4101 	panic_nand_wait(chip, 400);
4102 
4103 	memset(&ops, 0, sizeof(ops));
4104 	ops.len = len;
4105 	ops.datbuf = (uint8_t *)buf;
4106 	ops.mode = MTD_OPS_PLACE_OOB;
4107 
4108 	ret = nand_do_write_ops(chip, to, &ops);
4109 
4110 	*retlen = ops.retlen;
4111 	return ret;
4112 }
4113 
4114 /**
4115  * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
4116  * @mtd: MTD device structure
4117  * @to: offset to write to
4118  * @ops: oob operation description structure
4119  */
4120 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4121 			  struct mtd_oob_ops *ops)
4122 {
4123 	struct nand_chip *chip = mtd_to_nand(mtd);
4124 	int ret;
4125 
4126 	ops->retlen = 0;
4127 
4128 	ret = nand_get_device(chip);
4129 	if (ret)
4130 		return ret;
4131 
4132 	switch (ops->mode) {
4133 	case MTD_OPS_PLACE_OOB:
4134 	case MTD_OPS_AUTO_OOB:
4135 	case MTD_OPS_RAW:
4136 		break;
4137 
4138 	default:
4139 		goto out;
4140 	}
4141 
4142 	if (!ops->datbuf)
4143 		ret = nand_do_write_oob(chip, to, ops);
4144 	else
4145 		ret = nand_do_write_ops(chip, to, ops);
4146 
4147 out:
4148 	nand_release_device(chip);
4149 	return ret;
4150 }
4151 
4152 /**
4153  * nand_erase - [MTD Interface] erase block(s)
4154  * @mtd: MTD device structure
4155  * @instr: erase instruction
4156  *
4157  * Erase one ore more blocks.
4158  */
4159 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
4160 {
4161 	return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
4162 }
4163 
4164 /**
4165  * nand_erase_nand - [INTERN] erase block(s)
4166  * @chip: NAND chip object
4167  * @instr: erase instruction
4168  * @allowbbt: allow erasing the bbt area
4169  *
4170  * Erase one ore more blocks.
4171  */
4172 int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
4173 		    int allowbbt)
4174 {
4175 	int page, pages_per_block, ret, chipnr;
4176 	loff_t len;
4177 
4178 	pr_debug("%s: start = 0x%012llx, len = %llu\n",
4179 			__func__, (unsigned long long)instr->addr,
4180 			(unsigned long long)instr->len);
4181 
4182 	if (check_offs_len(chip, instr->addr, instr->len))
4183 		return -EINVAL;
4184 
4185 	/* Grab the lock and see if the device is available */
4186 	ret = nand_get_device(chip);
4187 	if (ret)
4188 		return ret;
4189 
4190 	/* Shift to get first page */
4191 	page = (int)(instr->addr >> chip->page_shift);
4192 	chipnr = (int)(instr->addr >> chip->chip_shift);
4193 
4194 	/* Calculate pages in each block */
4195 	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
4196 
4197 	/* Select the NAND device */
4198 	nand_select_target(chip, chipnr);
4199 
4200 	/* Check, if it is write protected */
4201 	if (nand_check_wp(chip)) {
4202 		pr_debug("%s: device is write protected!\n",
4203 				__func__);
4204 		ret = -EIO;
4205 		goto erase_exit;
4206 	}
4207 
4208 	/* Loop through the pages */
4209 	len = instr->len;
4210 
4211 	while (len) {
4212 		/* Check if we have a bad block, we do not erase bad blocks! */
4213 		if (nand_block_checkbad(chip, ((loff_t) page) <<
4214 					chip->page_shift, allowbbt)) {
4215 			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
4216 				    __func__, page);
4217 			ret = -EIO;
4218 			goto erase_exit;
4219 		}
4220 
4221 		/*
4222 		 * Invalidate the page cache, if we erase the block which
4223 		 * contains the current cached page.
4224 		 */
4225 		if (page <= chip->pagecache.page && chip->pagecache.page <
4226 		    (page + pages_per_block))
4227 			chip->pagecache.page = -1;
4228 
4229 		ret = nand_erase_op(chip, (page & chip->pagemask) >>
4230 				    (chip->phys_erase_shift - chip->page_shift));
4231 		if (ret) {
4232 			pr_debug("%s: failed erase, page 0x%08x\n",
4233 					__func__, page);
4234 			instr->fail_addr =
4235 				((loff_t)page << chip->page_shift);
4236 			goto erase_exit;
4237 		}
4238 
4239 		/* Increment page address and decrement length */
4240 		len -= (1ULL << chip->phys_erase_shift);
4241 		page += pages_per_block;
4242 
4243 		/* Check, if we cross a chip boundary */
4244 		if (len && !(page & chip->pagemask)) {
4245 			chipnr++;
4246 			nand_deselect_target(chip);
4247 			nand_select_target(chip, chipnr);
4248 		}
4249 	}
4250 
4251 	ret = 0;
4252 erase_exit:
4253 
4254 	/* Deselect and wake up anyone waiting on the device */
4255 	nand_deselect_target(chip);
4256 	nand_release_device(chip);
4257 
4258 	/* Return more or less happy */
4259 	return ret;
4260 }
4261 
4262 /**
4263  * nand_sync - [MTD Interface] sync
4264  * @mtd: MTD device structure
4265  *
4266  * Sync is actually a wait for chip ready function.
4267  */
4268 static void nand_sync(struct mtd_info *mtd)
4269 {
4270 	struct nand_chip *chip = mtd_to_nand(mtd);
4271 
4272 	pr_debug("%s: called\n", __func__);
4273 
4274 	/* Grab the lock and see if the device is available */
4275 	WARN_ON(nand_get_device(chip));
4276 	/* Release it and go back */
4277 	nand_release_device(chip);
4278 }
4279 
4280 /**
4281  * nand_block_isbad - [MTD Interface] Check if block at offset is bad
4282  * @mtd: MTD device structure
4283  * @offs: offset relative to mtd start
4284  */
4285 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4286 {
4287 	struct nand_chip *chip = mtd_to_nand(mtd);
4288 	int chipnr = (int)(offs >> chip->chip_shift);
4289 	int ret;
4290 
4291 	/* Select the NAND device */
4292 	ret = nand_get_device(chip);
4293 	if (ret)
4294 		return ret;
4295 
4296 	nand_select_target(chip, chipnr);
4297 
4298 	ret = nand_block_checkbad(chip, offs, 0);
4299 
4300 	nand_deselect_target(chip);
4301 	nand_release_device(chip);
4302 
4303 	return ret;
4304 }
4305 
4306 /**
4307  * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
4308  * @mtd: MTD device structure
4309  * @ofs: offset relative to mtd start
4310  */
4311 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4312 {
4313 	int ret;
4314 
4315 	ret = nand_block_isbad(mtd, ofs);
4316 	if (ret) {
4317 		/* If it was bad already, return success and do nothing */
4318 		if (ret > 0)
4319 			return 0;
4320 		return ret;
4321 	}
4322 
4323 	return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4324 }
4325 
4326 /**
4327  * nand_suspend - [MTD Interface] Suspend the NAND flash
4328  * @mtd: MTD device structure
4329  *
4330  * Returns 0 for success or negative error code otherwise.
4331  */
4332 static int nand_suspend(struct mtd_info *mtd)
4333 {
4334 	struct nand_chip *chip = mtd_to_nand(mtd);
4335 	int ret = 0;
4336 
4337 	mutex_lock(&chip->lock);
4338 	if (chip->suspend)
4339 		ret = chip->suspend(chip);
4340 	if (!ret)
4341 		chip->suspended = 1;
4342 	mutex_unlock(&chip->lock);
4343 
4344 	return ret;
4345 }
4346 
4347 /**
4348  * nand_resume - [MTD Interface] Resume the NAND flash
4349  * @mtd: MTD device structure
4350  */
4351 static void nand_resume(struct mtd_info *mtd)
4352 {
4353 	struct nand_chip *chip = mtd_to_nand(mtd);
4354 
4355 	mutex_lock(&chip->lock);
4356 	if (chip->suspended) {
4357 		if (chip->resume)
4358 			chip->resume(chip);
4359 		chip->suspended = 0;
4360 	} else {
4361 		pr_err("%s called for a chip which is not in suspended state\n",
4362 			__func__);
4363 	}
4364 	mutex_unlock(&chip->lock);
4365 }
4366 
4367 /**
4368  * nand_shutdown - [MTD Interface] Finish the current NAND operation and
4369  *                 prevent further operations
4370  * @mtd: MTD device structure
4371  */
4372 static void nand_shutdown(struct mtd_info *mtd)
4373 {
4374 	nand_suspend(mtd);
4375 }
4376 
4377 /**
4378  * nand_lock - [MTD Interface] Lock the NAND flash
4379  * @mtd: MTD device structure
4380  * @ofs: offset byte address
4381  * @len: number of bytes to lock (must be a multiple of block/page size)
4382  */
4383 static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
4384 {
4385 	struct nand_chip *chip = mtd_to_nand(mtd);
4386 
4387 	if (!chip->lock_area)
4388 		return -ENOTSUPP;
4389 
4390 	return chip->lock_area(chip, ofs, len);
4391 }
4392 
4393 /**
4394  * nand_unlock - [MTD Interface] Unlock the NAND flash
4395  * @mtd: MTD device structure
4396  * @ofs: offset byte address
4397  * @len: number of bytes to unlock (must be a multiple of block/page size)
4398  */
4399 static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
4400 {
4401 	struct nand_chip *chip = mtd_to_nand(mtd);
4402 
4403 	if (!chip->unlock_area)
4404 		return -ENOTSUPP;
4405 
4406 	return chip->unlock_area(chip, ofs, len);
4407 }
4408 
4409 /* Set default functions */
4410 static void nand_set_defaults(struct nand_chip *chip)
4411 {
4412 	/* If no controller is provided, use the dummy, legacy one. */
4413 	if (!chip->controller) {
4414 		chip->controller = &chip->legacy.dummy_controller;
4415 		nand_controller_init(chip->controller);
4416 	}
4417 
4418 	nand_legacy_set_defaults(chip);
4419 
4420 	if (!chip->buf_align)
4421 		chip->buf_align = 1;
4422 }
4423 
/*
 * Sanitize ONFI strings so we can safely print them: force NUL termination,
 * replace non-printable bytes with '?' and strip trailing whitespace.
 * @s: buffer to sanitize in place
 * @len: size of the buffer in bytes (must be > 0 to do anything)
 */
void sanitize_string(uint8_t *s, size_t len)
{
	ssize_t i;

	/*
	 * Guard against an empty buffer: with len == 0 the termination
	 * below would index s[len - 1], i.e. write out of bounds since
	 * len is unsigned.
	 */
	if (!len)
		return;

	/* Null terminate */
	s[len - 1] = 0;

	/* Remove non printable chars */
	for (i = 0; i < len - 1; i++) {
		if (s[i] < ' ' || s[i] > 127)
			s[i] = '?';
	}

	/* Remove trailing spaces */
	strim(s);
}
4441 
4442 /*
4443  * nand_id_has_period - Check if an ID string has a given wraparound period
4444  * @id_data: the ID string
4445  * @arrlen: the length of the @id_data array
4446  * @period: the period of repitition
4447  *
4448  * Check if an ID string is repeated within a given sequence of bytes at
4449  * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
4450  * period of 3). This is a helper function for nand_id_len(). Returns non-zero
4451  * if the repetition has a period of @period; otherwise, returns zero.
4452  */
4453 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
4454 {
4455 	int i, j;
4456 	for (i = 0; i < period; i++)
4457 		for (j = i + period; j < arrlen; j += period)
4458 			if (id_data[i] != id_data[j])
4459 				return 0;
4460 	return 1;
4461 }
4462 
4463 /*
4464  * nand_id_len - Get the length of an ID string returned by CMD_READID
4465  * @id_data: the ID string
4466  * @arrlen: the length of the @id_data array
4467 
4468  * Returns the length of the ID string, according to known wraparound/trailing
4469  * zero patterns. If no pattern exists, returns the length of the array.
4470  */
4471 static int nand_id_len(u8 *id_data, int arrlen)
4472 {
4473 	int last_nonzero, period;
4474 
4475 	/* Find last non-zero byte */
4476 	for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
4477 		if (id_data[last_nonzero])
4478 			break;
4479 
4480 	/* All zeros */
4481 	if (last_nonzero < 0)
4482 		return 0;
4483 
4484 	/* Calculate wraparound period */
4485 	for (period = 1; period < arrlen; period++)
4486 		if (nand_id_has_period(id_data, arrlen, period))
4487 			break;
4488 
4489 	/* There's a repeated pattern */
4490 	if (period < arrlen)
4491 		return period;
4492 
4493 	/* There are trailing zeros */
4494 	if (last_nonzero < arrlen - 1)
4495 		return last_nonzero + 1;
4496 
4497 	/* No pattern detected */
4498 	return arrlen;
4499 }
4500 
4501 /* Extract the bits of per cell from the 3rd byte of the extended ID */
4502 static int nand_get_bits_per_cell(u8 cellinfo)
4503 {
4504 	int bits;
4505 
4506 	bits = cellinfo & NAND_CI_CELLTYPE_MSK;
4507 	bits >>= NAND_CI_CELLTYPE_SHIFT;
4508 	return bits + 1;
4509 }
4510 
4511 /*
4512  * Many new NAND share similar device ID codes, which represent the size of the
4513  * chip. The rest of the parameters must be decoded according to generic or
4514  * manufacturer-specific "extended ID" decoding patterns.
4515  */
4516 void nand_decode_ext_id(struct nand_chip *chip)
4517 {
4518 	struct nand_memory_organization *memorg;
4519 	struct mtd_info *mtd = nand_to_mtd(chip);
4520 	int extid;
4521 	u8 *id_data = chip->id.data;
4522 
4523 	memorg = nanddev_get_memorg(&chip->base);
4524 
4525 	/* The 3rd id byte holds MLC / multichip data */
4526 	memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4527 	/* The 4th id byte is the important one */
4528 	extid = id_data[3];
4529 
4530 	/* Calc pagesize */
4531 	memorg->pagesize = 1024 << (extid & 0x03);
4532 	mtd->writesize = memorg->pagesize;
4533 	extid >>= 2;
4534 	/* Calc oobsize */
4535 	memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
4536 	mtd->oobsize = memorg->oobsize;
4537 	extid >>= 2;
4538 	/* Calc blocksize. Blocksize is multiples of 64KiB */
4539 	memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
4540 				       memorg->pagesize;
4541 	mtd->erasesize = (64 * 1024) << (extid & 0x03);
4542 	extid >>= 2;
4543 	/* Get buswidth information */
4544 	if (extid & 0x1)
4545 		chip->options |= NAND_BUSWIDTH_16;
4546 }
4547 EXPORT_SYMBOL_GPL(nand_decode_ext_id);
4548 
4549 /*
4550  * Old devices have chip data hardcoded in the device ID table. nand_decode_id
4551  * decodes a matching ID table entry and assigns the MTD size parameters for
4552  * the chip.
4553  */
4554 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
4555 {
4556 	struct mtd_info *mtd = nand_to_mtd(chip);
4557 	struct nand_memory_organization *memorg;
4558 
4559 	memorg = nanddev_get_memorg(&chip->base);
4560 
4561 	memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
4562 	mtd->erasesize = type->erasesize;
4563 	memorg->pagesize = type->pagesize;
4564 	mtd->writesize = memorg->pagesize;
4565 	memorg->oobsize = memorg->pagesize / 32;
4566 	mtd->oobsize = memorg->oobsize;
4567 
4568 	/* All legacy ID NAND are small-page, SLC */
4569 	memorg->bits_per_cell = 1;
4570 }
4571 
4572 /*
4573  * Set the bad block marker/indicator (BBM/BBI) patterns according to some
4574  * heuristic patterns using various detected parameters (e.g., manufacturer,
4575  * page size, cell-type information).
4576  */
4577 static void nand_decode_bbm_options(struct nand_chip *chip)
4578 {
4579 	struct mtd_info *mtd = nand_to_mtd(chip);
4580 
4581 	/* Set the bad block position */
4582 	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4583 		chip->badblockpos = NAND_BBM_POS_LARGE;
4584 	else
4585 		chip->badblockpos = NAND_BBM_POS_SMALL;
4586 }
4587 
4588 static inline bool is_full_id_nand(struct nand_flash_dev *type)
4589 {
4590 	return type->id_len;
4591 }
4592 
/*
 * Apply a full-ID table entry to the chip when its ID bytes match the ID
 * that was read from the device. Returns true when the entry matched and
 * was applied, false otherwise (including when duplicating the model name
 * fails).
 */
static bool find_full_id_nand(struct nand_chip *chip,
			      struct nand_flash_dev *type)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	/* Compare only the first id_len bytes provided by the table entry */
	if (!strncmp(type->id, id_data, type->id_len)) {
		/* Copy the geometry described by the table entry */
		memorg->pagesize = type->pagesize;
		mtd->writesize = memorg->pagesize;
		memorg->pages_per_eraseblock = type->erasesize /
					       type->pagesize;
		mtd->erasesize = type->erasesize;
		memorg->oobsize = type->oobsize;
		mtd->oobsize = memorg->oobsize;

		memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
		/* chipsize is expressed in MiB, hence the << 20 */
		memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);
		chip->options |= type->options;
		chip->base.eccreq.strength = NAND_ECC_STRENGTH(type);
		chip->base.eccreq.step_size = NAND_ECC_STEP(type);
		chip->onfi_timing_mode_default =
					type->onfi_timing_mode_default;

		/* Freed later by nand_scan_ident_cleanup() */
		chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
		if (!chip->parameters.model)
			return false;

		return true;
	}
	return false;
}
4630 
4631 /*
4632  * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
4633  * compliant and does not have a full-id or legacy-id entry in the nand_ids
4634  * table.
4635  */
4636 static void nand_manufacturer_detect(struct nand_chip *chip)
4637 {
4638 	/*
4639 	 * Try manufacturer detection if available and use
4640 	 * nand_decode_ext_id() otherwise.
4641 	 */
4642 	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4643 	    chip->manufacturer.desc->ops->detect) {
4644 		struct nand_memory_organization *memorg;
4645 
4646 		memorg = nanddev_get_memorg(&chip->base);
4647 
4648 		/* The 3rd id byte holds MLC / multichip data */
4649 		memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
4650 		chip->manufacturer.desc->ops->detect(chip);
4651 	} else {
4652 		nand_decode_ext_id(chip);
4653 	}
4654 }
4655 
4656 /*
4657  * Manufacturer initialization. This function is called for all NANDs including
4658  * ONFI and JEDEC compliant ones.
4659  * Manufacturer drivers should put all their specific initialization code in
4660  * their ->init() hook.
4661  */
4662 static int nand_manufacturer_init(struct nand_chip *chip)
4663 {
4664 	if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4665 	    !chip->manufacturer.desc->ops->init)
4666 		return 0;
4667 
4668 	return chip->manufacturer.desc->ops->init(chip);
4669 }
4670 
4671 /*
4672  * Manufacturer cleanup. This function is called for all NANDs including
4673  * ONFI and JEDEC compliant ones.
4674  * Manufacturer drivers should put all their specific cleanup code in their
4675  * ->cleanup() hook.
4676  */
4677 static void nand_manufacturer_cleanup(struct nand_chip *chip)
4678 {
4679 	/* Release manufacturer private data */
4680 	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4681 	    chip->manufacturer.desc->ops->cleanup)
4682 		chip->manufacturer.desc->ops->cleanup(chip);
4683 }
4684 
4685 static const char *
4686 nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
4687 {
4688 	return manufacturer ? manufacturer->name : "Unknown";
4689 }
4690 
4691 /*
4692  * Get the flash and manufacturer id and lookup if the type is supported.
4693  */
4694 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
4695 {
4696 	const struct nand_manufacturer *manufacturer;
4697 	struct mtd_info *mtd = nand_to_mtd(chip);
4698 	struct nand_memory_organization *memorg;
4699 	int busw, ret;
4700 	u8 *id_data = chip->id.data;
4701 	u8 maf_id, dev_id;
4702 	u64 targetsize;
4703 
4704 	/*
4705 	 * Let's start by initializing memorg fields that might be left
4706 	 * unassigned by the ID-based detection logic.
4707 	 */
4708 	memorg = nanddev_get_memorg(&chip->base);
4709 	memorg->planes_per_lun = 1;
4710 	memorg->luns_per_target = 1;
4711 
4712 	/*
4713 	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
4714 	 * after power-up.
4715 	 */
4716 	ret = nand_reset(chip, 0);
4717 	if (ret)
4718 		return ret;
4719 
4720 	/* Select the device */
4721 	nand_select_target(chip, 0);
4722 
4723 	/* Send the command for reading device ID */
4724 	ret = nand_readid_op(chip, 0, id_data, 2);
4725 	if (ret)
4726 		return ret;
4727 
4728 	/* Read manufacturer and device IDs */
4729 	maf_id = id_data[0];
4730 	dev_id = id_data[1];
4731 
4732 	/*
4733 	 * Try again to make sure, as some systems the bus-hold or other
4734 	 * interface concerns can cause random data which looks like a
4735 	 * possibly credible NAND flash to appear. If the two results do
4736 	 * not match, ignore the device completely.
4737 	 */
4738 
4739 	/* Read entire ID string */
4740 	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
4741 	if (ret)
4742 		return ret;
4743 
4744 	if (id_data[0] != maf_id || id_data[1] != dev_id) {
4745 		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
4746 			maf_id, dev_id, id_data[0], id_data[1]);
4747 		return -ENODEV;
4748 	}
4749 
4750 	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
4751 
4752 	/* Try to identify manufacturer */
4753 	manufacturer = nand_get_manufacturer(maf_id);
4754 	chip->manufacturer.desc = manufacturer;
4755 
4756 	if (!type)
4757 		type = nand_flash_ids;
4758 
4759 	/*
4760 	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
4761 	 * override it.
4762 	 * This is required to make sure initial NAND bus width set by the
4763 	 * NAND controller driver is coherent with the real NAND bus width
4764 	 * (extracted by auto-detection code).
4765 	 */
4766 	busw = chip->options & NAND_BUSWIDTH_16;
4767 
4768 	/*
4769 	 * The flag is only set (never cleared), reset it to its default value
4770 	 * before starting auto-detection.
4771 	 */
4772 	chip->options &= ~NAND_BUSWIDTH_16;
4773 
4774 	for (; type->name != NULL; type++) {
4775 		if (is_full_id_nand(type)) {
4776 			if (find_full_id_nand(chip, type))
4777 				goto ident_done;
4778 		} else if (dev_id == type->dev_id) {
4779 			break;
4780 		}
4781 	}
4782 
4783 	if (!type->name || !type->pagesize) {
4784 		/* Check if the chip is ONFI compliant */
4785 		ret = nand_onfi_detect(chip);
4786 		if (ret < 0)
4787 			return ret;
4788 		else if (ret)
4789 			goto ident_done;
4790 
4791 		/* Check if the chip is JEDEC compliant */
4792 		ret = nand_jedec_detect(chip);
4793 		if (ret < 0)
4794 			return ret;
4795 		else if (ret)
4796 			goto ident_done;
4797 	}
4798 
4799 	if (!type->name)
4800 		return -ENODEV;
4801 
4802 	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4803 	if (!chip->parameters.model)
4804 		return -ENOMEM;
4805 
4806 	if (!type->pagesize)
4807 		nand_manufacturer_detect(chip);
4808 	else
4809 		nand_decode_id(chip, type);
4810 
4811 	/* Get chip options */
4812 	chip->options |= type->options;
4813 
4814 	memorg->eraseblocks_per_lun =
4815 			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
4816 					   memorg->pagesize *
4817 					   memorg->pages_per_eraseblock);
4818 
4819 ident_done:
4820 	if (!mtd->name)
4821 		mtd->name = chip->parameters.model;
4822 
4823 	if (chip->options & NAND_BUSWIDTH_AUTO) {
4824 		WARN_ON(busw & NAND_BUSWIDTH_16);
4825 		nand_set_defaults(chip);
4826 	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
4827 		/*
4828 		 * Check, if buswidth is correct. Hardware drivers should set
4829 		 * chip correct!
4830 		 */
4831 		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4832 			maf_id, dev_id);
4833 		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4834 			mtd->name);
4835 		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
4836 			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
4837 		ret = -EINVAL;
4838 
4839 		goto free_detect_allocation;
4840 	}
4841 
4842 	nand_decode_bbm_options(chip);
4843 
4844 	/* Calculate the address shift from the page size */
4845 	chip->page_shift = ffs(mtd->writesize) - 1;
4846 	/* Convert chipsize to number of pages per chip -1 */
4847 	targetsize = nanddev_target_size(&chip->base);
4848 	chip->pagemask = (targetsize >> chip->page_shift) - 1;
4849 
4850 	chip->bbt_erase_shift = chip->phys_erase_shift =
4851 		ffs(mtd->erasesize) - 1;
4852 	if (targetsize & 0xffffffff)
4853 		chip->chip_shift = ffs((unsigned)targetsize) - 1;
4854 	else {
4855 		chip->chip_shift = ffs((unsigned)(targetsize >> 32));
4856 		chip->chip_shift += 32 - 1;
4857 	}
4858 
4859 	if (chip->chip_shift - chip->page_shift > 16)
4860 		chip->options |= NAND_ROW_ADDR_3;
4861 
4862 	chip->badblockbits = 8;
4863 
4864 	nand_legacy_adjust_cmdfunc(chip);
4865 
4866 	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4867 		maf_id, dev_id);
4868 	pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4869 		chip->parameters.model);
4870 	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4871 		(int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
4872 		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
4873 	return 0;
4874 
4875 free_detect_allocation:
4876 	kfree(chip->parameters.model);
4877 
4878 	return ret;
4879 }
4880 
/* "nand-ecc-mode" DT property values, indexed by the ECC mode enum value */
static const char * const nand_ecc_modes[] = {
	[NAND_ECC_NONE]		= "none",
	[NAND_ECC_SOFT]		= "soft",
	[NAND_ECC_HW]		= "hw",
	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
	[NAND_ECC_ON_DIE]	= "on-die",
};
4889 
4890 static int of_get_nand_ecc_mode(struct device_node *np)
4891 {
4892 	const char *pm;
4893 	int err, i;
4894 
4895 	err = of_property_read_string(np, "nand-ecc-mode", &pm);
4896 	if (err < 0)
4897 		return err;
4898 
4899 	for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4900 		if (!strcasecmp(pm, nand_ecc_modes[i]))
4901 			return i;
4902 
4903 	/*
4904 	 * For backward compatibility we support few obsoleted values that don't
4905 	 * have their mappings into nand_ecc_modes_t anymore (they were merged
4906 	 * with other enums).
4907 	 */
4908 	if (!strcasecmp(pm, "soft_bch"))
4909 		return NAND_ECC_SOFT;
4910 
4911 	return -ENODEV;
4912 }
4913 
/* "nand-ecc-algo" DT property values, indexed by the ECC algorithm enum */
static const char * const nand_ecc_algos[] = {
	[NAND_ECC_HAMMING]	= "hamming",
	[NAND_ECC_BCH]		= "bch",
	[NAND_ECC_RS]		= "rs",
};
4919 
4920 static int of_get_nand_ecc_algo(struct device_node *np)
4921 {
4922 	const char *pm;
4923 	int err, i;
4924 
4925 	err = of_property_read_string(np, "nand-ecc-algo", &pm);
4926 	if (!err) {
4927 		for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4928 			if (!strcasecmp(pm, nand_ecc_algos[i]))
4929 				return i;
4930 		return -ENODEV;
4931 	}
4932 
4933 	/*
4934 	 * For backward compatibility we also read "nand-ecc-mode" checking
4935 	 * for some obsoleted values that were specifying ECC algorithm.
4936 	 */
4937 	err = of_property_read_string(np, "nand-ecc-mode", &pm);
4938 	if (err < 0)
4939 		return err;
4940 
4941 	if (!strcasecmp(pm, "soft"))
4942 		return NAND_ECC_HAMMING;
4943 	else if (!strcasecmp(pm, "soft_bch"))
4944 		return NAND_ECC_BCH;
4945 
4946 	return -ENODEV;
4947 }
4948 
4949 static int of_get_nand_ecc_step_size(struct device_node *np)
4950 {
4951 	int ret;
4952 	u32 val;
4953 
4954 	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4955 	return ret ? ret : val;
4956 }
4957 
4958 static int of_get_nand_ecc_strength(struct device_node *np)
4959 {
4960 	int ret;
4961 	u32 val;
4962 
4963 	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4964 	return ret ? ret : val;
4965 }
4966 
4967 static int of_get_nand_bus_width(struct device_node *np)
4968 {
4969 	u32 val;
4970 
4971 	if (of_property_read_u32(np, "nand-bus-width", &val))
4972 		return 8;
4973 
4974 	switch (val) {
4975 	case 8:
4976 	case 16:
4977 		return val;
4978 	default:
4979 		return -EIO;
4980 	}
4981 }
4982 
4983 static bool of_get_nand_on_flash_bbt(struct device_node *np)
4984 {
4985 	return of_property_read_bool(np, "nand-on-flash-bbt");
4986 }
4987 
4988 static int nand_dt_init(struct nand_chip *chip)
4989 {
4990 	struct device_node *dn = nand_get_flash_node(chip);
4991 	int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4992 
4993 	if (!dn)
4994 		return 0;
4995 
4996 	if (of_get_nand_bus_width(dn) == 16)
4997 		chip->options |= NAND_BUSWIDTH_16;
4998 
4999 	if (of_property_read_bool(dn, "nand-is-boot-medium"))
5000 		chip->options |= NAND_IS_BOOT_MEDIUM;
5001 
5002 	if (of_get_nand_on_flash_bbt(dn))
5003 		chip->bbt_options |= NAND_BBT_USE_FLASH;
5004 
5005 	ecc_mode = of_get_nand_ecc_mode(dn);
5006 	ecc_algo = of_get_nand_ecc_algo(dn);
5007 	ecc_strength = of_get_nand_ecc_strength(dn);
5008 	ecc_step = of_get_nand_ecc_step_size(dn);
5009 
5010 	if (ecc_mode >= 0)
5011 		chip->ecc.mode = ecc_mode;
5012 
5013 	if (ecc_algo >= 0)
5014 		chip->ecc.algo = ecc_algo;
5015 
5016 	if (ecc_strength >= 0)
5017 		chip->ecc.strength = ecc_strength;
5018 
5019 	if (ecc_step > 0)
5020 		chip->ecc.size = ecc_step;
5021 
5022 	if (of_property_read_bool(dn, "nand-ecc-maximize"))
5023 		chip->ecc.options |= NAND_ECC_MAXIMIZE;
5024 
5025 	return 0;
5026 }
5027 
5028 /**
5029  * nand_scan_ident - Scan for the NAND device
5030  * @chip: NAND chip object
5031  * @maxchips: number of chips to scan for
5032  * @table: alternative NAND ID table
5033  *
5034  * This is the first phase of the normal nand_scan() function. It reads the
5035  * flash ID and sets up MTD fields accordingly.
5036  *
5037  * This helper used to be called directly from controller drivers that needed
5038  * to tweak some ECC-related parameters before nand_scan_tail(). This separation
5039  * prevented dynamic allocations during this phase which was unconvenient and
5040  * as been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
5041  */
5042 static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
5043 			   struct nand_flash_dev *table)
5044 {
5045 	struct mtd_info *mtd = nand_to_mtd(chip);
5046 	struct nand_memory_organization *memorg;
5047 	int nand_maf_id, nand_dev_id;
5048 	unsigned int i;
5049 	int ret;
5050 
5051 	memorg = nanddev_get_memorg(&chip->base);
5052 
5053 	/* Assume all dies are deselected when we enter nand_scan_ident(). */
5054 	chip->cur_cs = -1;
5055 
5056 	mutex_init(&chip->lock);
5057 
5058 	/* Enforce the right timings for reset/detection */
5059 	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
5060 
5061 	ret = nand_dt_init(chip);
5062 	if (ret)
5063 		return ret;
5064 
5065 	if (!mtd->name && mtd->dev.parent)
5066 		mtd->name = dev_name(mtd->dev.parent);
5067 
5068 	/* Set the default functions */
5069 	nand_set_defaults(chip);
5070 
5071 	ret = nand_legacy_check_hooks(chip);
5072 	if (ret)
5073 		return ret;
5074 
5075 	memorg->ntargets = maxchips;
5076 
5077 	/* Read the flash type */
5078 	ret = nand_detect(chip, table);
5079 	if (ret) {
5080 		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
5081 			pr_warn("No NAND device found\n");
5082 		nand_deselect_target(chip);
5083 		return ret;
5084 	}
5085 
5086 	nand_maf_id = chip->id.data[0];
5087 	nand_dev_id = chip->id.data[1];
5088 
5089 	nand_deselect_target(chip);
5090 
5091 	/* Check for a chip array */
5092 	for (i = 1; i < maxchips; i++) {
5093 		u8 id[2];
5094 
5095 		/* See comment in nand_get_flash_type for reset */
5096 		ret = nand_reset(chip, i);
5097 		if (ret)
5098 			break;
5099 
5100 		nand_select_target(chip, i);
5101 		/* Send the command for reading device ID */
5102 		ret = nand_readid_op(chip, 0, id, sizeof(id));
5103 		if (ret)
5104 			break;
5105 		/* Read manufacturer and device IDs */
5106 		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
5107 			nand_deselect_target(chip);
5108 			break;
5109 		}
5110 		nand_deselect_target(chip);
5111 	}
5112 	if (i > 1)
5113 		pr_info("%d chips detected\n", i);
5114 
5115 	/* Store the number of chips and calc total size for mtd */
5116 	memorg->ntargets = i;
5117 	mtd->size = i * nanddev_target_size(&chip->base);
5118 
5119 	return 0;
5120 }
5121 
5122 static void nand_scan_ident_cleanup(struct nand_chip *chip)
5123 {
5124 	kfree(chip->parameters.model);
5125 	kfree(chip->parameters.onfi);
5126 }
5127 
/*
 * Wire up the software ECC implementation (Hamming or BCH) selected by
 * chip->ecc.algo: page/subpage/OOB accessors, ECC geometry defaults, and,
 * for BCH, the nand_bch context. Returns 0 on success or -EINVAL when the
 * mode/algorithm combination is unsupported or BCH setup fails.
 */
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_HAMMING:
		ecc->calculate = nand_calculate_ecc;
		ecc->correct = nand_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		/* 1-bit correction over 256-byte steps, 3 ECC bytes each */
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		return 0;
	case NAND_ECC_BCH:
		if (!mtd_nand_has_bch()) {
			WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = nand_bch_calculate_ecc;
		ecc->correct = nand_bch_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * Board driver should supply ecc.size and ecc.strength
		 * values to select how many bits are correctable.
		 * Otherwise, default to 4 bits for large page devices.
		 */
		if (!ecc->size && (mtd->oobsize >= 64)) {
			ecc->size = 512;
			ecc->strength = 4;
		}

		/*
		 * if no ecc placement scheme was provided pickup the default
		 * large page one.
		 */
		if (!mtd->ooblayout) {
			/* handle large page devices only */
			if (mtd->oobsize < 64) {
				WARN(1, "OOB layout is required when using software BCH on small pages\n");
				return -EINVAL;
			}

			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);

		}

		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
		    ecc->options & NAND_ECC_MAXIMIZE) {
			int steps, bytes;

			/* Always prefer 1k blocks over 512bytes ones */
			ecc->size = 1024;
			steps = mtd->writesize / ecc->size;

			/* Reserve 2 bytes for the BBM */
			bytes = (mtd->oobsize - 2) / steps;
			ecc->strength = bytes * 8 / fls(8 * ecc->size);
		}

		/* See nand_bch_init() for details. */
		ecc->bytes = 0;
		ecc->priv = nand_bch_init(mtd);
		if (!ecc->priv) {
			WARN(1, "BCH ECC initialization failed!\n");
			return -EINVAL;
		}
		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}
5227 
5228 /**
5229  * nand_check_ecc_caps - check the sanity of preset ECC settings
5230  * @chip: nand chip info structure
5231  * @caps: ECC caps info structure
5232  * @oobavail: OOB size that the ECC engine can use
5233  *
5234  * When ECC step size and strength are already set, check if they are supported
5235  * by the controller and the calculated ECC bytes fit within the chip's OOB.
5236  * On success, the calculated ECC bytes is set.
5237  */
5238 static int
5239 nand_check_ecc_caps(struct nand_chip *chip,
5240 		    const struct nand_ecc_caps *caps, int oobavail)
5241 {
5242 	struct mtd_info *mtd = nand_to_mtd(chip);
5243 	const struct nand_ecc_step_info *stepinfo;
5244 	int preset_step = chip->ecc.size;
5245 	int preset_strength = chip->ecc.strength;
5246 	int ecc_bytes, nsteps = mtd->writesize / preset_step;
5247 	int i, j;
5248 
5249 	for (i = 0; i < caps->nstepinfos; i++) {
5250 		stepinfo = &caps->stepinfos[i];
5251 
5252 		if (stepinfo->stepsize != preset_step)
5253 			continue;
5254 
5255 		for (j = 0; j < stepinfo->nstrengths; j++) {
5256 			if (stepinfo->strengths[j] != preset_strength)
5257 				continue;
5258 
5259 			ecc_bytes = caps->calc_ecc_bytes(preset_step,
5260 							 preset_strength);
5261 			if (WARN_ON_ONCE(ecc_bytes < 0))
5262 				return ecc_bytes;
5263 
5264 			if (ecc_bytes * nsteps > oobavail) {
5265 				pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
5266 				       preset_step, preset_strength);
5267 				return -ENOSPC;
5268 			}
5269 
5270 			chip->ecc.bytes = ecc_bytes;
5271 
5272 			return 0;
5273 		}
5274 	}
5275 
5276 	pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
5277 	       preset_step, preset_strength);
5278 
5279 	return -ENOTSUPP;
5280 }
5281 
5282 /**
5283  * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
5284  * @chip: nand chip info structure
5285  * @caps: ECC engine caps info structure
5286  * @oobavail: OOB size that the ECC engine can use
5287  *
5288  * If a chip's ECC requirement is provided, try to meet it with the least
5289  * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
5290  * On success, the chosen ECC settings are set.
5291  */
5292 static int
5293 nand_match_ecc_req(struct nand_chip *chip,
5294 		   const struct nand_ecc_caps *caps, int oobavail)
5295 {
5296 	struct mtd_info *mtd = nand_to_mtd(chip);
5297 	const struct nand_ecc_step_info *stepinfo;
5298 	int req_step = chip->base.eccreq.step_size;
5299 	int req_strength = chip->base.eccreq.strength;
5300 	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
5301 	int best_step, best_strength, best_ecc_bytes;
5302 	int best_ecc_bytes_total = INT_MAX;
5303 	int i, j;
5304 
5305 	/* No information provided by the NAND chip */
5306 	if (!req_step || !req_strength)
5307 		return -ENOTSUPP;
5308 
5309 	/* number of correctable bits the chip requires in a page */
5310 	req_corr = mtd->writesize / req_step * req_strength;
5311 
5312 	for (i = 0; i < caps->nstepinfos; i++) {
5313 		stepinfo = &caps->stepinfos[i];
5314 		step_size = stepinfo->stepsize;
5315 
5316 		for (j = 0; j < stepinfo->nstrengths; j++) {
5317 			strength = stepinfo->strengths[j];
5318 
5319 			/*
5320 			 * If both step size and strength are smaller than the
5321 			 * chip's requirement, it is not easy to compare the
5322 			 * resulted reliability.
5323 			 */
5324 			if (step_size < req_step && strength < req_strength)
5325 				continue;
5326 
5327 			if (mtd->writesize % step_size)
5328 				continue;
5329 
5330 			nsteps = mtd->writesize / step_size;
5331 
5332 			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5333 			if (WARN_ON_ONCE(ecc_bytes < 0))
5334 				continue;
5335 			ecc_bytes_total = ecc_bytes * nsteps;
5336 
5337 			if (ecc_bytes_total > oobavail ||
5338 			    strength * nsteps < req_corr)
5339 				continue;
5340 
5341 			/*
5342 			 * We assume the best is to meet the chip's requrement
5343 			 * with the least number of ECC bytes.
5344 			 */
5345 			if (ecc_bytes_total < best_ecc_bytes_total) {
5346 				best_ecc_bytes_total = ecc_bytes_total;
5347 				best_step = step_size;
5348 				best_strength = strength;
5349 				best_ecc_bytes = ecc_bytes;
5350 			}
5351 		}
5352 	}
5353 
5354 	if (best_ecc_bytes_total == INT_MAX)
5355 		return -ENOTSUPP;
5356 
5357 	chip->ecc.size = best_step;
5358 	chip->ecc.strength = best_strength;
5359 	chip->ecc.bytes = best_ecc_bytes;
5360 
5361 	return 0;
5362 }
5363 
5364 /**
5365  * nand_maximize_ecc - choose the max ECC strength available
5366  * @chip: nand chip info structure
5367  * @caps: ECC engine caps info structure
5368  * @oobavail: OOB size that the ECC engine can use
5369  *
5370  * Choose the max ECC strength that is supported on the controller, and can fit
5371  * within the chip's OOB.  On success, the chosen ECC settings are set.
5372  */
5373 static int
5374 nand_maximize_ecc(struct nand_chip *chip,
5375 		  const struct nand_ecc_caps *caps, int oobavail)
5376 {
5377 	struct mtd_info *mtd = nand_to_mtd(chip);
5378 	const struct nand_ecc_step_info *stepinfo;
5379 	int step_size, strength, nsteps, ecc_bytes, corr;
5380 	int best_corr = 0;
5381 	int best_step = 0;
5382 	int best_strength, best_ecc_bytes;
5383 	int i, j;
5384 
5385 	for (i = 0; i < caps->nstepinfos; i++) {
5386 		stepinfo = &caps->stepinfos[i];
5387 		step_size = stepinfo->stepsize;
5388 
5389 		/* If chip->ecc.size is already set, respect it */
5390 		if (chip->ecc.size && step_size != chip->ecc.size)
5391 			continue;
5392 
5393 		for (j = 0; j < stepinfo->nstrengths; j++) {
5394 			strength = stepinfo->strengths[j];
5395 
5396 			if (mtd->writesize % step_size)
5397 				continue;
5398 
5399 			nsteps = mtd->writesize / step_size;
5400 
5401 			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5402 			if (WARN_ON_ONCE(ecc_bytes < 0))
5403 				continue;
5404 
5405 			if (ecc_bytes * nsteps > oobavail)
5406 				continue;
5407 
5408 			corr = strength * nsteps;
5409 
5410 			/*
5411 			 * If the number of correctable bits is the same,
5412 			 * bigger step_size has more reliability.
5413 			 */
5414 			if (corr > best_corr ||
5415 			    (corr == best_corr && step_size > best_step)) {
5416 				best_corr = corr;
5417 				best_step = step_size;
5418 				best_strength = strength;
5419 				best_ecc_bytes = ecc_bytes;
5420 			}
5421 		}
5422 	}
5423 
5424 	if (!best_corr)
5425 		return -ENOTSUPP;
5426 
5427 	chip->ecc.size = best_step;
5428 	chip->ecc.strength = best_strength;
5429 	chip->ecc.bytes = best_ecc_bytes;
5430 
5431 	return 0;
5432 }
5433 
5434 /**
5435  * nand_ecc_choose_conf - Set the ECC strength and ECC step size
5436  * @chip: nand chip info structure
5437  * @caps: ECC engine caps info structure
5438  * @oobavail: OOB size that the ECC engine can use
5439  *
5440  * Choose the ECC configuration according to following logic
5441  *
5442  * 1. If both ECC step size and ECC strength are already set (usually by DT)
5443  *    then check if it is supported by this controller.
5444  * 2. If NAND_ECC_MAXIMIZE is set, then select maximum ECC strength.
5445  * 3. Otherwise, try to match the ECC step size and ECC strength closest
5446  *    to the chip's requirement. If available OOB size can't fit the chip
5447  *    requirement then fallback to the maximum ECC step size and ECC strength.
5448  *
5449  * On success, the chosen ECC settings are set.
5450  */
5451 int nand_ecc_choose_conf(struct nand_chip *chip,
5452 			 const struct nand_ecc_caps *caps, int oobavail)
5453 {
5454 	struct mtd_info *mtd = nand_to_mtd(chip);
5455 
5456 	if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
5457 		return -EINVAL;
5458 
5459 	if (chip->ecc.size && chip->ecc.strength)
5460 		return nand_check_ecc_caps(chip, caps, oobavail);
5461 
5462 	if (chip->ecc.options & NAND_ECC_MAXIMIZE)
5463 		return nand_maximize_ecc(chip, caps, oobavail);
5464 
5465 	if (!nand_match_ecc_req(chip, caps, oobavail))
5466 		return 0;
5467 
5468 	return nand_maximize_ecc(chip, caps, oobavail);
5469 }
5470 EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
5471 
5472 /*
5473  * Check if the chip configuration meet the datasheet requirements.
5474 
5475  * If our configuration corrects A bits per B bytes and the minimum
5476  * required correction level is X bits per Y bytes, then we must ensure
5477  * both of the following are true:
5478  *
5479  * (1) A / B >= X / Y
5480  * (2) A >= X
5481  *
5482  * Requirement (1) ensures we can correct for the required bitflip density.
5483  * Requirement (2) ensures we can correct even when all bitflips are clumped
5484  * in the same sector.
5485  */
5486 static bool nand_ecc_strength_good(struct nand_chip *chip)
5487 {
5488 	struct mtd_info *mtd = nand_to_mtd(chip);
5489 	struct nand_ecc_ctrl *ecc = &chip->ecc;
5490 	int corr, ds_corr;
5491 
5492 	if (ecc->size == 0 || chip->base.eccreq.step_size == 0)
5493 		/* Not enough information */
5494 		return true;
5495 
5496 	/*
5497 	 * We get the number of corrected bits per page to compare
5498 	 * the correction density.
5499 	 */
5500 	corr = (mtd->writesize * ecc->strength) / ecc->size;
5501 	ds_corr = (mtd->writesize * chip->base.eccreq.strength) /
5502 		  chip->base.eccreq.step_size;
5503 
5504 	return corr >= ds_corr && ecc->strength >= chip->base.eccreq.strength;
5505 }
5506 
5507 static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
5508 {
5509 	struct nand_chip *chip = container_of(nand, struct nand_chip,
5510 					      base);
5511 	unsigned int eb = nanddev_pos_to_row(nand, pos);
5512 	int ret;
5513 
5514 	eb >>= nand->rowconv.eraseblock_addr_shift;
5515 
5516 	nand_select_target(chip, pos->target);
5517 	ret = nand_erase_op(chip, eb);
5518 	nand_deselect_target(chip);
5519 
5520 	return ret;
5521 }
5522 
5523 static int rawnand_markbad(struct nand_device *nand,
5524 			   const struct nand_pos *pos)
5525 {
5526 	struct nand_chip *chip = container_of(nand, struct nand_chip,
5527 					      base);
5528 
5529 	return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5530 }
5531 
5532 static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
5533 {
5534 	struct nand_chip *chip = container_of(nand, struct nand_chip,
5535 					      base);
5536 	int ret;
5537 
5538 	nand_select_target(chip, pos->target);
5539 	ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5540 	nand_deselect_target(chip);
5541 
5542 	return ret;
5543 }
5544 
/* Generic NAND framework hooks, backed by the raw NAND helpers above. */
static const struct nand_ops rawnand_ops = {
	.erase = rawnand_erase,
	.markbad = rawnand_markbad,
	.isbad = rawnand_isbad,
};
5550 
5551 /**
5552  * nand_scan_tail - Scan for the NAND device
5553  * @chip: NAND chip object
5554  *
5555  * This is the second phase of the normal nand_scan() function. It fills out
5556  * all the uninitialized function pointers with the defaults and scans for a
5557  * bad block table if appropriate.
5558  */
5559 static int nand_scan_tail(struct nand_chip *chip)
5560 {
5561 	struct mtd_info *mtd = nand_to_mtd(chip);
5562 	struct nand_ecc_ctrl *ecc = &chip->ecc;
5563 	int ret, i;
5564 
5565 	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
5566 	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
5567 		   !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
5568 		return -EINVAL;
5569 	}
5570 
5571 	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
5572 	if (!chip->data_buf)
5573 		return -ENOMEM;
5574 
5575 	/*
5576 	 * FIXME: some NAND manufacturer drivers expect the first die to be
5577 	 * selected when manufacturer->init() is called. They should be fixed
5578 	 * to explictly select the relevant die when interacting with the NAND
5579 	 * chip.
5580 	 */
5581 	nand_select_target(chip, 0);
5582 	ret = nand_manufacturer_init(chip);
5583 	nand_deselect_target(chip);
5584 	if (ret)
5585 		goto err_free_buf;
5586 
5587 	/* Set the internal oob buffer location, just after the page data */
5588 	chip->oob_poi = chip->data_buf + mtd->writesize;
5589 
5590 	/*
5591 	 * If no default placement scheme is given, select an appropriate one.
5592 	 */
5593 	if (!mtd->ooblayout &&
5594 	    !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
5595 		switch (mtd->oobsize) {
5596 		case 8:
5597 		case 16:
5598 			mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
5599 			break;
5600 		case 64:
5601 		case 128:
5602 			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
5603 			break;
5604 		default:
5605 			/*
5606 			 * Expose the whole OOB area to users if ECC_NONE
5607 			 * is passed. We could do that for all kind of
5608 			 * ->oobsize, but we must keep the old large/small
5609 			 * page with ECC layout when ->oobsize <= 128 for
5610 			 * compatibility reasons.
5611 			 */
5612 			if (ecc->mode == NAND_ECC_NONE) {
5613 				mtd_set_ooblayout(mtd,
5614 						&nand_ooblayout_lp_ops);
5615 				break;
5616 			}
5617 
5618 			WARN(1, "No oob scheme defined for oobsize %d\n",
5619 				mtd->oobsize);
5620 			ret = -EINVAL;
5621 			goto err_nand_manuf_cleanup;
5622 		}
5623 	}
5624 
5625 	/*
5626 	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
5627 	 * selected and we have 256 byte pagesize fallback to software ECC
5628 	 */
5629 
5630 	switch (ecc->mode) {
5631 	case NAND_ECC_HW_OOB_FIRST:
5632 		/* Similar to NAND_ECC_HW, but a separate read_page handle */
5633 		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
5634 			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
5635 			ret = -EINVAL;
5636 			goto err_nand_manuf_cleanup;
5637 		}
5638 		if (!ecc->read_page)
5639 			ecc->read_page = nand_read_page_hwecc_oob_first;
5640 		fallthrough;
5641 	case NAND_ECC_HW:
5642 		/* Use standard hwecc read page function? */
5643 		if (!ecc->read_page)
5644 			ecc->read_page = nand_read_page_hwecc;
5645 		if (!ecc->write_page)
5646 			ecc->write_page = nand_write_page_hwecc;
5647 		if (!ecc->read_page_raw)
5648 			ecc->read_page_raw = nand_read_page_raw;
5649 		if (!ecc->write_page_raw)
5650 			ecc->write_page_raw = nand_write_page_raw;
5651 		if (!ecc->read_oob)
5652 			ecc->read_oob = nand_read_oob_std;
5653 		if (!ecc->write_oob)
5654 			ecc->write_oob = nand_write_oob_std;
5655 		if (!ecc->read_subpage)
5656 			ecc->read_subpage = nand_read_subpage;
5657 		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
5658 			ecc->write_subpage = nand_write_subpage_hwecc;
5659 		fallthrough;
5660 	case NAND_ECC_HW_SYNDROME:
5661 		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
5662 		    (!ecc->read_page ||
5663 		     ecc->read_page == nand_read_page_hwecc ||
5664 		     !ecc->write_page ||
5665 		     ecc->write_page == nand_write_page_hwecc)) {
5666 			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
5667 			ret = -EINVAL;
5668 			goto err_nand_manuf_cleanup;
5669 		}
5670 		/* Use standard syndrome read/write page function? */
5671 		if (!ecc->read_page)
5672 			ecc->read_page = nand_read_page_syndrome;
5673 		if (!ecc->write_page)
5674 			ecc->write_page = nand_write_page_syndrome;
5675 		if (!ecc->read_page_raw)
5676 			ecc->read_page_raw = nand_read_page_raw_syndrome;
5677 		if (!ecc->write_page_raw)
5678 			ecc->write_page_raw = nand_write_page_raw_syndrome;
5679 		if (!ecc->read_oob)
5680 			ecc->read_oob = nand_read_oob_syndrome;
5681 		if (!ecc->write_oob)
5682 			ecc->write_oob = nand_write_oob_syndrome;
5683 
5684 		if (mtd->writesize >= ecc->size) {
5685 			if (!ecc->strength) {
5686 				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
5687 				ret = -EINVAL;
5688 				goto err_nand_manuf_cleanup;
5689 			}
5690 			break;
5691 		}
5692 		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
5693 			ecc->size, mtd->writesize);
5694 		ecc->mode = NAND_ECC_SOFT;
5695 		ecc->algo = NAND_ECC_HAMMING;
5696 		fallthrough;
5697 	case NAND_ECC_SOFT:
5698 		ret = nand_set_ecc_soft_ops(chip);
5699 		if (ret) {
5700 			ret = -EINVAL;
5701 			goto err_nand_manuf_cleanup;
5702 		}
5703 		break;
5704 
5705 	case NAND_ECC_ON_DIE:
5706 		if (!ecc->read_page || !ecc->write_page) {
5707 			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
5708 			ret = -EINVAL;
5709 			goto err_nand_manuf_cleanup;
5710 		}
5711 		if (!ecc->read_oob)
5712 			ecc->read_oob = nand_read_oob_std;
5713 		if (!ecc->write_oob)
5714 			ecc->write_oob = nand_write_oob_std;
5715 		break;
5716 
5717 	case NAND_ECC_NONE:
5718 		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
5719 		ecc->read_page = nand_read_page_raw;
5720 		ecc->write_page = nand_write_page_raw;
5721 		ecc->read_oob = nand_read_oob_std;
5722 		ecc->read_page_raw = nand_read_page_raw;
5723 		ecc->write_page_raw = nand_write_page_raw;
5724 		ecc->write_oob = nand_write_oob_std;
5725 		ecc->size = mtd->writesize;
5726 		ecc->bytes = 0;
5727 		ecc->strength = 0;
5728 		break;
5729 
5730 	default:
5731 		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
5732 		ret = -EINVAL;
5733 		goto err_nand_manuf_cleanup;
5734 	}
5735 
5736 	if (ecc->correct || ecc->calculate) {
5737 		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5738 		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5739 		if (!ecc->calc_buf || !ecc->code_buf) {
5740 			ret = -ENOMEM;
5741 			goto err_nand_manuf_cleanup;
5742 		}
5743 	}
5744 
5745 	/* For many systems, the standard OOB write also works for raw */
5746 	if (!ecc->read_oob_raw)
5747 		ecc->read_oob_raw = ecc->read_oob;
5748 	if (!ecc->write_oob_raw)
5749 		ecc->write_oob_raw = ecc->write_oob;
5750 
5751 	/* propagate ecc info to mtd_info */
5752 	mtd->ecc_strength = ecc->strength;
5753 	mtd->ecc_step_size = ecc->size;
5754 
5755 	/*
5756 	 * Set the number of read / write steps for one page depending on ECC
5757 	 * mode.
5758 	 */
5759 	ecc->steps = mtd->writesize / ecc->size;
5760 	if (ecc->steps * ecc->size != mtd->writesize) {
5761 		WARN(1, "Invalid ECC parameters\n");
5762 		ret = -EINVAL;
5763 		goto err_nand_manuf_cleanup;
5764 	}
5765 	ecc->total = ecc->steps * ecc->bytes;
5766 	if (ecc->total > mtd->oobsize) {
5767 		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
5768 		ret = -EINVAL;
5769 		goto err_nand_manuf_cleanup;
5770 	}
5771 
5772 	/*
5773 	 * The number of bytes available for a client to place data into
5774 	 * the out of band area.
5775 	 */
5776 	ret = mtd_ooblayout_count_freebytes(mtd);
5777 	if (ret < 0)
5778 		ret = 0;
5779 
5780 	mtd->oobavail = ret;
5781 
5782 	/* ECC sanity check: warn if it's too weak */
5783 	if (!nand_ecc_strength_good(chip))
5784 		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
5785 			mtd->name);
5786 
5787 	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
5788 	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
5789 		switch (ecc->steps) {
5790 		case 2:
5791 			mtd->subpage_sft = 1;
5792 			break;
5793 		case 4:
5794 		case 8:
5795 		case 16:
5796 			mtd->subpage_sft = 2;
5797 			break;
5798 		}
5799 	}
5800 	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
5801 
5802 	/* Invalidate the pagebuffer reference */
5803 	chip->pagecache.page = -1;
5804 
5805 	/* Large page NAND with SOFT_ECC should support subpage reads */
5806 	switch (ecc->mode) {
5807 	case NAND_ECC_SOFT:
5808 		if (chip->page_shift > 9)
5809 			chip->options |= NAND_SUBPAGE_READ;
5810 		break;
5811 
5812 	default:
5813 		break;
5814 	}
5815 
5816 	ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
5817 	if (ret)
5818 		goto err_nand_manuf_cleanup;
5819 
5820 	/* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
5821 	if (chip->options & NAND_ROM)
5822 		mtd->flags = MTD_CAP_ROM;
5823 
5824 	/* Fill in remaining MTD driver data */
5825 	mtd->_erase = nand_erase;
5826 	mtd->_point = NULL;
5827 	mtd->_unpoint = NULL;
5828 	mtd->_panic_write = panic_nand_write;
5829 	mtd->_read_oob = nand_read_oob;
5830 	mtd->_write_oob = nand_write_oob;
5831 	mtd->_sync = nand_sync;
5832 	mtd->_lock = nand_lock;
5833 	mtd->_unlock = nand_unlock;
5834 	mtd->_suspend = nand_suspend;
5835 	mtd->_resume = nand_resume;
5836 	mtd->_reboot = nand_shutdown;
5837 	mtd->_block_isreserved = nand_block_isreserved;
5838 	mtd->_block_isbad = nand_block_isbad;
5839 	mtd->_block_markbad = nand_block_markbad;
5840 	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
5841 
5842 	/*
5843 	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
5844 	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
5845 	 * properly set.
5846 	 */
5847 	if (!mtd->bitflip_threshold)
5848 		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
5849 
5850 	/* Initialize the ->data_interface field. */
5851 	ret = nand_init_data_interface(chip);
5852 	if (ret)
5853 		goto err_nanddev_cleanup;
5854 
5855 	/* Enter fastest possible mode on all dies. */
5856 	for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
5857 		ret = nand_setup_data_interface(chip, i);
5858 		if (ret)
5859 			goto err_nanddev_cleanup;
5860 	}
5861 
5862 	/* Check, if we should skip the bad block table scan */
5863 	if (chip->options & NAND_SKIP_BBTSCAN)
5864 		return 0;
5865 
5866 	/* Build bad block table */
5867 	ret = nand_create_bbt(chip);
5868 	if (ret)
5869 		goto err_nanddev_cleanup;
5870 
5871 	return 0;
5872 
5873 
5874 err_nanddev_cleanup:
5875 	nanddev_cleanup(&chip->base);
5876 
5877 err_nand_manuf_cleanup:
5878 	nand_manufacturer_cleanup(chip);
5879 
5880 err_free_buf:
5881 	kfree(chip->data_buf);
5882 	kfree(ecc->code_buf);
5883 	kfree(ecc->calc_buf);
5884 
5885 	return ret;
5886 }
5887 
5888 static int nand_attach(struct nand_chip *chip)
5889 {
5890 	if (chip->controller->ops && chip->controller->ops->attach_chip)
5891 		return chip->controller->ops->attach_chip(chip);
5892 
5893 	return 0;
5894 }
5895 
5896 static void nand_detach(struct nand_chip *chip)
5897 {
5898 	if (chip->controller->ops && chip->controller->ops->detach_chip)
5899 		chip->controller->ops->detach_chip(chip);
5900 }
5901 
5902 /**
5903  * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
5904  * @chip: NAND chip object
5905  * @maxchips: number of chips to scan for.
5906  * @ids: optional flash IDs table
5907  *
5908  * This fills out all the uninitialized function pointers with the defaults.
5909  * The flash ID is read and the mtd/chip structures are filled with the
5910  * appropriate values.
5911  */
5912 int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
5913 		       struct nand_flash_dev *ids)
5914 {
5915 	int ret;
5916 
5917 	if (!maxchips)
5918 		return -EINVAL;
5919 
5920 	ret = nand_scan_ident(chip, maxchips, ids);
5921 	if (ret)
5922 		return ret;
5923 
5924 	ret = nand_attach(chip);
5925 	if (ret)
5926 		goto cleanup_ident;
5927 
5928 	ret = nand_scan_tail(chip);
5929 	if (ret)
5930 		goto detach_chip;
5931 
5932 	return 0;
5933 
5934 detach_chip:
5935 	nand_detach(chip);
5936 cleanup_ident:
5937 	nand_scan_ident_cleanup(chip);
5938 
5939 	return ret;
5940 }
5941 EXPORT_SYMBOL(nand_scan_with_ids);
5942 
5943 /**
5944  * nand_cleanup - [NAND Interface] Free resources held by the NAND device
5945  * @chip: NAND chip object
5946  */
5947 void nand_cleanup(struct nand_chip *chip)
5948 {
5949 	if (chip->ecc.mode == NAND_ECC_SOFT &&
5950 	    chip->ecc.algo == NAND_ECC_BCH)
5951 		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
5952 
5953 	nanddev_cleanup(&chip->base);
5954 
5955 	/* Free bad block table memory */
5956 	kfree(chip->bbt);
5957 	kfree(chip->data_buf);
5958 	kfree(chip->ecc.code_buf);
5959 	kfree(chip->ecc.calc_buf);
5960 
5961 	/* Free bad block descriptor memory */
5962 	if (chip->badblock_pattern && chip->badblock_pattern->options
5963 			& NAND_BBT_DYNAMICSTRUCT)
5964 		kfree(chip->badblock_pattern);
5965 
5966 	/* Free manufacturer priv data. */
5967 	nand_manufacturer_cleanup(chip);
5968 
5969 	/* Free controller specific allocations after chip identification */
5970 	nand_detach(chip);
5971 
5972 	/* Free identification phase allocations */
5973 	nand_scan_ident_cleanup(chip);
5974 }
5975 
5976 EXPORT_SYMBOL_GPL(nand_cleanup);
5977 
5978 /**
5979  * nand_release - [NAND Interface] Unregister the MTD device and free resources
5980  *		  held by the NAND device
5981  * @chip: NAND chip object
5982  */
5983 void nand_release(struct nand_chip *chip)
5984 {
5985 	mtd_device_unregister(nand_to_mtd(chip));
5986 	nand_cleanup(chip);
5987 }
5988 EXPORT_SYMBOL_GPL(nand_release);
5989 
5990 MODULE_LICENSE("GPL");
5991 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
5992 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
5993 MODULE_DESCRIPTION("Generic NAND flash driver code");
5994