xref: /openbmc/linux/drivers/mtd/nand/raw/nand_base.c (revision 8b030a57)
1 /*
2  *  Overview:
3  *   This is the generic MTD driver for NAND flash devices. It should be
4  *   capable of working with almost all NAND chips currently available.
5  *
6  *	Additional technical information is available on
7  *	http://www.linux-mtd.infradead.org/doc/nand.html
8  *
9  *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
10  *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
11  *
12  *  Credits:
13  *	David Woodhouse for adding multichip support
14  *
15  *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
16  *	rework for 2K page size chips
17  *
18  *  TODO:
19  *	Enable cached programming for 2k page size chips
20  *	Check, if mtd->ecctype should be set to MTD_ECC_HW
21  *	if we have HW ECC support.
22  *	BBT table is not serialized, has to be fixed
23  *
24  * This program is free software; you can redistribute it and/or modify
25  * it under the terms of the GNU General Public License version 2 as
26  * published by the Free Software Foundation.
27  *
28  */
29 
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 
32 #include <linux/module.h>
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/err.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/mm.h>
39 #include <linux/types.h>
40 #include <linux/mtd/mtd.h>
41 #include <linux/mtd/nand_ecc.h>
42 #include <linux/mtd/nand_bch.h>
43 #include <linux/interrupt.h>
44 #include <linux/bitops.h>
45 #include <linux/io.h>
46 #include <linux/mtd/partitions.h>
47 #include <linux/of.h>
48 #include <linux/gpio/consumer.h>
49 
50 #include "internals.h"
51 
52 /* Define default oob placement schemes for large and small page devices */
53 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
54 				 struct mtd_oob_region *oobregion)
55 {
56 	struct nand_chip *chip = mtd_to_nand(mtd);
57 	struct nand_ecc_ctrl *ecc = &chip->ecc;
58 
59 	if (section > 1)
60 		return -ERANGE;
61 
62 	if (!section) {
63 		oobregion->offset = 0;
64 		if (mtd->oobsize == 16)
65 			oobregion->length = 4;
66 		else
67 			oobregion->length = 3;
68 	} else {
69 		if (mtd->oobsize == 8)
70 			return -ERANGE;
71 
72 		oobregion->offset = 6;
73 		oobregion->length = ecc->total - 4;
74 	}
75 
76 	return 0;
77 }
78 
79 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
80 				  struct mtd_oob_region *oobregion)
81 {
82 	if (section > 1)
83 		return -ERANGE;
84 
85 	if (mtd->oobsize == 16) {
86 		if (section)
87 			return -ERANGE;
88 
89 		oobregion->length = 8;
90 		oobregion->offset = 8;
91 	} else {
92 		oobregion->length = 2;
93 		if (!section)
94 			oobregion->offset = 3;
95 		else
96 			oobregion->offset = 6;
97 	}
98 
99 	return 0;
100 }
101 
/* Default OOB layout operations for small page devices. */
const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
	.ecc = nand_ooblayout_ecc_sp,
	.free = nand_ooblayout_free_sp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
107 
108 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
109 				 struct mtd_oob_region *oobregion)
110 {
111 	struct nand_chip *chip = mtd_to_nand(mtd);
112 	struct nand_ecc_ctrl *ecc = &chip->ecc;
113 
114 	if (section || !ecc->total)
115 		return -ERANGE;
116 
117 	oobregion->length = ecc->total;
118 	oobregion->offset = mtd->oobsize - oobregion->length;
119 
120 	return 0;
121 }
122 
123 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
124 				  struct mtd_oob_region *oobregion)
125 {
126 	struct nand_chip *chip = mtd_to_nand(mtd);
127 	struct nand_ecc_ctrl *ecc = &chip->ecc;
128 
129 	if (section)
130 		return -ERANGE;
131 
132 	oobregion->length = mtd->oobsize - ecc->total - 2;
133 	oobregion->offset = 2;
134 
135 	return 0;
136 }
137 
/* Default OOB layout operations for large page devices. */
const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.free = nand_ooblayout_free_lp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
143 
144 /*
145  * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
146  * are placed at a fixed offset.
147  */
148 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
149 					 struct mtd_oob_region *oobregion)
150 {
151 	struct nand_chip *chip = mtd_to_nand(mtd);
152 	struct nand_ecc_ctrl *ecc = &chip->ecc;
153 
154 	if (section)
155 		return -ERANGE;
156 
157 	switch (mtd->oobsize) {
158 	case 64:
159 		oobregion->offset = 40;
160 		break;
161 	case 128:
162 		oobregion->offset = 80;
163 		break;
164 	default:
165 		return -EINVAL;
166 	}
167 
168 	oobregion->length = ecc->total;
169 	if (oobregion->offset + oobregion->length > mtd->oobsize)
170 		return -ERANGE;
171 
172 	return 0;
173 }
174 
175 static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
176 					  struct mtd_oob_region *oobregion)
177 {
178 	struct nand_chip *chip = mtd_to_nand(mtd);
179 	struct nand_ecc_ctrl *ecc = &chip->ecc;
180 	int ecc_offset = 0;
181 
182 	if (section < 0 || section > 1)
183 		return -ERANGE;
184 
185 	switch (mtd->oobsize) {
186 	case 64:
187 		ecc_offset = 40;
188 		break;
189 	case 128:
190 		ecc_offset = 80;
191 		break;
192 	default:
193 		return -EINVAL;
194 	}
195 
196 	if (section == 0) {
197 		oobregion->offset = 2;
198 		oobregion->length = ecc_offset - 2;
199 	} else {
200 		oobregion->offset = ecc_offset + ecc->total;
201 		oobregion->length = mtd->oobsize - oobregion->offset;
202 	}
203 
204 	return 0;
205 }
206 
/* OOB layout operations for the legacy 1-bit Hamming large page layout. */
static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};
211 
212 static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
213 {
214 	int ret = 0;
215 
216 	/* Start address must align on block boundary */
217 	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
218 		pr_debug("%s: unaligned address\n", __func__);
219 		ret = -EINVAL;
220 	}
221 
222 	/* Length must align on block boundary */
223 	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
224 		pr_debug("%s: length not block aligned\n", __func__);
225 		ret = -EINVAL;
226 	}
227 
228 	return ret;
229 }
230 
/**
 * nand_select_target() - Select a NAND target (A.K.A. die)
 * @chip: NAND chip object
 * @cs: the CS line to select. Note that this CS id is always from the chip
 *	PoV, not the controller one
 *
 * Select a NAND target so that further operations executed on @chip go to the
 * selected NAND target.
 */
void nand_select_target(struct nand_chip *chip, unsigned int cs)
{
	/*
	 * cs should always lie between 0 and chip->numchips, when that's not
	 * the case it's a bug and the caller should be fixed.
	 *
	 * NOTE(review): valid CS ids appear to be 0..numchips-1, so this
	 * check looks like it should be '>='; kept as-is — confirm against
	 * callers before tightening.
	 */
	if (WARN_ON(cs > chip->numchips))
		return;

	/* Remember the selection so ->exec_op() helpers can address this die */
	chip->cur_cs = cs;

	/* Forward the selection to legacy drivers that implement it */
	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, cs);
}
EXPORT_SYMBOL_GPL(nand_select_target);
255 
/**
 * nand_deselect_target() - Deselect the currently selected target
 * @chip: NAND chip object
 *
 * Deselect the currently selected NAND target. The result of operations
 * executed on @chip after the target has been deselected is undefined.
 */
void nand_deselect_target(struct nand_chip *chip)
{
	/* Tell the legacy driver first, while cur_cs is still valid */
	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, -1);

	/* -1 marks "no die selected" */
	chip->cur_cs = -1;
}
EXPORT_SYMBOL_GPL(nand_deselect_target);
271 
/**
 * nand_release_device - [GENERIC] release chip
 * @chip: NAND chip object
 *
 * Release chip lock and wake up anyone waiting on the device.
 * Counterpart of nand_get_device(); must be called with the device held.
 */
static void nand_release_device(struct nand_chip *chip)
{
	/* Release the controller and the chip */
	spin_lock(&chip->controller->lock);
	chip->controller->active = NULL;
	chip->state = FL_READY;
	/* Wake any waiter blocked in nand_get_device() */
	wake_up(&chip->controller->wq);
	spin_unlock(&chip->controller->lock);
}
287 
288 /**
289  * nand_block_bad - [DEFAULT] Read bad block marker from the chip
290  * @chip: NAND chip object
291  * @ofs: offset from device start
292  *
293  * Check, if the block is bad.
294  */
295 static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
296 {
297 	struct mtd_info *mtd = nand_to_mtd(chip);
298 	int page, page_end, res;
299 	u8 bad;
300 
301 	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
302 		ofs += mtd->erasesize - mtd->writesize;
303 
304 	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
305 	page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
306 
307 	for (; page < page_end; page++) {
308 		res = chip->ecc.read_oob(chip, page);
309 		if (res < 0)
310 			return res;
311 
312 		bad = chip->oob_poi[chip->badblockpos];
313 
314 		if (likely(chip->badblockbits == 8))
315 			res = bad != 0xFF;
316 		else
317 			res = hweight8(bad) < chip->badblockbits;
318 		if (res)
319 			return res;
320 	}
321 
322 	return 0;
323 }
324 
325 static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
326 {
327 	if (chip->legacy.block_bad)
328 		return chip->legacy.block_bad(chip, ofs);
329 
330 	return nand_block_bad(chip, ofs);
331 }
332 
/**
 * panic_nand_get_device - [GENERIC] Get chip for selected access
 * @chip: the nand chip descriptor
 * @new_state: the state which is requested
 *
 * Used when in panic, no locks are taken. Grabs the controller and sets the
 * chip state unconditionally — safe only because nothing else can run.
 */
static void panic_nand_get_device(struct nand_chip *chip, int new_state)
{
	/* Hardware controller shared among independent devices */
	chip->controller->active = chip;
	chip->state = new_state;
}
346 
/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @chip: NAND chip structure
 * @new_state: the state which is requested
 *
 * Get the device and lock it for exclusive access. Sleeps (uninterruptibly)
 * until the controller is free, then returns 0 with the chip in @new_state.
 */
static int
nand_get_device(struct nand_chip *chip, int new_state)
{
	spinlock_t *lock = &chip->controller->lock;
	wait_queue_head_t *wq = &chip->controller->wq;
	DECLARE_WAITQUEUE(wait, current);
retry:
	spin_lock(lock);

	/* Hardware controller shared among independent devices */
	if (!chip->controller->active)
		chip->controller->active = chip;

	/* Fast path: we own the controller and the chip is idle */
	if (chip->controller->active == chip && chip->state == FL_READY) {
		chip->state = new_state;
		spin_unlock(lock);
		return 0;
	}
	/* Suspend may proceed as long as the active chip is suspended too */
	if (new_state == FL_PM_SUSPENDED) {
		if (chip->controller->active->state == FL_PM_SUSPENDED) {
			chip->state = FL_PM_SUSPENDED;
			spin_unlock(lock);
			return 0;
		}
	}
	/* Busy: queue ourselves, sleep until woken, then retry from scratch */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(wq, &wait);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
	goto retry;
}
386 
387 /**
388  * nand_check_wp - [GENERIC] check if the chip is write protected
389  * @chip: NAND chip object
390  *
391  * Check, if the device is write protected. The function expects, that the
392  * device is already selected.
393  */
394 static int nand_check_wp(struct nand_chip *chip)
395 {
396 	u8 status;
397 	int ret;
398 
399 	/* Broken xD cards report WP despite being writable */
400 	if (chip->options & NAND_BROKEN_XD)
401 		return 0;
402 
403 	/* Check the WP bit */
404 	ret = nand_status_op(chip, &status);
405 	if (ret)
406 		return ret;
407 
408 	return status & NAND_STATUS_WP ? 0 : 1;
409 }
410 
/**
 * nand_fill_oob - [INTERN] Transfer client buffer to oob
 * @chip: NAND chip object
 * @oob: oob data buffer
 * @len: oob data write length
 * @ops: oob ops structure
 *
 * Copies @len bytes of client OOB data into chip->oob_poi according to
 * @ops->mode and returns a pointer just past the consumed client data.
 */
static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
			      struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/*
	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
	 * data from a previous OOB read.
	 */
	memset(chip->oob_poi, 0xff, mtd->oobsize);

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Caller-specified offset, copied verbatim */
		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		/* Scatter the data into the free regions of the OOB layout */
		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		/* Unknown mode: programming error in the caller */
		BUG();
	}
	return NULL;
}
448 
/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band. Returns 0 on success or a negative error code
 * (-EINVAL for out-of-range requests, -EROFS when write protected).
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len;

	pr_debug("%s: to = 0x%08x, len = %i\n",
			 __func__, (unsigned int)to, (int)ops->ooblen);

	/* Number of OOB bytes available per page in the requested mode */
	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
				__func__);
		return -EINVAL;
	}

	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999.  dwmw2.
	 */
	nand_reset(chip, chipnr);

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagebuf)
		chip->pagebuf = -1;

	/* Stage the client data into chip->oob_poi */
	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}
516 
/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver. It provides the details for writing a bad block marker to a
 * block. Returns the first error encountered while writing the marker(s),
 * or 0 on success.
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, i = 0;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* 16-bit chips: write both bytes of the (even-aligned) word */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	/* Write to first/last page(s) if necessary */
	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
		ofs += mtd->erasesize - mtd->writesize;
	do {
		res = nand_do_write_oob(chip, ofs, &ops);
		/* Keep the first error, but still try the second page */
		if (!ret)
			ret = res;

		i++;
		ofs += mtd->writesize;
	} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);

	return ret;
}
558 
559 /**
560  * nand_markbad_bbm - mark a block by updating the BBM
561  * @chip: NAND chip object
562  * @ofs: offset of the block to mark bad
563  */
564 int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
565 {
566 	if (chip->legacy.block_markbad)
567 		return chip->legacy.block_markbad(chip, ofs);
568 
569 	return nand_default_block_markbad(chip, ofs);
570 }
571 
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 *
 * We try operations in the following order:
 *
 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 *  (2) write bad block marker to OOB area of affected block (unless flag
 *      NAND_BBT_NO_OOB_BBM is present)
 *  (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
*/
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		/* Erase failure is ignored: the OOB write is attempted anyway */
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		nand_get_device(chip, FL_WRITING);
		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		/* Preserve the first error (from the OOB write) */
		if (!ret)
			ret = res;
	}

	/* Only count the block if at least one marking method succeeded */
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
623 
624 /**
625  * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
626  * @mtd: MTD device structure
627  * @ofs: offset from device start
628  *
629  * Check if the block is marked as reserved.
630  */
631 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
632 {
633 	struct nand_chip *chip = mtd_to_nand(mtd);
634 
635 	if (!chip->bbt)
636 		return 0;
637 	/* Return info from the table */
638 	return nand_isreserved_bbt(chip, ofs);
639 }
640 
641 /**
642  * nand_block_checkbad - [GENERIC] Check if a block is marked bad
643  * @chip: NAND chip object
644  * @ofs: offset from device start
645  * @allowbbt: 1, if its allowed to access the bbt area
646  *
647  * Check, if the block is bad. Either by reading the bad block table or
648  * calling of the scan function.
649  */
650 static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
651 {
652 	/* Return info from the table */
653 	if (chip->bbt)
654 		return nand_isbad_bbt(chip, ofs, allowbbt);
655 
656 	return nand_isbad_bbm(chip, ofs);
657 }
658 
659 /**
660  * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
661  * @chip: NAND chip structure
662  * @timeout_ms: Timeout in ms
663  *
664  * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
665  * If that does not happen whitin the specified timeout, -ETIMEDOUT is
666  * returned.
667  *
668  * This helper is intended to be used when the controller does not have access
669  * to the NAND R/B pin.
670  *
671  * Be aware that calling this helper from an ->exec_op() implementation means
672  * ->exec_op() must be re-entrant.
673  *
674  * Return 0 if the NAND chip is ready, a negative error otherwise.
675  */
676 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
677 {
678 	const struct nand_sdr_timings *timings;
679 	u8 status = 0;
680 	int ret;
681 
682 	if (!nand_has_exec_op(chip))
683 		return -ENOTSUPP;
684 
685 	/* Wait tWB before polling the STATUS reg. */
686 	timings = nand_get_sdr_timings(&chip->data_interface);
687 	ndelay(PSEC_TO_NSEC(timings->tWB_max));
688 
689 	ret = nand_status_op(chip, NULL);
690 	if (ret)
691 		return ret;
692 
693 	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
694 	do {
695 		ret = nand_read_data_op(chip, &status, sizeof(status), true);
696 		if (ret)
697 			break;
698 
699 		if (status & NAND_STATUS_READY)
700 			break;
701 
702 		/*
703 		 * Typical lowest execution time for a tR on most NANDs is 10us,
704 		 * use this as polling delay before doing something smarter (ie.
705 		 * deriving a delay from the timeout value, timeout_ms/ratio).
706 		 */
707 		udelay(10);
708 	} while	(time_before(jiffies, timeout_ms));
709 
710 	/*
711 	 * We have to exit READ_STATUS mode in order to read real data on the
712 	 * bus in case the WAITRDY instruction is preceding a DATA_IN
713 	 * instruction.
714 	 */
715 	nand_exit_status_op(chip);
716 
717 	if (ret)
718 		return ret;
719 
720 	return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
721 };
722 EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
723 
724 /**
725  * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
726  * @chip: NAND chip structure
727  * @gpiod: GPIO descriptor of R/B pin
728  * @timeout_ms: Timeout in ms
729  *
730  * Poll the R/B GPIO pin until it becomes ready. If that does not happen
731  * whitin the specified timeout, -ETIMEDOUT is returned.
732  *
733  * This helper is intended to be used when the controller has access to the
734  * NAND R/B pin over GPIO.
735  *
736  * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
737  */
738 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
739 		      unsigned long timeout_ms)
740 {
741 	/* Wait until R/B pin indicates chip is ready or timeout occurs */
742 	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
743 	do {
744 		if (gpiod_get_value_cansleep(gpiod))
745 			return 0;
746 
747 		cond_resched();
748 	} while	(time_before(jiffies, timeout_ms));
749 
750 	return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
751 };
752 EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
753 
/**
 * panic_nand_wait - [GENERIC] wait until the command is done
 * @chip: NAND chip structure
 * @timeo: timeout
 *
 * Wait for command done. This is a helper function for nand_wait used when
 * we are in interrupt context. May happen when in panic and trying to write
 * an oops through mtdoops.
 */
void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
{
	int i;
	/* Busy-poll: we cannot sleep or take locks in this context */
	for (i = 0; i < timeo; i++) {
		if (chip->legacy.dev_ready) {
			/* Driver exposes a ready pin: poll it directly */
			if (chip->legacy.dev_ready(chip))
				break;
		} else {
			int ret;
			u8 status;

			/* No ready pin: read the status byte off the bus */
			ret = nand_read_data_op(chip, &status, sizeof(status),
						true);
			if (ret)
				return;

			if (status & NAND_STATUS_READY)
				break;
		}
		mdelay(1);
	}
}
785 
786 static bool nand_supports_get_features(struct nand_chip *chip, int addr)
787 {
788 	return (chip->parameters.supports_set_get_features &&
789 		test_bit(addr, chip->parameters.get_feature_list));
790 }
791 
792 static bool nand_supports_set_features(struct nand_chip *chip, int addr)
793 {
794 	return (chip->parameters.supports_set_get_features &&
795 		test_bit(addr, chip->parameters.set_feature_list));
796 }
797 
/**
 * nand_reset_data_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the Data interface and timings to ONFI mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
	int ret;

	/* Nothing to do when the controller has no timing setup hook */
	if (!nand_has_setup_data_iface(chip))
		return 0;

	/*
	 * The ONFI specification says:
	 * "
	 * To transition from NV-DDR or NV-DDR2 to the SDR data
	 * interface, the host shall use the Reset (FFh) command
	 * using SDR timing mode 0. A device in any timing mode is
	 * required to recognize Reset (FFh) command issued in SDR
	 * timing mode 0.
	 * "
	 *
	 * Configure the data interface in SDR mode and set the
	 * timings to timing mode 0.
	 */

	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
	ret = chip->controller->ops->setup_data_interface(chip, chipnr,
							&chip->data_interface);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}
836 
/**
 * nand_setup_data_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Find and configure the best data interface and NAND timings supported by
 * the chip and the driver.
 * First tries to retrieve supported timing modes from ONFI information,
 * and if the NAND chip does not support ONFI, relies on the
 * ->onfi_timing_mode_default specified in the nand_ids table.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
{
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
		chip->onfi_timing_mode_default,
	};
	int ret;

	if (!nand_has_setup_data_iface(chip))
		return 0;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		nand_select_target(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		nand_deselect_target(chip);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = chip->controller->ops->setup_data_interface(chip, chipnr,
							&chip->data_interface);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		return 0;

	/* Read back the timing mode the chip actually applied */
	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	nand_select_target(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	nand_deselect_target(chip);
	if (ret)
		goto err_reset_chip;

	if (tmode_param[0] != chip->onfi_timing_mode_default) {
		pr_warn("timing mode %d not acknowledged by the NAND chip\n",
			chip->onfi_timing_mode_default);
		goto err_reset_chip;
	}

	return 0;

err_reset_chip:
	/*
	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
	 * timing mode.
	 */
	nand_reset_data_interface(chip, chipnr);
	nand_select_target(chip, chipnr);
	nand_reset_op(chip);
	nand_deselect_target(chip);

	return ret;
}
908 
/**
 * nand_init_data_interface - find the best data interface and timings
 * @chip: The NAND chip
 *
 * Find the best data interface and NAND timings supported by the chip
 * and the driver.
 * First tries to retrieve supported timing modes from ONFI information,
 * and if the NAND chip does not support ONFI, relies on the
 * ->onfi_timing_mode_default specified in the nand_ids table. After this
 * function nand_chip->data_interface is initialized with the best timing mode
 * available.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_init_data_interface(struct nand_chip *chip)
{
	int modes, mode, ret;

	if (!nand_has_setup_data_iface(chip))
		return 0;

	/*
	 * First try to identify the best timings from ONFI parameters and
	 * if the NAND does not support ONFI, fallback to the default ONFI
	 * timing mode.
	 */
	if (chip->parameters.onfi) {
		modes = chip->parameters.onfi->async_timing_mode;
	} else {
		if (!chip->onfi_timing_mode_default)
			return 0;

		/* Assume all modes up to the default one are supported */
		modes = GENMASK(chip->onfi_timing_mode_default, 0);
	}

	/* Try the fastest supported mode first, then fall back */
	for (mode = fls(modes) - 1; mode >= 0; mode--) {
		ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
		if (ret)
			continue;

		/*
		 * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
		 * controller supports the requested timings.
		 */
		ret = chip->controller->ops->setup_data_interface(chip,
						 NAND_DATA_IFACE_CHECK_ONLY,
						 &chip->data_interface);
		if (!ret) {
			chip->onfi_timing_mode_default = mode;
			break;
		}
	}

	/* Not finding a usable mode is not an error: mode 0 still applies */
	return 0;
}
964 
/**
 * nand_fill_column_cycles - fill the column cycles of an address
 * @chip: The NAND chip
 * @addrs: Array of address cycles to fill
 * @offset_in_page: The offset in the page
 *
 * Fills the first or the first two bytes of the @addrs field depending
 * on the NAND bus width and the page size.
 *
 * Returns the number of cycles needed to encode the column, or a negative
 * error code in case one of the arguments is invalid.
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Make sure the offset is less than the actual page size. */
	if (offset_in_page > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/*
	 * On small page NANDs, there's a dedicated command to access the OOB
	 * area, and the column address is relative to the start of the OOB
	 * area, not the start of the page. Adjust the address accordingly.
	 */
	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
		offset_in_page -= mtd->writesize;

	/*
	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
	 * wide, then it must be divided by 2.
	 */
	if (chip->options & NAND_BUSWIDTH_16) {
		/* Odd byte offsets cannot be addressed on a 16-bit bus */
		if (WARN_ON(offset_in_page % 2))
			return -EINVAL;

		offset_in_page /= 2;
	}

	addrs[0] = offset_in_page;

	/*
	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
	 * need 2
	 */
	if (mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}
1018 
/*
 * Issue a READ PAGE operation on a small page device through ->exec_op():
 * command, 3 (or 4) address cycles, wait-ready, then optional data-in.
 */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/* Small page chips use dedicated opcodes per page half / OOB area */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row (page) address occupies the cycles after the single column one */
	addrs[1] = page;
	addrs[2] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1061 
/*
 * Issue a READ PAGE operation on a large page device through ->exec_op():
 * READ0, 4 (or 5) address cycles, READSTART, wait-ready, optional data-in.
 */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row (page) address follows the two column cycles */
	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1098 
1099 /**
1100  * nand_read_page_op - Do a READ PAGE operation
1101  * @chip: The NAND chip
1102  * @page: page to read
1103  * @offset_in_page: offset within the page
1104  * @buf: buffer used to store the data
1105  * @len: length of the buffer
1106  *
1107  * This function issues a READ PAGE operation.
1108  * This function does not select/unselect the CS line.
1109  *
1110  * Returns 0 on success, a negative error code otherwise.
1111  */
1112 int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1113 		      unsigned int offset_in_page, void *buf, unsigned int len)
1114 {
1115 	struct mtd_info *mtd = nand_to_mtd(chip);
1116 
1117 	if (len && !buf)
1118 		return -EINVAL;
1119 
1120 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1121 		return -EINVAL;
1122 
1123 	if (nand_has_exec_op(chip)) {
1124 		if (mtd->writesize > 512)
1125 			return nand_lp_exec_read_page_op(chip, page,
1126 							 offset_in_page, buf,
1127 							 len);
1128 
1129 		return nand_sp_exec_read_page_op(chip, page, offset_in_page,
1130 						 buf, len);
1131 	}
1132 
1133 	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
1134 	if (len)
1135 		chip->legacy.read_buf(chip, buf, len);
1136 
1137 	return 0;
1138 }
1139 EXPORT_SYMBOL_GPL(nand_read_page_op);
1140 
1141 /**
1142  * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
1143  * @chip: The NAND chip
1144  * @page: parameter page to read
1145  * @buf: buffer used to store the data
1146  * @len: length of the buffer
1147  *
1148  * This function issues a READ PARAMETER PAGE operation.
1149  * This function does not select/unselect the CS line.
1150  *
1151  * Returns 0 on success, a negative error code otherwise.
1152  */
1153 int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
1154 			    unsigned int len)
1155 {
1156 	unsigned int i;
1157 	u8 *p = buf;
1158 
1159 	if (len && !buf)
1160 		return -EINVAL;
1161 
1162 	if (nand_has_exec_op(chip)) {
1163 		const struct nand_sdr_timings *sdr =
1164 			nand_get_sdr_timings(&chip->data_interface);
1165 		struct nand_op_instr instrs[] = {
1166 			NAND_OP_CMD(NAND_CMD_PARAM, 0),
1167 			NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
1168 			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1169 					 PSEC_TO_NSEC(sdr->tRR_min)),
1170 			NAND_OP_8BIT_DATA_IN(len, buf, 0),
1171 		};
1172 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1173 
1174 		/* Drop the DATA_IN instruction if len is set to 0. */
1175 		if (!len)
1176 			op.ninstrs--;
1177 
1178 		return nand_exec_op(chip, &op);
1179 	}
1180 
1181 	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
1182 	for (i = 0; i < len; i++)
1183 		p[i] = chip->legacy.read_byte(chip);
1184 
1185 	return 0;
1186 }
1187 
1188 /**
1189  * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
1190  * @chip: The NAND chip
1191  * @offset_in_page: offset within the page
1192  * @buf: buffer used to store the data
1193  * @len: length of the buffer
1194  * @force_8bit: force 8-bit bus access
1195  *
1196  * This function issues a CHANGE READ COLUMN operation.
1197  * This function does not select/unselect the CS line.
1198  *
1199  * Returns 0 on success, a negative error code otherwise.
1200  */
1201 int nand_change_read_column_op(struct nand_chip *chip,
1202 			       unsigned int offset_in_page, void *buf,
1203 			       unsigned int len, bool force_8bit)
1204 {
1205 	struct mtd_info *mtd = nand_to_mtd(chip);
1206 
1207 	if (len && !buf)
1208 		return -EINVAL;
1209 
1210 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1211 		return -EINVAL;
1212 
1213 	/* Small page NANDs do not support column change. */
1214 	if (mtd->writesize <= 512)
1215 		return -ENOTSUPP;
1216 
1217 	if (nand_has_exec_op(chip)) {
1218 		const struct nand_sdr_timings *sdr =
1219 			nand_get_sdr_timings(&chip->data_interface);
1220 		u8 addrs[2] = {};
1221 		struct nand_op_instr instrs[] = {
1222 			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
1223 			NAND_OP_ADDR(2, addrs, 0),
1224 			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
1225 				    PSEC_TO_NSEC(sdr->tCCS_min)),
1226 			NAND_OP_DATA_IN(len, buf, 0),
1227 		};
1228 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1229 		int ret;
1230 
1231 		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1232 		if (ret < 0)
1233 			return ret;
1234 
1235 		/* Drop the DATA_IN instruction if len is set to 0. */
1236 		if (!len)
1237 			op.ninstrs--;
1238 
1239 		instrs[3].ctx.data.force_8bit = force_8bit;
1240 
1241 		return nand_exec_op(chip, &op);
1242 	}
1243 
1244 	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
1245 	if (len)
1246 		chip->legacy.read_buf(chip, buf, len);
1247 
1248 	return 0;
1249 }
1250 EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1251 
1252 /**
1253  * nand_read_oob_op - Do a READ OOB operation
1254  * @chip: The NAND chip
1255  * @page: page to read
1256  * @offset_in_oob: offset within the OOB area
1257  * @buf: buffer used to store the data
1258  * @len: length of the buffer
1259  *
1260  * This function issues a READ OOB operation.
1261  * This function does not select/unselect the CS line.
1262  *
1263  * Returns 0 on success, a negative error code otherwise.
1264  */
1265 int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1266 		     unsigned int offset_in_oob, void *buf, unsigned int len)
1267 {
1268 	struct mtd_info *mtd = nand_to_mtd(chip);
1269 
1270 	if (len && !buf)
1271 		return -EINVAL;
1272 
1273 	if (offset_in_oob + len > mtd->oobsize)
1274 		return -EINVAL;
1275 
1276 	if (nand_has_exec_op(chip))
1277 		return nand_read_page_op(chip, page,
1278 					 mtd->writesize + offset_in_oob,
1279 					 buf, len);
1280 
1281 	chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
1282 	if (len)
1283 		chip->legacy.read_buf(chip, buf, len);
1284 
1285 	return 0;
1286 }
1287 EXPORT_SYMBOL_GPL(nand_read_oob_op);
1288 
/*
 * Core PROG PAGE helper for ->exec_op() capable controllers.
 *
 * Builds the SEQIN + address cycles + optional DATA_OUT sequence and, when
 * @prog is true, appends PAGEPROG + wait-ready and reads the chip status.
 *
 * Returns the NAND status byte (>= 0) when @prog is true, 0 when only the
 * first half of the sequence was issued, or a negative error code.
 */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	/* Column cycles first, then the row cycles are appended below. */
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
	int ret;
	u8 status;

	if (naddrs < 0)
		return naddrs;

	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	ret = nand_exec_op(chip, &op);
	if (!prog || ret)
		return ret;

	/* Programming done: report the chip status byte to the caller. */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status;
}
1363 
1364 /**
1365  * nand_prog_page_begin_op - starts a PROG PAGE operation
1366  * @chip: The NAND chip
1367  * @page: page to write
1368  * @offset_in_page: offset within the page
1369  * @buf: buffer containing the data to write to the page
1370  * @len: length of the buffer
1371  *
1372  * This function issues the first half of a PROG PAGE operation.
1373  * This function does not select/unselect the CS line.
1374  *
1375  * Returns 0 on success, a negative error code otherwise.
1376  */
1377 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1378 			    unsigned int offset_in_page, const void *buf,
1379 			    unsigned int len)
1380 {
1381 	struct mtd_info *mtd = nand_to_mtd(chip);
1382 
1383 	if (len && !buf)
1384 		return -EINVAL;
1385 
1386 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1387 		return -EINVAL;
1388 
1389 	if (nand_has_exec_op(chip))
1390 		return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1391 					      len, false);
1392 
1393 	chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
1394 
1395 	if (buf)
1396 		chip->legacy.write_buf(chip, buf, len);
1397 
1398 	return 0;
1399 }
1400 EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1401 
1402 /**
1403  * nand_prog_page_end_op - ends a PROG PAGE operation
1404  * @chip: The NAND chip
1405  *
1406  * This function issues the second half of a PROG PAGE operation.
1407  * This function does not select/unselect the CS line.
1408  *
1409  * Returns 0 on success, a negative error code otherwise.
1410  */
1411 int nand_prog_page_end_op(struct nand_chip *chip)
1412 {
1413 	int ret;
1414 	u8 status;
1415 
1416 	if (nand_has_exec_op(chip)) {
1417 		const struct nand_sdr_timings *sdr =
1418 			nand_get_sdr_timings(&chip->data_interface);
1419 		struct nand_op_instr instrs[] = {
1420 			NAND_OP_CMD(NAND_CMD_PAGEPROG,
1421 				    PSEC_TO_NSEC(sdr->tWB_max)),
1422 			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1423 		};
1424 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1425 
1426 		ret = nand_exec_op(chip, &op);
1427 		if (ret)
1428 			return ret;
1429 
1430 		ret = nand_status_op(chip, &status);
1431 		if (ret)
1432 			return ret;
1433 	} else {
1434 		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1435 		ret = chip->legacy.waitfunc(chip);
1436 		if (ret < 0)
1437 			return ret;
1438 
1439 		status = ret;
1440 	}
1441 
1442 	if (status & NAND_STATUS_FAIL)
1443 		return -EIO;
1444 
1445 	return 0;
1446 }
1447 EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1448 
1449 /**
1450  * nand_prog_page_op - Do a full PROG PAGE operation
1451  * @chip: The NAND chip
1452  * @page: page to write
1453  * @offset_in_page: offset within the page
1454  * @buf: buffer containing the data to write to the page
1455  * @len: length of the buffer
1456  *
1457  * This function issues a full PROG PAGE operation.
1458  * This function does not select/unselect the CS line.
1459  *
1460  * Returns 0 on success, a negative error code otherwise.
1461  */
1462 int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1463 		      unsigned int offset_in_page, const void *buf,
1464 		      unsigned int len)
1465 {
1466 	struct mtd_info *mtd = nand_to_mtd(chip);
1467 	int status;
1468 
1469 	if (!len || !buf)
1470 		return -EINVAL;
1471 
1472 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1473 		return -EINVAL;
1474 
1475 	if (nand_has_exec_op(chip)) {
1476 		status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1477 						len, true);
1478 	} else {
1479 		chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
1480 				     page);
1481 		chip->legacy.write_buf(chip, buf, len);
1482 		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1483 		status = chip->legacy.waitfunc(chip);
1484 	}
1485 
1486 	if (status & NAND_STATUS_FAIL)
1487 		return -EIO;
1488 
1489 	return 0;
1490 }
1491 EXPORT_SYMBOL_GPL(nand_prog_page_op);
1492 
1493 /**
1494  * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
1495  * @chip: The NAND chip
1496  * @offset_in_page: offset within the page
1497  * @buf: buffer containing the data to send to the NAND
1498  * @len: length of the buffer
1499  * @force_8bit: force 8-bit bus access
1500  *
1501  * This function issues a CHANGE WRITE COLUMN operation.
1502  * This function does not select/unselect the CS line.
1503  *
1504  * Returns 0 on success, a negative error code otherwise.
1505  */
1506 int nand_change_write_column_op(struct nand_chip *chip,
1507 				unsigned int offset_in_page,
1508 				const void *buf, unsigned int len,
1509 				bool force_8bit)
1510 {
1511 	struct mtd_info *mtd = nand_to_mtd(chip);
1512 
1513 	if (len && !buf)
1514 		return -EINVAL;
1515 
1516 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1517 		return -EINVAL;
1518 
1519 	/* Small page NANDs do not support column change. */
1520 	if (mtd->writesize <= 512)
1521 		return -ENOTSUPP;
1522 
1523 	if (nand_has_exec_op(chip)) {
1524 		const struct nand_sdr_timings *sdr =
1525 			nand_get_sdr_timings(&chip->data_interface);
1526 		u8 addrs[2];
1527 		struct nand_op_instr instrs[] = {
1528 			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
1529 			NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
1530 			NAND_OP_DATA_OUT(len, buf, 0),
1531 		};
1532 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1533 		int ret;
1534 
1535 		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1536 		if (ret < 0)
1537 			return ret;
1538 
1539 		instrs[2].ctx.data.force_8bit = force_8bit;
1540 
1541 		/* Drop the DATA_OUT instruction if len is set to 0. */
1542 		if (!len)
1543 			op.ninstrs--;
1544 
1545 		return nand_exec_op(chip, &op);
1546 	}
1547 
1548 	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
1549 	if (len)
1550 		chip->legacy.write_buf(chip, buf, len);
1551 
1552 	return 0;
1553 }
1554 EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1555 
1556 /**
1557  * nand_readid_op - Do a READID operation
1558  * @chip: The NAND chip
1559  * @addr: address cycle to pass after the READID command
1560  * @buf: buffer used to store the ID
1561  * @len: length of the buffer
1562  *
1563  * This function sends a READID command and reads back the ID returned by the
1564  * NAND.
1565  * This function does not select/unselect the CS line.
1566  *
1567  * Returns 0 on success, a negative error code otherwise.
1568  */
1569 int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1570 		   unsigned int len)
1571 {
1572 	unsigned int i;
1573 	u8 *id = buf;
1574 
1575 	if (len && !buf)
1576 		return -EINVAL;
1577 
1578 	if (nand_has_exec_op(chip)) {
1579 		const struct nand_sdr_timings *sdr =
1580 			nand_get_sdr_timings(&chip->data_interface);
1581 		struct nand_op_instr instrs[] = {
1582 			NAND_OP_CMD(NAND_CMD_READID, 0),
1583 			NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
1584 			NAND_OP_8BIT_DATA_IN(len, buf, 0),
1585 		};
1586 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1587 
1588 		/* Drop the DATA_IN instruction if len is set to 0. */
1589 		if (!len)
1590 			op.ninstrs--;
1591 
1592 		return nand_exec_op(chip, &op);
1593 	}
1594 
1595 	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
1596 
1597 	for (i = 0; i < len; i++)
1598 		id[i] = chip->legacy.read_byte(chip);
1599 
1600 	return 0;
1601 }
1602 EXPORT_SYMBOL_GPL(nand_readid_op);
1603 
1604 /**
1605  * nand_status_op - Do a STATUS operation
1606  * @chip: The NAND chip
1607  * @status: out variable to store the NAND status
1608  *
1609  * This function sends a STATUS command and reads back the status returned by
1610  * the NAND.
1611  * This function does not select/unselect the CS line.
1612  *
1613  * Returns 0 on success, a negative error code otherwise.
1614  */
1615 int nand_status_op(struct nand_chip *chip, u8 *status)
1616 {
1617 	if (nand_has_exec_op(chip)) {
1618 		const struct nand_sdr_timings *sdr =
1619 			nand_get_sdr_timings(&chip->data_interface);
1620 		struct nand_op_instr instrs[] = {
1621 			NAND_OP_CMD(NAND_CMD_STATUS,
1622 				    PSEC_TO_NSEC(sdr->tADL_min)),
1623 			NAND_OP_8BIT_DATA_IN(1, status, 0),
1624 		};
1625 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1626 
1627 		if (!status)
1628 			op.ninstrs--;
1629 
1630 		return nand_exec_op(chip, &op);
1631 	}
1632 
1633 	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
1634 	if (status)
1635 		*status = chip->legacy.read_byte(chip);
1636 
1637 	return 0;
1638 }
1639 EXPORT_SYMBOL_GPL(nand_status_op);
1640 
1641 /**
1642  * nand_exit_status_op - Exit a STATUS operation
1643  * @chip: The NAND chip
1644  *
1645  * This function sends a READ0 command to cancel the effect of the STATUS
1646  * command to avoid reading only the status until a new read command is sent.
1647  *
1648  * This function does not select/unselect the CS line.
1649  *
1650  * Returns 0 on success, a negative error code otherwise.
1651  */
1652 int nand_exit_status_op(struct nand_chip *chip)
1653 {
1654 	if (nand_has_exec_op(chip)) {
1655 		struct nand_op_instr instrs[] = {
1656 			NAND_OP_CMD(NAND_CMD_READ0, 0),
1657 		};
1658 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1659 
1660 		return nand_exec_op(chip, &op);
1661 	}
1662 
1663 	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
1664 
1665 	return 0;
1666 }
1667 
1668 /**
1669  * nand_erase_op - Do an erase operation
1670  * @chip: The NAND chip
1671  * @eraseblock: block to erase
1672  *
1673  * This function sends an ERASE command and waits for the NAND to be ready
1674  * before returning.
1675  * This function does not select/unselect the CS line.
1676  *
1677  * Returns 0 on success, a negative error code otherwise.
1678  */
1679 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
1680 {
1681 	unsigned int page = eraseblock <<
1682 			    (chip->phys_erase_shift - chip->page_shift);
1683 	int ret;
1684 	u8 status;
1685 
1686 	if (nand_has_exec_op(chip)) {
1687 		const struct nand_sdr_timings *sdr =
1688 			nand_get_sdr_timings(&chip->data_interface);
1689 		u8 addrs[3] = {	page, page >> 8, page >> 16 };
1690 		struct nand_op_instr instrs[] = {
1691 			NAND_OP_CMD(NAND_CMD_ERASE1, 0),
1692 			NAND_OP_ADDR(2, addrs, 0),
1693 			NAND_OP_CMD(NAND_CMD_ERASE2,
1694 				    PSEC_TO_MSEC(sdr->tWB_max)),
1695 			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
1696 		};
1697 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1698 
1699 		if (chip->options & NAND_ROW_ADDR_3)
1700 			instrs[1].ctx.addr.naddrs++;
1701 
1702 		ret = nand_exec_op(chip, &op);
1703 		if (ret)
1704 			return ret;
1705 
1706 		ret = nand_status_op(chip, &status);
1707 		if (ret)
1708 			return ret;
1709 	} else {
1710 		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
1711 		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
1712 
1713 		ret = chip->legacy.waitfunc(chip);
1714 		if (ret < 0)
1715 			return ret;
1716 
1717 		status = ret;
1718 	}
1719 
1720 	if (status & NAND_STATUS_FAIL)
1721 		return -EIO;
1722 
1723 	return 0;
1724 }
1725 EXPORT_SYMBOL_GPL(nand_erase_op);
1726 
1727 /**
1728  * nand_set_features_op - Do a SET FEATURES operation
1729  * @chip: The NAND chip
1730  * @feature: feature id
1731  * @data: 4 bytes of data
1732  *
1733  * This function sends a SET FEATURES command and waits for the NAND to be
1734  * ready before returning.
1735  * This function does not select/unselect the CS line.
1736  *
1737  * Returns 0 on success, a negative error code otherwise.
1738  */
1739 static int nand_set_features_op(struct nand_chip *chip, u8 feature,
1740 				const void *data)
1741 {
1742 	const u8 *params = data;
1743 	int i, ret;
1744 
1745 	if (nand_has_exec_op(chip)) {
1746 		const struct nand_sdr_timings *sdr =
1747 			nand_get_sdr_timings(&chip->data_interface);
1748 		struct nand_op_instr instrs[] = {
1749 			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
1750 			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
1751 			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
1752 					      PSEC_TO_NSEC(sdr->tWB_max)),
1753 			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
1754 		};
1755 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1756 
1757 		return nand_exec_op(chip, &op);
1758 	}
1759 
1760 	chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
1761 	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1762 		chip->legacy.write_byte(chip, params[i]);
1763 
1764 	ret = chip->legacy.waitfunc(chip);
1765 	if (ret < 0)
1766 		return ret;
1767 
1768 	if (ret & NAND_STATUS_FAIL)
1769 		return -EIO;
1770 
1771 	return 0;
1772 }
1773 
1774 /**
1775  * nand_get_features_op - Do a GET FEATURES operation
1776  * @chip: The NAND chip
1777  * @feature: feature id
1778  * @data: 4 bytes of data
1779  *
1780  * This function sends a GET FEATURES command and waits for the NAND to be
1781  * ready before returning.
1782  * This function does not select/unselect the CS line.
1783  *
1784  * Returns 0 on success, a negative error code otherwise.
1785  */
1786 static int nand_get_features_op(struct nand_chip *chip, u8 feature,
1787 				void *data)
1788 {
1789 	u8 *params = data;
1790 	int i;
1791 
1792 	if (nand_has_exec_op(chip)) {
1793 		const struct nand_sdr_timings *sdr =
1794 			nand_get_sdr_timings(&chip->data_interface);
1795 		struct nand_op_instr instrs[] = {
1796 			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
1797 			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
1798 			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
1799 					 PSEC_TO_NSEC(sdr->tRR_min)),
1800 			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
1801 					     data, 0),
1802 		};
1803 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1804 
1805 		return nand_exec_op(chip, &op);
1806 	}
1807 
1808 	chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
1809 	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1810 		params[i] = chip->legacy.read_byte(chip);
1811 
1812 	return 0;
1813 }
1814 
/*
 * Wait for the chip to be ready again after a command.
 *
 * @timeout_ms: maximum time to wait, in milliseconds
 * @delay_ns: delay to apply (or fallback chip_delay) in nanoseconds
 *
 * NOTE(review): @timeout_ms and @delay_ns are already expressed in ms/ns,
 * yet they are passed through PSEC_TO_MSEC()/PSEC_TO_NSEC(), which elsewhere
 * in this file convert *picosecond* timings. If those macros really divide,
 * the effective timeout collapses to ~0 — confirm the units expected by
 * NAND_OP_WAIT_RDY before relying on this path.
 */
static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
			    unsigned int delay_ns)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
					 PSEC_TO_NSEC(delay_ns)),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Apply delay or wait for ready/busy pin */
	if (!chip->legacy.dev_ready)
		udelay(chip->legacy.chip_delay);
	else
		nand_wait_ready(chip);

	return 0;
}
1836 
1837 /**
1838  * nand_reset_op - Do a reset operation
1839  * @chip: The NAND chip
1840  *
1841  * This function sends a RESET command and waits for the NAND to be ready
1842  * before returning.
1843  * This function does not select/unselect the CS line.
1844  *
1845  * Returns 0 on success, a negative error code otherwise.
1846  */
1847 int nand_reset_op(struct nand_chip *chip)
1848 {
1849 	if (nand_has_exec_op(chip)) {
1850 		const struct nand_sdr_timings *sdr =
1851 			nand_get_sdr_timings(&chip->data_interface);
1852 		struct nand_op_instr instrs[] = {
1853 			NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
1854 			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
1855 		};
1856 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1857 
1858 		return nand_exec_op(chip, &op);
1859 	}
1860 
1861 	chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
1862 
1863 	return 0;
1864 }
1865 EXPORT_SYMBOL_GPL(nand_reset_op);
1866 
1867 /**
1868  * nand_read_data_op - Read data from the NAND
1869  * @chip: The NAND chip
1870  * @buf: buffer used to store the data
1871  * @len: length of the buffer
1872  * @force_8bit: force 8-bit bus access
1873  *
1874  * This function does a raw data read on the bus. Usually used after launching
1875  * another NAND operation like nand_read_page_op().
1876  * This function does not select/unselect the CS line.
1877  *
1878  * Returns 0 on success, a negative error code otherwise.
1879  */
1880 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1881 		      bool force_8bit)
1882 {
1883 	if (!len || !buf)
1884 		return -EINVAL;
1885 
1886 	if (nand_has_exec_op(chip)) {
1887 		struct nand_op_instr instrs[] = {
1888 			NAND_OP_DATA_IN(len, buf, 0),
1889 		};
1890 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1891 
1892 		instrs[0].ctx.data.force_8bit = force_8bit;
1893 
1894 		return nand_exec_op(chip, &op);
1895 	}
1896 
1897 	if (force_8bit) {
1898 		u8 *p = buf;
1899 		unsigned int i;
1900 
1901 		for (i = 0; i < len; i++)
1902 			p[i] = chip->legacy.read_byte(chip);
1903 	} else {
1904 		chip->legacy.read_buf(chip, buf, len);
1905 	}
1906 
1907 	return 0;
1908 }
1909 EXPORT_SYMBOL_GPL(nand_read_data_op);
1910 
1911 /**
1912  * nand_write_data_op - Write data from the NAND
1913  * @chip: The NAND chip
1914  * @buf: buffer containing the data to send on the bus
1915  * @len: length of the buffer
1916  * @force_8bit: force 8-bit bus access
1917  *
1918  * This function does a raw data write on the bus. Usually used after launching
1919  * another NAND operation like nand_write_page_begin_op().
1920  * This function does not select/unselect the CS line.
1921  *
1922  * Returns 0 on success, a negative error code otherwise.
1923  */
1924 int nand_write_data_op(struct nand_chip *chip, const void *buf,
1925 		       unsigned int len, bool force_8bit)
1926 {
1927 	if (!len || !buf)
1928 		return -EINVAL;
1929 
1930 	if (nand_has_exec_op(chip)) {
1931 		struct nand_op_instr instrs[] = {
1932 			NAND_OP_DATA_OUT(len, buf, 0),
1933 		};
1934 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1935 
1936 		instrs[0].ctx.data.force_8bit = force_8bit;
1937 
1938 		return nand_exec_op(chip, &op);
1939 	}
1940 
1941 	if (force_8bit) {
1942 		const u8 *p = buf;
1943 		unsigned int i;
1944 
1945 		for (i = 0; i < len; i++)
1946 			chip->legacy.write_byte(chip, p[i]);
1947 	} else {
1948 		chip->legacy.write_buf(chip, buf, len);
1949 	}
1950 
1951 	return 0;
1952 }
1953 EXPORT_SYMBOL_GPL(nand_write_data_op);
1954 
1955 /**
1956  * struct nand_op_parser_ctx - Context used by the parser
1957  * @instrs: array of all the instructions that must be addressed
1958  * @ninstrs: length of the @instrs array
1959  * @subop: Sub-operation to be passed to the NAND controller
1960  *
1961  * This structure is used by the core to split NAND operations into
1962  * sub-operations that can be handled by the NAND controller.
1963  */
1964 struct nand_op_parser_ctx {
1965 	const struct nand_op_instr *instrs;
1966 	unsigned int ninstrs;
1967 	struct nand_subop subop;
1968 };
1969 
1970 /**
1971  * nand_op_parser_must_split_instr - Checks if an instruction must be split
1972  * @pat: the parser pattern element that matches @instr
1973  * @instr: pointer to the instruction to check
1974  * @start_offset: this is an in/out parameter. If @instr has already been
1975  *		  split, then @start_offset is the offset from which to start
1976  *		  (either an address cycle or an offset in the data buffer).
1977  *		  Conversely, if the function returns true (ie. instr must be
1978  *		  split), this parameter is updated to point to the first
1979  *		  data/address cycle that has not been taken care of.
1980  *
1981  * Some NAND controllers are limited and cannot send X address cycles with a
1982  * unique operation, or cannot read/write more than Y bytes at the same time.
1983  * In this case, split the instruction that does not fit in a single
1984  * controller-operation into two or more chunks.
1985  *
1986  * Returns true if the instruction must be split, false otherwise.
1987  * The @start_offset parameter is also updated to the offset at which the next
1988  * bundle of instruction must start (if an address or a data instruction).
1989  */
1990 static bool
1991 nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
1992 				const struct nand_op_instr *instr,
1993 				unsigned int *start_offset)
1994 {
1995 	switch (pat->type) {
1996 	case NAND_OP_ADDR_INSTR:
1997 		if (!pat->ctx.addr.maxcycles)
1998 			break;
1999 
2000 		if (instr->ctx.addr.naddrs - *start_offset >
2001 		    pat->ctx.addr.maxcycles) {
2002 			*start_offset += pat->ctx.addr.maxcycles;
2003 			return true;
2004 		}
2005 		break;
2006 
2007 	case NAND_OP_DATA_IN_INSTR:
2008 	case NAND_OP_DATA_OUT_INSTR:
2009 		if (!pat->ctx.data.maxlen)
2010 			break;
2011 
2012 		if (instr->ctx.data.len - *start_offset >
2013 		    pat->ctx.data.maxlen) {
2014 			*start_offset += pat->ctx.data.maxlen;
2015 			return true;
2016 		}
2017 		break;
2018 
2019 	default:
2020 		break;
2021 	}
2022 
2023 	return false;
2024 }
2025 
2026 /**
2027  * nand_op_parser_match_pat - Checks if a pattern matches the instructions
2028  *			      remaining in the parser context
2029  * @pat: the pattern to test
2030  * @ctx: the parser context structure to match with the pattern @pat
2031  *
2032  * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
2033  * Returns true if this is the case, false ortherwise. When true is returned,
2034  * @ctx->subop is updated with the set of instructions to be passed to the
2035  * controller driver.
2036  */
2037 static bool
2038 nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
2039 			 struct nand_op_parser_ctx *ctx)
2040 {
2041 	unsigned int instr_offset = ctx->subop.first_instr_start_off;
2042 	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
2043 	const struct nand_op_instr *instr = ctx->subop.instrs;
2044 	unsigned int i, ninstrs;
2045 
2046 	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
2047 		/*
2048 		 * The pattern instruction does not match the operation
2049 		 * instruction. If the instruction is marked optional in the
2050 		 * pattern definition, we skip the pattern element and continue
2051 		 * to the next one. If the element is mandatory, there's no
2052 		 * match and we can return false directly.
2053 		 */
2054 		if (instr->type != pat->elems[i].type) {
2055 			if (!pat->elems[i].optional)
2056 				return false;
2057 
2058 			continue;
2059 		}
2060 
2061 		/*
2062 		 * Now check the pattern element constraints. If the pattern is
2063 		 * not able to handle the whole instruction in a single step,
2064 		 * we have to split it.
2065 		 * The last_instr_end_off value comes back updated to point to
2066 		 * the position where we have to split the instruction (the
2067 		 * start of the next subop chunk).
2068 		 */
2069 		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
2070 						    &instr_offset)) {
2071 			ninstrs++;
2072 			i++;
2073 			break;
2074 		}
2075 
2076 		instr++;
2077 		ninstrs++;
2078 		instr_offset = 0;
2079 	}
2080 
2081 	/*
2082 	 * This can happen if all instructions of a pattern are optional.
2083 	 * Still, if there's not at least one instruction handled by this
2084 	 * pattern, this is not a match, and we should try the next one (if
2085 	 * any).
2086 	 */
2087 	if (!ninstrs)
2088 		return false;
2089 
2090 	/*
2091 	 * We had a match on the pattern head, but the pattern may be longer
2092 	 * than the instructions we're asked to execute. We need to make sure
2093 	 * there's no mandatory elements in the pattern tail.
2094 	 */
2095 	for (; i < pat->nelems; i++) {
2096 		if (!pat->elems[i].optional)
2097 			return false;
2098 	}
2099 
2100 	/*
2101 	 * We have a match: update the subop structure accordingly and return
2102 	 * true.
2103 	 */
2104 	ctx->subop.ninstrs = ninstrs;
2105 	ctx->subop.last_instr_end_off = instr_offset;
2106 
2107 	return true;
2108 }
2109 
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/*
 * Dump the full operation held in @ctx, flagging with a "->" prefix the
 * instructions that belong to the sub-operation currently being processed.
 */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = "      ";
	unsigned int i;

	pr_debug("executing subop:\n");

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Switch to the arrow prefix on the subop's first instr. */
		if (instr == &ctx->subop.instrs[0])
			prefix = "    ->";

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			pr_debug("%sCMD      [0x%02x]\n", prefix,
				 instr->ctx.cmd.opcode);
			break;
		case NAND_OP_ADDR_INSTR:
			/* Address dump is capped at 64 cycles. */
			pr_debug("%sADDR     [%d cyc: %*ph]\n", prefix,
				 instr->ctx.addr.naddrs,
				 instr->ctx.addr.naddrs < 64 ?
				 instr->ctx.addr.naddrs : 64,
				 instr->ctx.addr.addrs);
			break;
		case NAND_OP_DATA_IN_INSTR:
			pr_debug("%sDATA_IN  [%d B%s]\n", prefix,
				 instr->ctx.data.len,
				 instr->ctx.data.force_8bit ?
				 ", force 8-bit" : "");
			break;
		case NAND_OP_DATA_OUT_INSTR:
			pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
				 instr->ctx.data.len,
				 instr->ctx.data.force_8bit ?
				 ", force 8-bit" : "");
			break;
		case NAND_OP_WAITRDY_INSTR:
			pr_debug("%sWAITRDY  [max %d ms]\n", prefix,
				 instr->ctx.waitrdy.timeout_ms);
			break;
		}

		/* Revert to the plain prefix past the subop's last instr. */
		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = "      ";
	}
}
#else
/* Tracing compiled out: keep a stub so callers need no #ifdefs. */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif
2165 
/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, the function only checks if @op can be handled but
 *		does not execute the operation
 *
 * Helper function designed to ease integration of NAND controller drivers that
 * only support a limited set of instruction sequences. The supported sequences
 * are described in @parser, and the framework takes care of splitting @op into
 * multiple sub-operations (if required) and pass them back to the ->exec()
 * callback of the matching pattern if @check_only is set to false.
 *
 * NAND controller drivers should call this function from their own ->exec_op()
 * implementation.
 *
 * Returns 0 on success, a negative error code otherwise. A failure can be
 * caused by an unsupported operation (none of the supported patterns is able
 * to handle the requested operation), or an error returned by one of the
 * matching pattern->exec() hook.
 */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	/* Consume the operation one sub-operation at a time. */
	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		int ret;

		/* Try each pattern in order until one matches. */
		for (i = 0; i < parser->npatterns; i++) {
			const struct nand_op_parser_pattern *pattern;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &ctx))
				continue;

			nand_op_parser_trace(&ctx);

			if (check_only)
				break;

			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;

			break;
		}

		/* No pattern could handle the remaining instructions. */
		if (i == parser->npatterns) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop.
		 * A non-zero last_instr_end_off means the last instruction was
		 * split: replay it as the first instruction of the next subop,
		 * starting at that offset.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2240 
2241 static bool nand_instr_is_data(const struct nand_op_instr *instr)
2242 {
2243 	return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2244 			 instr->type == NAND_OP_DATA_OUT_INSTR);
2245 }
2246 
2247 static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2248 				      unsigned int instr_idx)
2249 {
2250 	return subop && instr_idx < subop->ninstrs;
2251 }
2252 
2253 static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2254 					     unsigned int instr_idx)
2255 {
2256 	if (instr_idx)
2257 		return 0;
2258 
2259 	return subop->first_instr_start_off;
2260 }
2261 
2262 /**
2263  * nand_subop_get_addr_start_off - Get the start offset in an address array
2264  * @subop: The entire sub-operation
2265  * @instr_idx: Index of the instruction inside the sub-operation
2266  *
2267  * During driver development, one could be tempted to directly use the
2268  * ->addr.addrs field of address instructions. This is wrong as address
2269  * instructions might be split.
2270  *
2271  * Given an address instruction, returns the offset of the first cycle to issue.
2272  */
2273 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2274 					   unsigned int instr_idx)
2275 {
2276 	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2277 		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2278 		return 0;
2279 
2280 	return nand_subop_get_start_off(subop, instr_idx);
2281 }
2282 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2283 
2284 /**
2285  * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2286  * @subop: The entire sub-operation
2287  * @instr_idx: Index of the instruction inside the sub-operation
2288  *
2289  * During driver development, one could be tempted to directly use the
2290  * ->addr->naddrs field of a data instruction. This is wrong as instructions
2291  * might be split.
2292  *
2293  * Given an address instruction, returns the number of address cycle to issue.
2294  */
2295 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2296 					 unsigned int instr_idx)
2297 {
2298 	int start_off, end_off;
2299 
2300 	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2301 		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2302 		return 0;
2303 
2304 	start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2305 
2306 	if (instr_idx == subop->ninstrs - 1 &&
2307 	    subop->last_instr_end_off)
2308 		end_off = subop->last_instr_end_off;
2309 	else
2310 		end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2311 
2312 	return end_off - start_off;
2313 }
2314 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2315 
2316 /**
2317  * nand_subop_get_data_start_off - Get the start offset in a data array
2318  * @subop: The entire sub-operation
2319  * @instr_idx: Index of the instruction inside the sub-operation
2320  *
2321  * During driver development, one could be tempted to directly use the
2322  * ->data->buf.{in,out} field of data instructions. This is wrong as data
2323  * instructions might be split.
2324  *
2325  * Given a data instruction, returns the offset to start from.
2326  */
2327 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2328 					   unsigned int instr_idx)
2329 {
2330 	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2331 		    !nand_instr_is_data(&subop->instrs[instr_idx])))
2332 		return 0;
2333 
2334 	return nand_subop_get_start_off(subop, instr_idx);
2335 }
2336 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2337 
2338 /**
2339  * nand_subop_get_data_len - Get the number of bytes to retrieve
2340  * @subop: The entire sub-operation
2341  * @instr_idx: Index of the instruction inside the sub-operation
2342  *
2343  * During driver development, one could be tempted to directly use the
2344  * ->data->len field of a data instruction. This is wrong as data instructions
2345  * might be split.
2346  *
2347  * Returns the length of the chunk of data to send/receive.
2348  */
2349 unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
2350 				     unsigned int instr_idx)
2351 {
2352 	int start_off = 0, end_off;
2353 
2354 	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2355 		    !nand_instr_is_data(&subop->instrs[instr_idx])))
2356 		return 0;
2357 
2358 	start_off = nand_subop_get_data_start_off(subop, instr_idx);
2359 
2360 	if (instr_idx == subop->ninstrs - 1 &&
2361 	    subop->last_instr_end_off)
2362 		end_off = subop->last_instr_end_off;
2363 	else
2364 		end_off = subop->instrs[instr_idx].ctx.data.len;
2365 
2366 	return end_off - start_off;
2367 }
2368 EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2369 
2370 /**
2371  * nand_reset - Reset and initialize a NAND device
2372  * @chip: The NAND chip
2373  * @chipnr: Internal die id
2374  *
2375  * Save the timings data structure, then apply SDR timings mode 0 (see
2376  * nand_reset_data_interface for details), do the reset operation, and
2377  * apply back the previous timings.
2378  *
2379  * Returns 0 on success, a negative error code otherwise.
2380  */
2381 int nand_reset(struct nand_chip *chip, int chipnr)
2382 {
2383 	struct nand_data_interface saved_data_intf = chip->data_interface;
2384 	int ret;
2385 
2386 	ret = nand_reset_data_interface(chip, chipnr);
2387 	if (ret)
2388 		return ret;
2389 
2390 	/*
2391 	 * The CS line has to be released before we can apply the new NAND
2392 	 * interface settings, hence this weird nand_select_target()
2393 	 * nand_deselect_target() dance.
2394 	 */
2395 	nand_select_target(chip, chipnr);
2396 	ret = nand_reset_op(chip);
2397 	nand_deselect_target(chip);
2398 	if (ret)
2399 		return ret;
2400 
2401 	/*
2402 	 * A nand_reset_data_interface() put both the NAND chip and the NAND
2403 	 * controller in timings mode 0. If the default mode for this chip is
2404 	 * also 0, no need to proceed to the change again. Plus, at probe time,
2405 	 * nand_setup_data_interface() uses ->set/get_features() which would
2406 	 * fail anyway as the parameter page is not available yet.
2407 	 */
2408 	if (!chip->onfi_timing_mode_default)
2409 		return 0;
2410 
2411 	chip->data_interface = saved_data_intf;
2412 	ret = nand_setup_data_interface(chip, chipnr);
2413 	if (ret)
2414 		return ret;
2415 
2416 	return 0;
2417 }
2418 EXPORT_SYMBOL_GPL(nand_reset);
2419 
2420 /**
2421  * nand_get_features - wrapper to perform a GET_FEATURE
2422  * @chip: NAND chip info structure
2423  * @addr: feature address
2424  * @subfeature_param: the subfeature parameters, a four bytes array
2425  *
2426  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2427  * operation cannot be handled.
2428  */
2429 int nand_get_features(struct nand_chip *chip, int addr,
2430 		      u8 *subfeature_param)
2431 {
2432 	if (!nand_supports_get_features(chip, addr))
2433 		return -ENOTSUPP;
2434 
2435 	if (chip->legacy.get_features)
2436 		return chip->legacy.get_features(chip, addr, subfeature_param);
2437 
2438 	return nand_get_features_op(chip, addr, subfeature_param);
2439 }
2440 
2441 /**
2442  * nand_set_features - wrapper to perform a SET_FEATURE
2443  * @chip: NAND chip info structure
2444  * @addr: feature address
2445  * @subfeature_param: the subfeature parameters, a four bytes array
2446  *
2447  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2448  * operation cannot be handled.
2449  */
2450 int nand_set_features(struct nand_chip *chip, int addr,
2451 		      u8 *subfeature_param)
2452 {
2453 	if (!nand_supports_set_features(chip, addr))
2454 		return -ENOTSUPP;
2455 
2456 	if (chip->legacy.set_features)
2457 		return chip->legacy.set_features(chip, addr, subfeature_param);
2458 
2459 	return nand_set_features_op(chip, addr, subfeature_param);
2460 }
2461 
2462 /**
2463  * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
2464  * @buf: buffer to test
2465  * @len: buffer length
2466  * @bitflips_threshold: maximum number of bitflips
2467  *
2468  * Check if a buffer contains only 0xff, which means the underlying region
2469  * has been erased and is ready to be programmed.
2470  * The bitflips_threshold specify the maximum number of bitflips before
2471  * considering the region is not erased.
2472  * Note: The logic of this function has been extracted from the memweight
2473  * implementation, except that nand_check_erased_buf function exit before
2474  * testing the whole buffer if the number of bitflips exceed the
2475  * bitflips_threshold value.
2476  *
2477  * Returns a positive number of bitflips less than or equal to
2478  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2479  * threshold.
2480  */
2481 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
2482 {
2483 	const unsigned char *bitmap = buf;
2484 	int bitflips = 0;
2485 	int weight;
2486 
2487 	for (; len && ((uintptr_t)bitmap) % sizeof(long);
2488 	     len--, bitmap++) {
2489 		weight = hweight8(*bitmap);
2490 		bitflips += BITS_PER_BYTE - weight;
2491 		if (unlikely(bitflips > bitflips_threshold))
2492 			return -EBADMSG;
2493 	}
2494 
2495 	for (; len >= sizeof(long);
2496 	     len -= sizeof(long), bitmap += sizeof(long)) {
2497 		unsigned long d = *((unsigned long *)bitmap);
2498 		if (d == ~0UL)
2499 			continue;
2500 		weight = hweight_long(d);
2501 		bitflips += BITS_PER_LONG - weight;
2502 		if (unlikely(bitflips > bitflips_threshold))
2503 			return -EBADMSG;
2504 	}
2505 
2506 	for (; len > 0; len--, bitmap++) {
2507 		weight = hweight8(*bitmap);
2508 		bitflips += BITS_PER_BYTE - weight;
2509 		if (unlikely(bitflips > bitflips_threshold))
2510 			return -EBADMSG;
2511 	}
2512 
2513 	return bitflips;
2514 }
2515 
/**
 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
 *				 0xff data
 * @data: data buffer to test
 * @datalen: data length
 * @ecc: ECC buffer
 * @ecclen: ECC length
 * @extraoob: extra OOB buffer
 * @extraooblen: extra OOB length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a data buffer and its associated ECC and OOB data contains only
 * 0xff pattern, which means the underlying region has been erased and is
 * ready to be programmed.
 * The bitflips_threshold specify the maximum number of bitflips before
 * considering the region as not erased.
 *
 * Note:
 * 1/ ECC algorithms are working on pre-defined block sizes which are usually
 *    different from the NAND page size. When fixing bitflips, ECC engines will
 *    report the number of errors per chunk, and the NAND core infrastructure
 *    expect you to return the maximum number of bitflips for the whole page.
 *    This is why you should always use this function on a single chunk and
 *    not on the whole page. After checking each chunk you should update your
 *    max_bitflips value accordingly.
 * 2/ When checking for bitflips in erased pages you should not only check
 *    the payload data but also their associated ECC data, because a user might
 *    have programmed almost all bits to 1 but a few. In this case, we
 *    shouldn't consider the chunk as erased, and checking ECC bytes prevent
 *    this case.
 * 3/ The extraoob argument is optional, and should be used if some of your OOB
 *    data are protected by the ECC engine.
 *    It could also be used if you support subpages and want to attach some
 *    extra OOB data to an ECC chunk.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold. In case of success, the passed buffers are filled with 0xff.
 */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	int data_bitflips, ecc_bitflips, extraoob_bitflips;

	/*
	 * Check the three regions in turn, shrinking the remaining budget
	 * after each one so the overall threshold applies to their sum.
	 */
	data_bitflips = nand_check_erased_buf(data, datalen,
					      bitflips_threshold);
	if (data_bitflips < 0)
		return data_bitflips;
	bitflips_threshold -= data_bitflips;

	ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
	if (ecc_bitflips < 0)
		return ecc_bitflips;
	bitflips_threshold -= ecc_bitflips;

	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
						  bitflips_threshold);
	if (extraoob_bitflips < 0)
		return extraoob_bitflips;

	/* The chunk is considered erased: scrub any bitflips to 0xff. */
	if (data_bitflips)
		memset(data, 0xff, datalen);
	if (ecc_bitflips)
		memset(ecc, 0xff, ecclen);
	if (extraoob_bitflips)
		memset(extraoob, 0xff, extraooblen);

	return data_bitflips + ecc_bitflips + extraoob_bitflips;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2592 
2593 /**
2594  * nand_read_page_raw_notsupp - dummy read raw page function
2595  * @chip: nand chip info structure
2596  * @buf: buffer to store read data
2597  * @oob_required: caller requires OOB data read to chip->oob_poi
2598  * @page: page number to read
2599  *
2600  * Returns -ENOTSUPP unconditionally.
2601  */
2602 int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
2603 			       int oob_required, int page)
2604 {
2605 	return -ENOTSUPP;
2606 }
2607 
2608 /**
2609  * nand_read_page_raw - [INTERN] read raw page data without ecc
2610  * @chip: nand chip info structure
2611  * @buf: buffer to store read data
2612  * @oob_required: caller requires OOB data read to chip->oob_poi
2613  * @page: page number to read
2614  *
2615  * Not for syndrome calculating ECC controllers, which use a special oob layout.
2616  */
2617 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
2618 		       int page)
2619 {
2620 	struct mtd_info *mtd = nand_to_mtd(chip);
2621 	int ret;
2622 
2623 	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2624 	if (ret)
2625 		return ret;
2626 
2627 	if (oob_required) {
2628 		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2629 					false);
2630 		if (ret)
2631 			return ret;
2632 	}
2633 
2634 	return 0;
2635 }
2636 EXPORT_SYMBOL(nand_read_page_raw);
2637 
/**
 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * We need a special oob layout and handling even when OOB isn't used.
 */
static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * On syndrome layouts, data and OOB are interleaved on the flash:
	 * each ECC step is stored as <data><prepad><ecc><postpad>. Read them
	 * in that order, data going to @buf, the rest to chip->oob_poi.
	 * Note that @oob_required is not consulted: OOB is always gathered.
	 */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_read_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_read_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Read whatever OOB bytes remain after the last ECC step. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_read_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return 0;
}
2701 
/**
 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Reads the page raw, computes the ECC in software for each step, then
 * compares it against the ECC codes stored in the OOB area and corrects
 * the data. Returns the maximum number of bitflips found in any single
 * ECC step, or a negative error code.
 */
static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/*
	 * Raw read of data + OOB (oob_required forced to 1 since the stored
	 * ECC codes live there).
	 * NOTE(review): the return value is ignored here — a failed raw read
	 * would be caught by the ECC correction below, but confirm this is
	 * intentional.
	 */
	chip->ecc.read_page_raw(chip, buf, 1, page);

	/* Compute the ECC of each step in software. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	/* Extract the stored ECC codes from the OOB buffer. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	/* Correct each step and accumulate per-step statistics. */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2747 
/**
 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
 * @chip: nand chip info structure
 * @data_offs: offset of requested data within the page
 * @readlen: data length
 * @bufpoi: buffer to store read data
 * @page: page number to read
 *
 * Reads only the ECC steps covering [@data_offs, @data_offs + @readlen),
 * fetches the matching ECC bytes from OOB and corrects the fragment.
 * Returns the maximum number of bitflips in any corrected step, or a
 * negative error code.
 */
static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
			     uint32_t readlen, uint8_t *bufpoi, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int start_step, end_step, num_steps, ret;
	uint8_t *p;
	int data_col_addr, i, gaps = 0;
	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
	/* Bus width in bytes: reads must be aligned to it on 16-bit chips. */
	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
	int index, section = 0;
	unsigned int max_bitflips = 0;
	struct mtd_oob_region oobregion = { };

	/* Column address within the page aligned to ECC size (256bytes) */
	start_step = data_offs / chip->ecc.size;
	end_step = (data_offs + readlen - 1) / chip->ecc.size;
	num_steps = end_step - start_step + 1;
	/* Byte offset of the first relevant ECC code in the ECC area. */
	index = start_step * chip->ecc.bytes;

	/* Data size aligned to ECC ecc.size */
	datafrag_len = num_steps * chip->ecc.size;
	eccfrag_len = num_steps * chip->ecc.bytes;

	data_col_addr = start_step * chip->ecc.size;
	/* If we read not a page aligned data */
	p = bufpoi + data_col_addr;
	ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
	if (ret)
		return ret;

	/* Calculate ECC */
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
		chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);

	/*
	 * The performance is faster if we position offsets according to
	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
	 */
	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
	if (ret)
		return ret;

	if (oobregion.length < eccfrag_len)
		gaps = 1;

	if (gaps) {
		/* ECC bytes are scattered: read the whole OOB area. */
		ret = nand_change_read_column_op(chip, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 false);
		if (ret)
			return ret;
	} else {
		/*
		 * Send the command to read the particular ECC bytes take care
		 * about buswidth alignment in read_buf.
		 */
		aligned_pos = oobregion.offset & ~(busw - 1);
		aligned_len = eccfrag_len;
		/* Extend the read by one byte on each misaligned end. */
		if (oobregion.offset & (busw - 1))
			aligned_len++;
		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
		    (busw - 1))
			aligned_len++;

		ret = nand_change_read_column_op(chip,
						 mtd->writesize + aligned_pos,
						 &chip->oob_poi[aligned_pos],
						 aligned_len, false);
		if (ret)
			return ret;
	}

	/* Pull the stored ECC codes for the fragment out of the OOB buffer. */
	ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
					 chip->oob_poi, index, eccfrag_len);
	if (ret)
		return ret;

	/* Correct each step of the fragment. */
	p = bufpoi + data_col_addr;
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
		int stat;

		stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
					 &chip->ecc.calc_buf[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
						&chip->ecc.code_buf[i],
						chip->ecc.bytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2858 
/**
 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers which need a special oob layout.
 *
 * Returns the maximum number of bitflips in any corrected step, or a
 * negative error code.
 */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Read the page one ECC step at a time, letting the hardware engine
	 * compute the ECC of each step as data flows through it.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* OOB follows the data area; it holds the stored ECC codes. */
	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	/* Correct each step and accumulate per-step statistics. */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
						&ecc_code[i], eccbytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2928 
/**
 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Hardware ECC for large page chips, require OOB to be read first. For this
 * ECC mode, the write_page method is re-used from ECC_HW. These methods
 * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
 * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
 * the data area, by overwriting the NAND manufacturer bad block markings.
 */
static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
					  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	unsigned int max_bitflips = 0;

	/* Read the OOB area first */
	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
	if (ret)
		return ret;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Stored ECC codes are now available before the data is read. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* Read, compute and correct one ECC step at a time. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);

		stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
						&ecc_code[i], eccbytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2998 
/**
 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 *
 * Returns the maximum number of bitflips in any corrected step, or a
 * negative error code.
 */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	/* Total OOB bytes consumed per ECC step on the flash. */
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Each step is stored interleaved as <data><prepad><ecc><postpad>;
	 * read in that order and let the engine build the syndrome.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}
3090 
3091 /**
3092  * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3093  * @chip: NAND chip object
3094  * @oob: oob destination address
3095  * @ops: oob ops structure
3096  * @len: size of oob to transfer
3097  */
3098 static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
3099 				  struct mtd_oob_ops *ops, size_t len)
3100 {
3101 	struct mtd_info *mtd = nand_to_mtd(chip);
3102 	int ret;
3103 
3104 	switch (ops->mode) {
3105 
3106 	case MTD_OPS_PLACE_OOB:
3107 	case MTD_OPS_RAW:
3108 		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
3109 		return oob + len;
3110 
3111 	case MTD_OPS_AUTO_OOB:
3112 		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3113 						  ops->ooboffs, len);
3114 		BUG_ON(ret);
3115 		return oob + len;
3116 
3117 	default:
3118 		BUG();
3119 	}
3120 	return NULL;
3121 }
3122 
3123 /**
3124  * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3125  * @chip: NAND chip object
3126  * @retry_mode: the retry mode to use
3127  *
3128  * Some vendors supply a special command to shift the Vt threshold, to be used
3129  * when there are too many bitflips in a page (i.e., ECC error). After setting
3130  * a new threshold, the host should retry reading the page.
3131  */
3132 static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
3133 {
3134 	pr_debug("setting READ RETRY mode %d\n", retry_mode);
3135 
3136 	if (retry_mode >= chip->read_retries)
3137 		return -EINVAL;
3138 
3139 	if (!chip->setup_read_retry)
3140 		return -EOPNOTSUPP;
3141 
3142 	return chip->setup_read_retry(chip, retry_mode);
3143 }
3144 
3145 static void nand_wait_readrdy(struct nand_chip *chip)
3146 {
3147 	const struct nand_sdr_timings *sdr;
3148 
3149 	if (!(chip->options & NAND_NEED_READRDY))
3150 		return;
3151 
3152 	sdr = nand_get_sdr_timings(&chip->data_interface);
3153 	WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
3154 }
3155 
3156 /**
3157  * nand_do_read_ops - [INTERN] Read data with ECC
3158  * @chip: NAND chip object
3159  * @from: offset to read from
3160  * @ops: oob ops structure
3161  *
3162  * Internal function. Called with chip held.
3163  */
3164 static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
3165 			    struct mtd_oob_ops *ops)
3166 {
3167 	int chipnr, page, realpage, col, bytes, aligned, oob_required;
3168 	struct mtd_info *mtd = nand_to_mtd(chip);
3169 	int ret = 0;
3170 	uint32_t readlen = ops->len;
3171 	uint32_t oobreadlen = ops->ooblen;
3172 	uint32_t max_oobsize = mtd_oobavail(mtd, ops);
3173 
3174 	uint8_t *bufpoi, *oob, *buf;
3175 	int use_bufpoi;
3176 	unsigned int max_bitflips = 0;
3177 	int retry_mode = 0;
3178 	bool ecc_fail = false;
3179 
3180 	chipnr = (int)(from >> chip->chip_shift);
3181 	nand_select_target(chip, chipnr);
3182 
3183 	realpage = (int)(from >> chip->page_shift);
3184 	page = realpage & chip->pagemask;
3185 
3186 	col = (int)(from & (mtd->writesize - 1));
3187 
3188 	buf = ops->datbuf;
3189 	oob = ops->oobbuf;
3190 	oob_required = oob ? 1 : 0;
3191 
3192 	while (1) {
3193 		unsigned int ecc_failures = mtd->ecc_stats.failed;
3194 
3195 		bytes = min(mtd->writesize - col, readlen);
3196 		aligned = (bytes == mtd->writesize);
3197 
3198 		if (!aligned)
3199 			use_bufpoi = 1;
3200 		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
3201 			use_bufpoi = !virt_addr_valid(buf) ||
3202 				     !IS_ALIGNED((unsigned long)buf,
3203 						 chip->buf_align);
3204 		else
3205 			use_bufpoi = 0;
3206 
3207 		/* Is the current page in the buffer? */
3208 		if (realpage != chip->pagebuf || oob) {
3209 			bufpoi = use_bufpoi ? chip->data_buf : buf;
3210 
3211 			if (use_bufpoi && aligned)
3212 				pr_debug("%s: using read bounce buffer for buf@%p\n",
3213 						 __func__, buf);
3214 
3215 read_retry:
3216 			/*
3217 			 * Now read the page into the buffer.  Absent an error,
3218 			 * the read methods return max bitflips per ecc step.
3219 			 */
3220 			if (unlikely(ops->mode == MTD_OPS_RAW))
3221 				ret = chip->ecc.read_page_raw(chip, bufpoi,
3222 							      oob_required,
3223 							      page);
3224 			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
3225 				 !oob)
3226 				ret = chip->ecc.read_subpage(chip, col, bytes,
3227 							     bufpoi, page);
3228 			else
3229 				ret = chip->ecc.read_page(chip, bufpoi,
3230 							  oob_required, page);
3231 			if (ret < 0) {
3232 				if (use_bufpoi)
3233 					/* Invalidate page cache */
3234 					chip->pagebuf = -1;
3235 				break;
3236 			}
3237 
3238 			/* Transfer not aligned data */
3239 			if (use_bufpoi) {
3240 				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
3241 				    !(mtd->ecc_stats.failed - ecc_failures) &&
3242 				    (ops->mode != MTD_OPS_RAW)) {
3243 					chip->pagebuf = realpage;
3244 					chip->pagebuf_bitflips = ret;
3245 				} else {
3246 					/* Invalidate page cache */
3247 					chip->pagebuf = -1;
3248 				}
3249 				memcpy(buf, chip->data_buf + col, bytes);
3250 			}
3251 
3252 			if (unlikely(oob)) {
3253 				int toread = min(oobreadlen, max_oobsize);
3254 
3255 				if (toread) {
3256 					oob = nand_transfer_oob(chip, oob, ops,
3257 								toread);
3258 					oobreadlen -= toread;
3259 				}
3260 			}
3261 
3262 			nand_wait_readrdy(chip);
3263 
3264 			if (mtd->ecc_stats.failed - ecc_failures) {
3265 				if (retry_mode + 1 < chip->read_retries) {
3266 					retry_mode++;
3267 					ret = nand_setup_read_retry(chip,
3268 							retry_mode);
3269 					if (ret < 0)
3270 						break;
3271 
3272 					/* Reset failures; retry */
3273 					mtd->ecc_stats.failed = ecc_failures;
3274 					goto read_retry;
3275 				} else {
3276 					/* No more retry modes; real failure */
3277 					ecc_fail = true;
3278 				}
3279 			}
3280 
3281 			buf += bytes;
3282 			max_bitflips = max_t(unsigned int, max_bitflips, ret);
3283 		} else {
3284 			memcpy(buf, chip->data_buf + col, bytes);
3285 			buf += bytes;
3286 			max_bitflips = max_t(unsigned int, max_bitflips,
3287 					     chip->pagebuf_bitflips);
3288 		}
3289 
3290 		readlen -= bytes;
3291 
3292 		/* Reset to retry mode 0 */
3293 		if (retry_mode) {
3294 			ret = nand_setup_read_retry(chip, 0);
3295 			if (ret < 0)
3296 				break;
3297 			retry_mode = 0;
3298 		}
3299 
3300 		if (!readlen)
3301 			break;
3302 
3303 		/* For subsequent reads align to page boundary */
3304 		col = 0;
3305 		/* Increment page address */
3306 		realpage++;
3307 
3308 		page = realpage & chip->pagemask;
3309 		/* Check, if we cross a chip boundary */
3310 		if (!page) {
3311 			chipnr++;
3312 			nand_deselect_target(chip);
3313 			nand_select_target(chip, chipnr);
3314 		}
3315 	}
3316 	nand_deselect_target(chip);
3317 
3318 	ops->retlen = ops->len - (size_t) readlen;
3319 	if (oob)
3320 		ops->oobretlen = ops->ooblen - oobreadlen;
3321 
3322 	if (ret < 0)
3323 		return ret;
3324 
3325 	if (ecc_fail)
3326 		return -EBADMSG;
3327 
3328 	return max_bitflips;
3329 }
3330 
3331 /**
3332  * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
3333  * @chip: nand chip info structure
3334  * @page: page number to read
3335  */
3336 int nand_read_oob_std(struct nand_chip *chip, int page)
3337 {
3338 	struct mtd_info *mtd = nand_to_mtd(chip);
3339 
3340 	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3341 }
3342 EXPORT_SYMBOL(nand_read_oob_std);
3343 
3344 /**
3345  * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3346  *			    with syndromes
3347  * @chip: nand chip info structure
3348  * @page: page number to read
3349  */
3350 static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3351 {
3352 	struct mtd_info *mtd = nand_to_mtd(chip);
3353 	int length = mtd->oobsize;
3354 	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3355 	int eccsize = chip->ecc.size;
3356 	uint8_t *bufpoi = chip->oob_poi;
3357 	int i, toread, sndrnd = 0, pos, ret;
3358 
3359 	ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3360 	if (ret)
3361 		return ret;
3362 
3363 	for (i = 0; i < chip->ecc.steps; i++) {
3364 		if (sndrnd) {
3365 			int ret;
3366 
3367 			pos = eccsize + i * (eccsize + chunk);
3368 			if (mtd->writesize > 512)
3369 				ret = nand_change_read_column_op(chip, pos,
3370 								 NULL, 0,
3371 								 false);
3372 			else
3373 				ret = nand_read_page_op(chip, page, pos, NULL,
3374 							0);
3375 
3376 			if (ret)
3377 				return ret;
3378 		} else
3379 			sndrnd = 1;
3380 		toread = min_t(int, length, chunk);
3381 
3382 		ret = nand_read_data_op(chip, bufpoi, toread, false);
3383 		if (ret)
3384 			return ret;
3385 
3386 		bufpoi += toread;
3387 		length -= toread;
3388 	}
3389 	if (length > 0) {
3390 		ret = nand_read_data_op(chip, bufpoi, length, false);
3391 		if (ret)
3392 			return ret;
3393 	}
3394 
3395 	return 0;
3396 }
3397 
3398 /**
3399  * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
3400  * @chip: nand chip info structure
3401  * @page: page number to write
3402  */
3403 int nand_write_oob_std(struct nand_chip *chip, int page)
3404 {
3405 	struct mtd_info *mtd = nand_to_mtd(chip);
3406 
3407 	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3408 				 mtd->oobsize);
3409 }
3410 EXPORT_SYMBOL(nand_write_oob_std);
3411 
3412 /**
3413  * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
3414  *			     with syndrome - only for large page flash
3415  * @chip: nand chip info structure
3416  * @page: page number to write
3417  */
3418 static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
3419 {
3420 	struct mtd_info *mtd = nand_to_mtd(chip);
3421 	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3422 	int eccsize = chip->ecc.size, length = mtd->oobsize;
3423 	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
3424 	const uint8_t *bufpoi = chip->oob_poi;
3425 
3426 	/*
3427 	 * data-ecc-data-ecc ... ecc-oob
3428 	 * or
3429 	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
3430 	 */
3431 	if (!chip->ecc.prepad && !chip->ecc.postpad) {
3432 		pos = steps * (eccsize + chunk);
3433 		steps = 0;
3434 	} else
3435 		pos = eccsize;
3436 
3437 	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
3438 	if (ret)
3439 		return ret;
3440 
3441 	for (i = 0; i < steps; i++) {
3442 		if (sndcmd) {
3443 			if (mtd->writesize <= 512) {
3444 				uint32_t fill = 0xFFFFFFFF;
3445 
3446 				len = eccsize;
3447 				while (len > 0) {
3448 					int num = min_t(int, len, 4);
3449 
3450 					ret = nand_write_data_op(chip, &fill,
3451 								 num, false);
3452 					if (ret)
3453 						return ret;
3454 
3455 					len -= num;
3456 				}
3457 			} else {
3458 				pos = eccsize + i * (eccsize + chunk);
3459 				ret = nand_change_write_column_op(chip, pos,
3460 								  NULL, 0,
3461 								  false);
3462 				if (ret)
3463 					return ret;
3464 			}
3465 		} else
3466 			sndcmd = 1;
3467 		len = min_t(int, length, chunk);
3468 
3469 		ret = nand_write_data_op(chip, bufpoi, len, false);
3470 		if (ret)
3471 			return ret;
3472 
3473 		bufpoi += len;
3474 		length -= len;
3475 	}
3476 	if (length > 0) {
3477 		ret = nand_write_data_op(chip, bufpoi, length, false);
3478 		if (ret)
3479 			return ret;
3480 	}
3481 
3482 	return nand_prog_page_end_op(chip);
3483 }
3484 
3485 /**
3486  * nand_do_read_oob - [INTERN] NAND read out-of-band
3487  * @chip: NAND chip object
3488  * @from: offset to read from
3489  * @ops: oob operations description structure
3490  *
3491  * NAND read out-of-band data from the spare area.
3492  */
3493 static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
3494 			    struct mtd_oob_ops *ops)
3495 {
3496 	struct mtd_info *mtd = nand_to_mtd(chip);
3497 	unsigned int max_bitflips = 0;
3498 	int page, realpage, chipnr;
3499 	struct mtd_ecc_stats stats;
3500 	int readlen = ops->ooblen;
3501 	int len;
3502 	uint8_t *buf = ops->oobbuf;
3503 	int ret = 0;
3504 
3505 	pr_debug("%s: from = 0x%08Lx, len = %i\n",
3506 			__func__, (unsigned long long)from, readlen);
3507 
3508 	stats = mtd->ecc_stats;
3509 
3510 	len = mtd_oobavail(mtd, ops);
3511 
3512 	chipnr = (int)(from >> chip->chip_shift);
3513 	nand_select_target(chip, chipnr);
3514 
3515 	/* Shift to get page */
3516 	realpage = (int)(from >> chip->page_shift);
3517 	page = realpage & chip->pagemask;
3518 
3519 	while (1) {
3520 		if (ops->mode == MTD_OPS_RAW)
3521 			ret = chip->ecc.read_oob_raw(chip, page);
3522 		else
3523 			ret = chip->ecc.read_oob(chip, page);
3524 
3525 		if (ret < 0)
3526 			break;
3527 
3528 		len = min(len, readlen);
3529 		buf = nand_transfer_oob(chip, buf, ops, len);
3530 
3531 		nand_wait_readrdy(chip);
3532 
3533 		max_bitflips = max_t(unsigned int, max_bitflips, ret);
3534 
3535 		readlen -= len;
3536 		if (!readlen)
3537 			break;
3538 
3539 		/* Increment page address */
3540 		realpage++;
3541 
3542 		page = realpage & chip->pagemask;
3543 		/* Check, if we cross a chip boundary */
3544 		if (!page) {
3545 			chipnr++;
3546 			nand_deselect_target(chip);
3547 			nand_select_target(chip, chipnr);
3548 		}
3549 	}
3550 	nand_deselect_target(chip);
3551 
3552 	ops->oobretlen = ops->ooblen - readlen;
3553 
3554 	if (ret < 0)
3555 		return ret;
3556 
3557 	if (mtd->ecc_stats.failed - stats.failed)
3558 		return -EBADMSG;
3559 
3560 	return max_bitflips;
3561 }
3562 
3563 /**
3564  * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3565  * @mtd: MTD device structure
3566  * @from: offset to read from
3567  * @ops: oob operation description structure
3568  *
3569  * NAND read data and/or out-of-band data.
3570  */
3571 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3572 			 struct mtd_oob_ops *ops)
3573 {
3574 	struct nand_chip *chip = mtd_to_nand(mtd);
3575 	int ret;
3576 
3577 	ops->retlen = 0;
3578 
3579 	if (ops->mode != MTD_OPS_PLACE_OOB &&
3580 	    ops->mode != MTD_OPS_AUTO_OOB &&
3581 	    ops->mode != MTD_OPS_RAW)
3582 		return -ENOTSUPP;
3583 
3584 	nand_get_device(chip, FL_READING);
3585 
3586 	if (!ops->datbuf)
3587 		ret = nand_do_read_oob(chip, from, ops);
3588 	else
3589 		ret = nand_do_read_ops(chip, from, ops);
3590 
3591 	nand_release_device(chip);
3592 	return ret;
3593 }
3594 
3595 /**
3596  * nand_write_page_raw_notsupp - dummy raw page write function
3597  * @chip: nand chip info structure
3598  * @buf: data buffer
3599  * @oob_required: must write chip->oob_poi to OOB
3600  * @page: page number to write
3601  *
3602  * Returns -ENOTSUPP unconditionally.
3603  */
3604 int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
3605 				int oob_required, int page)
3606 {
3607 	return -ENOTSUPP;
3608 }
3609 
3610 /**
3611  * nand_write_page_raw - [INTERN] raw page write function
3612  * @chip: nand chip info structure
3613  * @buf: data buffer
3614  * @oob_required: must write chip->oob_poi to OOB
3615  * @page: page number to write
3616  *
3617  * Not for syndrome calculating ECC controllers, which use a special oob layout.
3618  */
3619 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
3620 			int oob_required, int page)
3621 {
3622 	struct mtd_info *mtd = nand_to_mtd(chip);
3623 	int ret;
3624 
3625 	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3626 	if (ret)
3627 		return ret;
3628 
3629 	if (oob_required) {
3630 		ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3631 					 false);
3632 		if (ret)
3633 			return ret;
3634 	}
3635 
3636 	return nand_prog_page_end_op(chip);
3637 }
3638 EXPORT_SYMBOL(nand_write_page_raw);
3639 
3640 /**
3641  * nand_write_page_raw_syndrome - [INTERN] raw page write function
3642  * @chip: nand chip info structure
3643  * @buf: data buffer
3644  * @oob_required: must write chip->oob_poi to OOB
3645  * @page: page number to write
3646  *
3647  * We need a special oob layout and handling even when ECC isn't checked.
3648  */
3649 static int nand_write_page_raw_syndrome(struct nand_chip *chip,
3650 					const uint8_t *buf, int oob_required,
3651 					int page)
3652 {
3653 	struct mtd_info *mtd = nand_to_mtd(chip);
3654 	int eccsize = chip->ecc.size;
3655 	int eccbytes = chip->ecc.bytes;
3656 	uint8_t *oob = chip->oob_poi;
3657 	int steps, size, ret;
3658 
3659 	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3660 	if (ret)
3661 		return ret;
3662 
3663 	for (steps = chip->ecc.steps; steps > 0; steps--) {
3664 		ret = nand_write_data_op(chip, buf, eccsize, false);
3665 		if (ret)
3666 			return ret;
3667 
3668 		buf += eccsize;
3669 
3670 		if (chip->ecc.prepad) {
3671 			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3672 						 false);
3673 			if (ret)
3674 				return ret;
3675 
3676 			oob += chip->ecc.prepad;
3677 		}
3678 
3679 		ret = nand_write_data_op(chip, oob, eccbytes, false);
3680 		if (ret)
3681 			return ret;
3682 
3683 		oob += eccbytes;
3684 
3685 		if (chip->ecc.postpad) {
3686 			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3687 						 false);
3688 			if (ret)
3689 				return ret;
3690 
3691 			oob += chip->ecc.postpad;
3692 		}
3693 	}
3694 
3695 	size = mtd->oobsize - (oob - chip->oob_poi);
3696 	if (size) {
3697 		ret = nand_write_data_op(chip, oob, size, false);
3698 		if (ret)
3699 			return ret;
3700 	}
3701 
3702 	return nand_prog_page_end_op(chip);
3703 }
3704 /**
3705  * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
3706  * @chip: nand chip info structure
3707  * @buf: data buffer
3708  * @oob_required: must write chip->oob_poi to OOB
3709  * @page: page number to write
3710  */
3711 static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
3712 				 int oob_required, int page)
3713 {
3714 	struct mtd_info *mtd = nand_to_mtd(chip);
3715 	int i, eccsize = chip->ecc.size, ret;
3716 	int eccbytes = chip->ecc.bytes;
3717 	int eccsteps = chip->ecc.steps;
3718 	uint8_t *ecc_calc = chip->ecc.calc_buf;
3719 	const uint8_t *p = buf;
3720 
3721 	/* Software ECC calculation */
3722 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
3723 		chip->ecc.calculate(chip, p, &ecc_calc[i]);
3724 
3725 	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3726 					 chip->ecc.total);
3727 	if (ret)
3728 		return ret;
3729 
3730 	return chip->ecc.write_page_raw(chip, buf, 1, page);
3731 }
3732 
3733 /**
3734  * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
3735  * @chip: nand chip info structure
3736  * @buf: data buffer
3737  * @oob_required: must write chip->oob_poi to OOB
3738  * @page: page number to write
3739  */
3740 static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
3741 				 int oob_required, int page)
3742 {
3743 	struct mtd_info *mtd = nand_to_mtd(chip);
3744 	int i, eccsize = chip->ecc.size, ret;
3745 	int eccbytes = chip->ecc.bytes;
3746 	int eccsteps = chip->ecc.steps;
3747 	uint8_t *ecc_calc = chip->ecc.calc_buf;
3748 	const uint8_t *p = buf;
3749 
3750 	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3751 	if (ret)
3752 		return ret;
3753 
3754 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3755 		chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3756 
3757 		ret = nand_write_data_op(chip, p, eccsize, false);
3758 		if (ret)
3759 			return ret;
3760 
3761 		chip->ecc.calculate(chip, p, &ecc_calc[i]);
3762 	}
3763 
3764 	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3765 					 chip->ecc.total);
3766 	if (ret)
3767 		return ret;
3768 
3769 	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3770 	if (ret)
3771 		return ret;
3772 
3773 	return nand_prog_page_end_op(chip);
3774 }
3775 
3776 
3777 /**
3778  * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
3779  * @chip:	nand chip info structure
3780  * @offset:	column address of subpage within the page
3781  * @data_len:	data length
3782  * @buf:	data buffer
3783  * @oob_required: must write chip->oob_poi to OOB
3784  * @page: page number to write
3785  */
3786 static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
3787 				    uint32_t data_len, const uint8_t *buf,
3788 				    int oob_required, int page)
3789 {
3790 	struct mtd_info *mtd = nand_to_mtd(chip);
3791 	uint8_t *oob_buf  = chip->oob_poi;
3792 	uint8_t *ecc_calc = chip->ecc.calc_buf;
3793 	int ecc_size      = chip->ecc.size;
3794 	int ecc_bytes     = chip->ecc.bytes;
3795 	int ecc_steps     = chip->ecc.steps;
3796 	uint32_t start_step = offset / ecc_size;
3797 	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
3798 	int oob_bytes       = mtd->oobsize / ecc_steps;
3799 	int step, ret;
3800 
3801 	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3802 	if (ret)
3803 		return ret;
3804 
3805 	for (step = 0; step < ecc_steps; step++) {
3806 		/* configure controller for WRITE access */
3807 		chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3808 
3809 		/* write data (untouched subpages already masked by 0xFF) */
3810 		ret = nand_write_data_op(chip, buf, ecc_size, false);
3811 		if (ret)
3812 			return ret;
3813 
3814 		/* mask ECC of un-touched subpages by padding 0xFF */
3815 		if ((step < start_step) || (step > end_step))
3816 			memset(ecc_calc, 0xff, ecc_bytes);
3817 		else
3818 			chip->ecc.calculate(chip, buf, ecc_calc);
3819 
3820 		/* mask OOB of un-touched subpages by padding 0xFF */
3821 		/* if oob_required, preserve OOB metadata of written subpage */
3822 		if (!oob_required || (step < start_step) || (step > end_step))
3823 			memset(oob_buf, 0xff, oob_bytes);
3824 
3825 		buf += ecc_size;
3826 		ecc_calc += ecc_bytes;
3827 		oob_buf  += oob_bytes;
3828 	}
3829 
3830 	/* copy calculated ECC for whole page to chip->buffer->oob */
3831 	/* this include masked-value(0xFF) for unwritten subpages */
3832 	ecc_calc = chip->ecc.calc_buf;
3833 	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3834 					 chip->ecc.total);
3835 	if (ret)
3836 		return ret;
3837 
3838 	/* write OOB buffer to NAND device */
3839 	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3840 	if (ret)
3841 		return ret;
3842 
3843 	return nand_prog_page_end_op(chip);
3844 }
3845 
3846 
3847 /**
3848  * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
3849  * @chip: nand chip info structure
3850  * @buf: data buffer
3851  * @oob_required: must write chip->oob_poi to OOB
3852  * @page: page number to write
3853  *
3854  * The hw generator calculates the error syndrome automatically. Therefore we
3855  * need a special oob layout and handling.
3856  */
3857 static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
3858 				    int oob_required, int page)
3859 {
3860 	struct mtd_info *mtd = nand_to_mtd(chip);
3861 	int i, eccsize = chip->ecc.size;
3862 	int eccbytes = chip->ecc.bytes;
3863 	int eccsteps = chip->ecc.steps;
3864 	const uint8_t *p = buf;
3865 	uint8_t *oob = chip->oob_poi;
3866 	int ret;
3867 
3868 	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3869 	if (ret)
3870 		return ret;
3871 
3872 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3873 		chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3874 
3875 		ret = nand_write_data_op(chip, p, eccsize, false);
3876 		if (ret)
3877 			return ret;
3878 
3879 		if (chip->ecc.prepad) {
3880 			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3881 						 false);
3882 			if (ret)
3883 				return ret;
3884 
3885 			oob += chip->ecc.prepad;
3886 		}
3887 
3888 		chip->ecc.calculate(chip, p, oob);
3889 
3890 		ret = nand_write_data_op(chip, oob, eccbytes, false);
3891 		if (ret)
3892 			return ret;
3893 
3894 		oob += eccbytes;
3895 
3896 		if (chip->ecc.postpad) {
3897 			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3898 						 false);
3899 			if (ret)
3900 				return ret;
3901 
3902 			oob += chip->ecc.postpad;
3903 		}
3904 	}
3905 
3906 	/* Calculate remaining oob bytes */
3907 	i = mtd->oobsize - (oob - chip->oob_poi);
3908 	if (i) {
3909 		ret = nand_write_data_op(chip, oob, i, false);
3910 		if (ret)
3911 			return ret;
3912 	}
3913 
3914 	return nand_prog_page_end_op(chip);
3915 }
3916 
3917 /**
3918  * nand_write_page - write one page
3919  * @chip: NAND chip descriptor
3920  * @offset: address offset within the page
3921  * @data_len: length of actual data to be written
3922  * @buf: the data to write
3923  * @oob_required: must write chip->oob_poi to OOB
3924  * @page: page number to write
3925  * @raw: use _raw version of write_page
3926  */
3927 static int nand_write_page(struct nand_chip *chip, uint32_t offset,
3928 			   int data_len, const uint8_t *buf, int oob_required,
3929 			   int page, int raw)
3930 {
3931 	struct mtd_info *mtd = nand_to_mtd(chip);
3932 	int status, subpage;
3933 
3934 	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
3935 		chip->ecc.write_subpage)
3936 		subpage = offset || (data_len < mtd->writesize);
3937 	else
3938 		subpage = 0;
3939 
3940 	if (unlikely(raw))
3941 		status = chip->ecc.write_page_raw(chip, buf, oob_required,
3942 						  page);
3943 	else if (subpage)
3944 		status = chip->ecc.write_subpage(chip, offset, data_len, buf,
3945 						 oob_required, page);
3946 	else
3947 		status = chip->ecc.write_page(chip, buf, oob_required, page);
3948 
3949 	if (status < 0)
3950 		return status;
3951 
3952 	return 0;
3953 }
3954 
/* True if @x is not aligned to the subpage size; parenthesize the argument
 * so lower-precedence expressions expand correctly. Uses 'chip' in scope. */
#define NOTALIGNED(x)	(((x) & (chip->subpagesize - 1)) != 0)
3956 
3957 /**
3958  * nand_do_write_ops - [INTERN] NAND write with ECC
3959  * @chip: NAND chip object
3960  * @to: offset to write to
3961  * @ops: oob operations description structure
3962  *
3963  * NAND write with ECC.
3964  */
3965 static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
3966 			     struct mtd_oob_ops *ops)
3967 {
3968 	struct mtd_info *mtd = nand_to_mtd(chip);
3969 	int chipnr, realpage, page, column;
3970 	uint32_t writelen = ops->len;
3971 
3972 	uint32_t oobwritelen = ops->ooblen;
3973 	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
3974 
3975 	uint8_t *oob = ops->oobbuf;
3976 	uint8_t *buf = ops->datbuf;
3977 	int ret;
3978 	int oob_required = oob ? 1 : 0;
3979 
3980 	ops->retlen = 0;
3981 	if (!writelen)
3982 		return 0;
3983 
3984 	/* Reject writes, which are not page aligned */
3985 	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
3986 		pr_notice("%s: attempt to write non page aligned data\n",
3987 			   __func__);
3988 		return -EINVAL;
3989 	}
3990 
3991 	column = to & (mtd->writesize - 1);
3992 
3993 	chipnr = (int)(to >> chip->chip_shift);
3994 	nand_select_target(chip, chipnr);
3995 
3996 	/* Check, if it is write protected */
3997 	if (nand_check_wp(chip)) {
3998 		ret = -EIO;
3999 		goto err_out;
4000 	}
4001 
4002 	realpage = (int)(to >> chip->page_shift);
4003 	page = realpage & chip->pagemask;
4004 
4005 	/* Invalidate the page cache, when we write to the cached page */
4006 	if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
4007 	    ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
4008 		chip->pagebuf = -1;
4009 
4010 	/* Don't allow multipage oob writes with offset */
4011 	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
4012 		ret = -EINVAL;
4013 		goto err_out;
4014 	}
4015 
4016 	while (1) {
4017 		int bytes = mtd->writesize;
4018 		uint8_t *wbuf = buf;
4019 		int use_bufpoi;
4020 		int part_pagewr = (column || writelen < mtd->writesize);
4021 
4022 		if (part_pagewr)
4023 			use_bufpoi = 1;
4024 		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
4025 			use_bufpoi = !virt_addr_valid(buf) ||
4026 				     !IS_ALIGNED((unsigned long)buf,
4027 						 chip->buf_align);
4028 		else
4029 			use_bufpoi = 0;
4030 
4031 		/* Partial page write?, or need to use bounce buffer */
4032 		if (use_bufpoi) {
4033 			pr_debug("%s: using write bounce buffer for buf@%p\n",
4034 					 __func__, buf);
4035 			if (part_pagewr)
4036 				bytes = min_t(int, bytes - column, writelen);
4037 			chip->pagebuf = -1;
4038 			memset(chip->data_buf, 0xff, mtd->writesize);
4039 			memcpy(&chip->data_buf[column], buf, bytes);
4040 			wbuf = chip->data_buf;
4041 		}
4042 
4043 		if (unlikely(oob)) {
4044 			size_t len = min(oobwritelen, oobmaxlen);
4045 			oob = nand_fill_oob(chip, oob, len, ops);
4046 			oobwritelen -= len;
4047 		} else {
4048 			/* We still need to erase leftover OOB data */
4049 			memset(chip->oob_poi, 0xff, mtd->oobsize);
4050 		}
4051 
4052 		ret = nand_write_page(chip, column, bytes, wbuf,
4053 				      oob_required, page,
4054 				      (ops->mode == MTD_OPS_RAW));
4055 		if (ret)
4056 			break;
4057 
4058 		writelen -= bytes;
4059 		if (!writelen)
4060 			break;
4061 
4062 		column = 0;
4063 		buf += bytes;
4064 		realpage++;
4065 
4066 		page = realpage & chip->pagemask;
4067 		/* Check, if we cross a chip boundary */
4068 		if (!page) {
4069 			chipnr++;
4070 			nand_deselect_target(chip);
4071 			nand_select_target(chip, chipnr);
4072 		}
4073 	}
4074 
4075 	ops->retlen = ops->len - writelen;
4076 	if (unlikely(oob))
4077 		ops->oobretlen = ops->ooblen;
4078 
4079 err_out:
4080 	nand_deselect_target(chip);
4081 	return ret;
4082 }
4083 
4084 /**
4085  * panic_nand_write - [MTD Interface] NAND write with ECC
4086  * @mtd: MTD device structure
4087  * @to: offset to write to
4088  * @len: number of bytes to write
4089  * @retlen: pointer to variable to store the number of written bytes
4090  * @buf: the data to write
4091  *
4092  * NAND write with ECC. Used when performing writes in interrupt context, this
4093  * may for example be called by mtdoops when writing an oops while in panic.
4094  */
4095 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4096 			    size_t *retlen, const uint8_t *buf)
4097 {
4098 	struct nand_chip *chip = mtd_to_nand(mtd);
4099 	int chipnr = (int)(to >> chip->chip_shift);
4100 	struct mtd_oob_ops ops;
4101 	int ret;
4102 
4103 	/* Grab the device */
4104 	panic_nand_get_device(chip, FL_WRITING);
4105 
4106 	nand_select_target(chip, chipnr);
4107 
4108 	/* Wait for the device to get ready */
4109 	panic_nand_wait(chip, 400);
4110 
4111 	memset(&ops, 0, sizeof(ops));
4112 	ops.len = len;
4113 	ops.datbuf = (uint8_t *)buf;
4114 	ops.mode = MTD_OPS_PLACE_OOB;
4115 
4116 	ret = nand_do_write_ops(chip, to, &ops);
4117 
4118 	*retlen = ops.retlen;
4119 	return ret;
4120 }
4121 
4122 /**
4123  * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
4124  * @mtd: MTD device structure
4125  * @to: offset to write to
4126  * @ops: oob operation description structure
4127  */
4128 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4129 			  struct mtd_oob_ops *ops)
4130 {
4131 	struct nand_chip *chip = mtd_to_nand(mtd);
4132 	int ret = -ENOTSUPP;
4133 
4134 	ops->retlen = 0;
4135 
4136 	nand_get_device(chip, FL_WRITING);
4137 
4138 	switch (ops->mode) {
4139 	case MTD_OPS_PLACE_OOB:
4140 	case MTD_OPS_AUTO_OOB:
4141 	case MTD_OPS_RAW:
4142 		break;
4143 
4144 	default:
4145 		goto out;
4146 	}
4147 
4148 	if (!ops->datbuf)
4149 		ret = nand_do_write_oob(chip, to, ops);
4150 	else
4151 		ret = nand_do_write_ops(chip, to, ops);
4152 
4153 out:
4154 	nand_release_device(chip);
4155 	return ret;
4156 }
4157 
4158 /**
4159  * single_erase - [GENERIC] NAND standard block erase command function
4160  * @chip: NAND chip object
4161  * @page: the page address of the block which will be erased
4162  *
4163  * Standard erase command for NAND chips. Returns NAND status.
4164  */
4165 static int single_erase(struct nand_chip *chip, int page)
4166 {
4167 	unsigned int eraseblock;
4168 
4169 	/* Send commands to erase a block */
4170 	eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);
4171 
4172 	return nand_erase_op(chip, eraseblock);
4173 }
4174 
4175 /**
4176  * nand_erase - [MTD Interface] erase block(s)
4177  * @mtd: MTD device structure
4178  * @instr: erase instruction
4179  *
4180  * Erase one ore more blocks.
4181  */
4182 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
4183 {
4184 	return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
4185 }
4186 
4187 /**
4188  * nand_erase_nand - [INTERN] erase block(s)
4189  * @chip: NAND chip object
4190  * @instr: erase instruction
4191  * @allowbbt: allow erasing the bbt area
4192  *
4193  * Erase one ore more blocks.
4194  */
4195 int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
4196 		    int allowbbt)
4197 {
4198 	int page, status, pages_per_block, ret, chipnr;
4199 	loff_t len;
4200 
4201 	pr_debug("%s: start = 0x%012llx, len = %llu\n",
4202 			__func__, (unsigned long long)instr->addr,
4203 			(unsigned long long)instr->len);
4204 
4205 	if (check_offs_len(chip, instr->addr, instr->len))
4206 		return -EINVAL;
4207 
4208 	/* Grab the lock and see if the device is available */
4209 	nand_get_device(chip, FL_ERASING);
4210 
4211 	/* Shift to get first page */
4212 	page = (int)(instr->addr >> chip->page_shift);
4213 	chipnr = (int)(instr->addr >> chip->chip_shift);
4214 
4215 	/* Calculate pages in each block */
4216 	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
4217 
4218 	/* Select the NAND device */
4219 	nand_select_target(chip, chipnr);
4220 
4221 	/* Check, if it is write protected */
4222 	if (nand_check_wp(chip)) {
4223 		pr_debug("%s: device is write protected!\n",
4224 				__func__);
4225 		ret = -EIO;
4226 		goto erase_exit;
4227 	}
4228 
4229 	/* Loop through the pages */
4230 	len = instr->len;
4231 
4232 	while (len) {
4233 		/* Check if we have a bad block, we do not erase bad blocks! */
4234 		if (nand_block_checkbad(chip, ((loff_t) page) <<
4235 					chip->page_shift, allowbbt)) {
4236 			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
4237 				    __func__, page);
4238 			ret = -EIO;
4239 			goto erase_exit;
4240 		}
4241 
4242 		/*
4243 		 * Invalidate the page cache, if we erase the block which
4244 		 * contains the current cached page.
4245 		 */
4246 		if (page <= chip->pagebuf && chip->pagebuf <
4247 		    (page + pages_per_block))
4248 			chip->pagebuf = -1;
4249 
4250 		if (chip->legacy.erase)
4251 			status = chip->legacy.erase(chip,
4252 						    page & chip->pagemask);
4253 		else
4254 			status = single_erase(chip, page & chip->pagemask);
4255 
4256 		/* See if block erase succeeded */
4257 		if (status) {
4258 			pr_debug("%s: failed erase, page 0x%08x\n",
4259 					__func__, page);
4260 			ret = -EIO;
4261 			instr->fail_addr =
4262 				((loff_t)page << chip->page_shift);
4263 			goto erase_exit;
4264 		}
4265 
4266 		/* Increment page address and decrement length */
4267 		len -= (1ULL << chip->phys_erase_shift);
4268 		page += pages_per_block;
4269 
4270 		/* Check, if we cross a chip boundary */
4271 		if (len && !(page & chip->pagemask)) {
4272 			chipnr++;
4273 			nand_deselect_target(chip);
4274 			nand_select_target(chip, chipnr);
4275 		}
4276 	}
4277 
4278 	ret = 0;
4279 erase_exit:
4280 
4281 	/* Deselect and wake up anyone waiting on the device */
4282 	nand_deselect_target(chip);
4283 	nand_release_device(chip);
4284 
4285 	/* Return more or less happy */
4286 	return ret;
4287 }
4288 
4289 /**
4290  * nand_sync - [MTD Interface] sync
4291  * @mtd: MTD device structure
4292  *
4293  * Sync is actually a wait for chip ready function.
4294  */
4295 static void nand_sync(struct mtd_info *mtd)
4296 {
4297 	struct nand_chip *chip = mtd_to_nand(mtd);
4298 
4299 	pr_debug("%s: called\n", __func__);
4300 
4301 	/* Grab the lock and see if the device is available */
4302 	nand_get_device(chip, FL_SYNCING);
4303 	/* Release it and go back */
4304 	nand_release_device(chip);
4305 }
4306 
4307 /**
4308  * nand_block_isbad - [MTD Interface] Check if block at offset is bad
4309  * @mtd: MTD device structure
4310  * @offs: offset relative to mtd start
4311  */
4312 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4313 {
4314 	struct nand_chip *chip = mtd_to_nand(mtd);
4315 	int chipnr = (int)(offs >> chip->chip_shift);
4316 	int ret;
4317 
4318 	/* Select the NAND device */
4319 	nand_get_device(chip, FL_READING);
4320 	nand_select_target(chip, chipnr);
4321 
4322 	ret = nand_block_checkbad(chip, offs, 0);
4323 
4324 	nand_deselect_target(chip);
4325 	nand_release_device(chip);
4326 
4327 	return ret;
4328 }
4329 
4330 /**
4331  * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
4332  * @mtd: MTD device structure
4333  * @ofs: offset relative to mtd start
4334  */
4335 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4336 {
4337 	int ret;
4338 
4339 	ret = nand_block_isbad(mtd, ofs);
4340 	if (ret) {
4341 		/* If it was bad already, return success and do nothing */
4342 		if (ret > 0)
4343 			return 0;
4344 		return ret;
4345 	}
4346 
4347 	return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4348 }
4349 
4350 /**
4351  * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
4352  * @mtd: MTD device structure
4353  * @ofs: offset relative to mtd start
4354  * @len: length of mtd
4355  */
4356 static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
4357 {
4358 	struct nand_chip *chip = mtd_to_nand(mtd);
4359 	u32 part_start_block;
4360 	u32 part_end_block;
4361 	u32 part_start_die;
4362 	u32 part_end_die;
4363 
4364 	/*
4365 	 * max_bb_per_die and blocks_per_die used to determine
4366 	 * the maximum bad block count.
4367 	 */
4368 	if (!chip->max_bb_per_die || !chip->blocks_per_die)
4369 		return -ENOTSUPP;
4370 
4371 	/* Get the start and end of the partition in erase blocks. */
4372 	part_start_block = mtd_div_by_eb(ofs, mtd);
4373 	part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
4374 
4375 	/* Get the start and end LUNs of the partition. */
4376 	part_start_die = part_start_block / chip->blocks_per_die;
4377 	part_end_die = part_end_block / chip->blocks_per_die;
4378 
4379 	/*
4380 	 * Look up the bad blocks per unit and multiply by the number of units
4381 	 * that the partition spans.
4382 	 */
4383 	return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
4384 }
4385 
4386 /**
4387  * nand_suspend - [MTD Interface] Suspend the NAND flash
4388  * @mtd: MTD device structure
4389  */
4390 static int nand_suspend(struct mtd_info *mtd)
4391 {
4392 	return nand_get_device(mtd_to_nand(mtd), FL_PM_SUSPENDED);
4393 }
4394 
4395 /**
4396  * nand_resume - [MTD Interface] Resume the NAND flash
4397  * @mtd: MTD device structure
4398  */
4399 static void nand_resume(struct mtd_info *mtd)
4400 {
4401 	struct nand_chip *chip = mtd_to_nand(mtd);
4402 
4403 	if (chip->state == FL_PM_SUSPENDED)
4404 		nand_release_device(chip);
4405 	else
4406 		pr_err("%s called for a chip which is not in suspended state\n",
4407 			__func__);
4408 }
4409 
4410 /**
4411  * nand_shutdown - [MTD Interface] Finish the current NAND operation and
4412  *                 prevent further operations
4413  * @mtd: MTD device structure
4414  */
4415 static void nand_shutdown(struct mtd_info *mtd)
4416 {
4417 	nand_get_device(mtd_to_nand(mtd), FL_PM_SUSPENDED);
4418 }
4419 
/*
 * Set default functions: fall back to the dummy controller and legacy
 * hooks when the driver did not provide its own.
 */
static void nand_set_defaults(struct nand_chip *chip)
{
	/* If no controller is provided, use the dummy, legacy one. */
	if (!chip->controller) {
		chip->controller = &chip->legacy.dummy_controller;
		nand_controller_init(chip->controller);
	}

	/* Fill in default legacy hooks */
	nand_legacy_set_defaults(chip);

	/* No special buffer alignment required unless the driver set one */
	if (!chip->buf_align)
		chip->buf_align = 1;
}
4434 
4435 /* Sanitize ONFI strings so we can safely print them */
4436 void sanitize_string(uint8_t *s, size_t len)
4437 {
4438 	ssize_t i;
4439 
4440 	/* Null terminate */
4441 	s[len - 1] = 0;
4442 
4443 	/* Remove non printable chars */
4444 	for (i = 0; i < len - 1; i++) {
4445 		if (s[i] < ' ' || s[i] > 127)
4446 			s[i] = '?';
4447 	}
4448 
4449 	/* Remove trailing spaces */
4450 	strim(s);
4451 }
4452 
4453 /*
4454  * nand_id_has_period - Check if an ID string has a given wraparound period
4455  * @id_data: the ID string
4456  * @arrlen: the length of the @id_data array
4457  * @period: the period of repitition
4458  *
4459  * Check if an ID string is repeated within a given sequence of bytes at
4460  * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
4461  * period of 3). This is a helper function for nand_id_len(). Returns non-zero
4462  * if the repetition has a period of @period; otherwise, returns zero.
4463  */
4464 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
4465 {
4466 	int i, j;
4467 	for (i = 0; i < period; i++)
4468 		for (j = i + period; j < arrlen; j += period)
4469 			if (id_data[i] != id_data[j])
4470 				return 0;
4471 	return 1;
4472 }
4473 
4474 /*
4475  * nand_id_len - Get the length of an ID string returned by CMD_READID
4476  * @id_data: the ID string
4477  * @arrlen: the length of the @id_data array
4478 
4479  * Returns the length of the ID string, according to known wraparound/trailing
4480  * zero patterns. If no pattern exists, returns the length of the array.
4481  */
4482 static int nand_id_len(u8 *id_data, int arrlen)
4483 {
4484 	int last_nonzero, period;
4485 
4486 	/* Find last non-zero byte */
4487 	for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
4488 		if (id_data[last_nonzero])
4489 			break;
4490 
4491 	/* All zeros */
4492 	if (last_nonzero < 0)
4493 		return 0;
4494 
4495 	/* Calculate wraparound period */
4496 	for (period = 1; period < arrlen; period++)
4497 		if (nand_id_has_period(id_data, arrlen, period))
4498 			break;
4499 
4500 	/* There's a repeated pattern */
4501 	if (period < arrlen)
4502 		return period;
4503 
4504 	/* There are trailing zeros */
4505 	if (last_nonzero < arrlen - 1)
4506 		return last_nonzero + 1;
4507 
4508 	/* No pattern detected */
4509 	return arrlen;
4510 }
4511 
4512 /* Extract the bits of per cell from the 3rd byte of the extended ID */
4513 static int nand_get_bits_per_cell(u8 cellinfo)
4514 {
4515 	int bits;
4516 
4517 	bits = cellinfo & NAND_CI_CELLTYPE_MSK;
4518 	bits >>= NAND_CI_CELLTYPE_SHIFT;
4519 	return bits + 1;
4520 }
4521 
4522 /*
4523  * Many new NAND share similar device ID codes, which represent the size of the
4524  * chip. The rest of the parameters must be decoded according to generic or
4525  * manufacturer-specific "extended ID" decoding patterns.
4526  */
4527 void nand_decode_ext_id(struct nand_chip *chip)
4528 {
4529 	struct mtd_info *mtd = nand_to_mtd(chip);
4530 	int extid;
4531 	u8 *id_data = chip->id.data;
4532 	/* The 3rd id byte holds MLC / multichip data */
4533 	chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4534 	/* The 4th id byte is the important one */
4535 	extid = id_data[3];
4536 
4537 	/* Calc pagesize */
4538 	mtd->writesize = 1024 << (extid & 0x03);
4539 	extid >>= 2;
4540 	/* Calc oobsize */
4541 	mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
4542 	extid >>= 2;
4543 	/* Calc blocksize. Blocksize is multiples of 64KiB */
4544 	mtd->erasesize = (64 * 1024) << (extid & 0x03);
4545 	extid >>= 2;
4546 	/* Get buswidth information */
4547 	if (extid & 0x1)
4548 		chip->options |= NAND_BUSWIDTH_16;
4549 }
4550 EXPORT_SYMBOL_GPL(nand_decode_ext_id);
4551 
4552 /*
4553  * Old devices have chip data hardcoded in the device ID table. nand_decode_id
4554  * decodes a matching ID table entry and assigns the MTD size parameters for
4555  * the chip.
4556  */
4557 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
4558 {
4559 	struct mtd_info *mtd = nand_to_mtd(chip);
4560 
4561 	mtd->erasesize = type->erasesize;
4562 	mtd->writesize = type->pagesize;
4563 	mtd->oobsize = mtd->writesize / 32;
4564 
4565 	/* All legacy ID NAND are small-page, SLC */
4566 	chip->bits_per_cell = 1;
4567 }
4568 
4569 /*
4570  * Set the bad block marker/indicator (BBM/BBI) patterns according to some
4571  * heuristic patterns using various detected parameters (e.g., manufacturer,
4572  * page size, cell-type information).
4573  */
4574 static void nand_decode_bbm_options(struct nand_chip *chip)
4575 {
4576 	struct mtd_info *mtd = nand_to_mtd(chip);
4577 
4578 	/* Set the bad block position */
4579 	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4580 		chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
4581 	else
4582 		chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
4583 }
4584 
4585 static inline bool is_full_id_nand(struct nand_flash_dev *type)
4586 {
4587 	return type->id_len;
4588 }
4589 
4590 static bool find_full_id_nand(struct nand_chip *chip,
4591 			      struct nand_flash_dev *type)
4592 {
4593 	struct mtd_info *mtd = nand_to_mtd(chip);
4594 	u8 *id_data = chip->id.data;
4595 
4596 	if (!strncmp(type->id, id_data, type->id_len)) {
4597 		mtd->writesize = type->pagesize;
4598 		mtd->erasesize = type->erasesize;
4599 		mtd->oobsize = type->oobsize;
4600 
4601 		chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4602 		chip->chipsize = (uint64_t)type->chipsize << 20;
4603 		chip->options |= type->options;
4604 		chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
4605 		chip->ecc_step_ds = NAND_ECC_STEP(type);
4606 		chip->onfi_timing_mode_default =
4607 					type->onfi_timing_mode_default;
4608 
4609 		chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4610 		if (!chip->parameters.model)
4611 			return false;
4612 
4613 		return true;
4614 	}
4615 	return false;
4616 }
4617 
4618 /*
4619  * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
4620  * compliant and does not have a full-id or legacy-id entry in the nand_ids
4621  * table.
4622  */
4623 static void nand_manufacturer_detect(struct nand_chip *chip)
4624 {
4625 	/*
4626 	 * Try manufacturer detection if available and use
4627 	 * nand_decode_ext_id() otherwise.
4628 	 */
4629 	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4630 	    chip->manufacturer.desc->ops->detect) {
4631 		/* The 3rd id byte holds MLC / multichip data */
4632 		chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
4633 		chip->manufacturer.desc->ops->detect(chip);
4634 	} else {
4635 		nand_decode_ext_id(chip);
4636 	}
4637 }
4638 
4639 /*
4640  * Manufacturer initialization. This function is called for all NANDs including
4641  * ONFI and JEDEC compliant ones.
4642  * Manufacturer drivers should put all their specific initialization code in
4643  * their ->init() hook.
4644  */
4645 static int nand_manufacturer_init(struct nand_chip *chip)
4646 {
4647 	if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4648 	    !chip->manufacturer.desc->ops->init)
4649 		return 0;
4650 
4651 	return chip->manufacturer.desc->ops->init(chip);
4652 }
4653 
4654 /*
4655  * Manufacturer cleanup. This function is called for all NANDs including
4656  * ONFI and JEDEC compliant ones.
4657  * Manufacturer drivers should put all their specific cleanup code in their
4658  * ->cleanup() hook.
4659  */
4660 static void nand_manufacturer_cleanup(struct nand_chip *chip)
4661 {
4662 	/* Release manufacturer private data */
4663 	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4664 	    chip->manufacturer.desc->ops->cleanup)
4665 		chip->manufacturer.desc->ops->cleanup(chip);
4666 }
4667 
4668 static const char *
4669 nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
4670 {
4671 	return manufacturer ? manufacturer->name : "Unknown";
4672 }
4673 
4674 /*
4675  * Get the flash and manufacturer id and lookup if the type is supported.
4676  */
4677 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
4678 {
4679 	const struct nand_manufacturer *manufacturer;
4680 	struct mtd_info *mtd = nand_to_mtd(chip);
4681 	int busw, ret;
4682 	u8 *id_data = chip->id.data;
4683 	u8 maf_id, dev_id;
4684 
4685 	/*
4686 	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
4687 	 * after power-up.
4688 	 */
4689 	ret = nand_reset(chip, 0);
4690 	if (ret)
4691 		return ret;
4692 
4693 	/* Select the device */
4694 	nand_select_target(chip, 0);
4695 
4696 	/* Send the command for reading device ID */
4697 	ret = nand_readid_op(chip, 0, id_data, 2);
4698 	if (ret)
4699 		return ret;
4700 
4701 	/* Read manufacturer and device IDs */
4702 	maf_id = id_data[0];
4703 	dev_id = id_data[1];
4704 
4705 	/*
4706 	 * Try again to make sure, as some systems the bus-hold or other
4707 	 * interface concerns can cause random data which looks like a
4708 	 * possibly credible NAND flash to appear. If the two results do
4709 	 * not match, ignore the device completely.
4710 	 */
4711 
4712 	/* Read entire ID string */
4713 	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
4714 	if (ret)
4715 		return ret;
4716 
4717 	if (id_data[0] != maf_id || id_data[1] != dev_id) {
4718 		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
4719 			maf_id, dev_id, id_data[0], id_data[1]);
4720 		return -ENODEV;
4721 	}
4722 
4723 	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
4724 
4725 	/* Try to identify manufacturer */
4726 	manufacturer = nand_get_manufacturer(maf_id);
4727 	chip->manufacturer.desc = manufacturer;
4728 
4729 	if (!type)
4730 		type = nand_flash_ids;
4731 
4732 	/*
4733 	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
4734 	 * override it.
4735 	 * This is required to make sure initial NAND bus width set by the
4736 	 * NAND controller driver is coherent with the real NAND bus width
4737 	 * (extracted by auto-detection code).
4738 	 */
4739 	busw = chip->options & NAND_BUSWIDTH_16;
4740 
4741 	/*
4742 	 * The flag is only set (never cleared), reset it to its default value
4743 	 * before starting auto-detection.
4744 	 */
4745 	chip->options &= ~NAND_BUSWIDTH_16;
4746 
4747 	for (; type->name != NULL; type++) {
4748 		if (is_full_id_nand(type)) {
4749 			if (find_full_id_nand(chip, type))
4750 				goto ident_done;
4751 		} else if (dev_id == type->dev_id) {
4752 			break;
4753 		}
4754 	}
4755 
4756 	if (!type->name || !type->pagesize) {
4757 		/* Check if the chip is ONFI compliant */
4758 		ret = nand_onfi_detect(chip);
4759 		if (ret < 0)
4760 			return ret;
4761 		else if (ret)
4762 			goto ident_done;
4763 
4764 		/* Check if the chip is JEDEC compliant */
4765 		ret = nand_jedec_detect(chip);
4766 		if (ret < 0)
4767 			return ret;
4768 		else if (ret)
4769 			goto ident_done;
4770 	}
4771 
4772 	if (!type->name)
4773 		return -ENODEV;
4774 
4775 	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4776 	if (!chip->parameters.model)
4777 		return -ENOMEM;
4778 
4779 	chip->chipsize = (uint64_t)type->chipsize << 20;
4780 
4781 	if (!type->pagesize)
4782 		nand_manufacturer_detect(chip);
4783 	else
4784 		nand_decode_id(chip, type);
4785 
4786 	/* Get chip options */
4787 	chip->options |= type->options;
4788 
4789 ident_done:
4790 	if (!mtd->name)
4791 		mtd->name = chip->parameters.model;
4792 
4793 	if (chip->options & NAND_BUSWIDTH_AUTO) {
4794 		WARN_ON(busw & NAND_BUSWIDTH_16);
4795 		nand_set_defaults(chip);
4796 	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
4797 		/*
4798 		 * Check, if buswidth is correct. Hardware drivers should set
4799 		 * chip correct!
4800 		 */
4801 		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4802 			maf_id, dev_id);
4803 		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4804 			mtd->name);
4805 		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
4806 			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
4807 		ret = -EINVAL;
4808 
4809 		goto free_detect_allocation;
4810 	}
4811 
4812 	nand_decode_bbm_options(chip);
4813 
4814 	/* Calculate the address shift from the page size */
4815 	chip->page_shift = ffs(mtd->writesize) - 1;
4816 	/* Convert chipsize to number of pages per chip -1 */
4817 	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
4818 
4819 	chip->bbt_erase_shift = chip->phys_erase_shift =
4820 		ffs(mtd->erasesize) - 1;
4821 	if (chip->chipsize & 0xffffffff)
4822 		chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
4823 	else {
4824 		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
4825 		chip->chip_shift += 32 - 1;
4826 	}
4827 
4828 	if (chip->chip_shift - chip->page_shift > 16)
4829 		chip->options |= NAND_ROW_ADDR_3;
4830 
4831 	chip->badblockbits = 8;
4832 
4833 	nand_legacy_adjust_cmdfunc(chip);
4834 
4835 	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4836 		maf_id, dev_id);
4837 	pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4838 		chip->parameters.model);
4839 	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4840 		(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
4841 		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
4842 	return 0;
4843 
4844 free_detect_allocation:
4845 	kfree(chip->parameters.model);
4846 
4847 	return ret;
4848 }
4849 
/* DT "nand-ecc-mode" property values, indexed by the ECC mode enum */
static const char * const nand_ecc_modes[] = {
	[NAND_ECC_NONE]		= "none",
	[NAND_ECC_SOFT]		= "soft",
	[NAND_ECC_HW]		= "hw",
	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
	[NAND_ECC_ON_DIE]	= "on-die",
};
4858 
4859 static int of_get_nand_ecc_mode(struct device_node *np)
4860 {
4861 	const char *pm;
4862 	int err, i;
4863 
4864 	err = of_property_read_string(np, "nand-ecc-mode", &pm);
4865 	if (err < 0)
4866 		return err;
4867 
4868 	for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4869 		if (!strcasecmp(pm, nand_ecc_modes[i]))
4870 			return i;
4871 
4872 	/*
4873 	 * For backward compatibility we support few obsoleted values that don't
4874 	 * have their mappings into nand_ecc_modes_t anymore (they were merged
4875 	 * with other enums).
4876 	 */
4877 	if (!strcasecmp(pm, "soft_bch"))
4878 		return NAND_ECC_SOFT;
4879 
4880 	return -ENODEV;
4881 }
4882 
/* DT "nand-ecc-algo" property values, indexed by the ECC algorithm enum */
static const char * const nand_ecc_algos[] = {
	[NAND_ECC_HAMMING]	= "hamming",
	[NAND_ECC_BCH]		= "bch",
	[NAND_ECC_RS]		= "rs",
};
4888 
4889 static int of_get_nand_ecc_algo(struct device_node *np)
4890 {
4891 	const char *pm;
4892 	int err, i;
4893 
4894 	err = of_property_read_string(np, "nand-ecc-algo", &pm);
4895 	if (!err) {
4896 		for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4897 			if (!strcasecmp(pm, nand_ecc_algos[i]))
4898 				return i;
4899 		return -ENODEV;
4900 	}
4901 
4902 	/*
4903 	 * For backward compatibility we also read "nand-ecc-mode" checking
4904 	 * for some obsoleted values that were specifying ECC algorithm.
4905 	 */
4906 	err = of_property_read_string(np, "nand-ecc-mode", &pm);
4907 	if (err < 0)
4908 		return err;
4909 
4910 	if (!strcasecmp(pm, "soft"))
4911 		return NAND_ECC_HAMMING;
4912 	else if (!strcasecmp(pm, "soft_bch"))
4913 		return NAND_ECC_BCH;
4914 
4915 	return -ENODEV;
4916 }
4917 
4918 static int of_get_nand_ecc_step_size(struct device_node *np)
4919 {
4920 	int ret;
4921 	u32 val;
4922 
4923 	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4924 	return ret ? ret : val;
4925 }
4926 
4927 static int of_get_nand_ecc_strength(struct device_node *np)
4928 {
4929 	int ret;
4930 	u32 val;
4931 
4932 	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4933 	return ret ? ret : val;
4934 }
4935 
4936 static int of_get_nand_bus_width(struct device_node *np)
4937 {
4938 	u32 val;
4939 
4940 	if (of_property_read_u32(np, "nand-bus-width", &val))
4941 		return 8;
4942 
4943 	switch (val) {
4944 	case 8:
4945 	case 16:
4946 		return val;
4947 	default:
4948 		return -EIO;
4949 	}
4950 }
4951 
/* True when the DT node requests storing the bad block table on flash */
static bool of_get_nand_on_flash_bbt(struct device_node *np)
{
	return of_property_read_bool(np, "nand-on-flash-bbt");
}
4956 
4957 static int nand_dt_init(struct nand_chip *chip)
4958 {
4959 	struct device_node *dn = nand_get_flash_node(chip);
4960 	int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4961 
4962 	if (!dn)
4963 		return 0;
4964 
4965 	if (of_get_nand_bus_width(dn) == 16)
4966 		chip->options |= NAND_BUSWIDTH_16;
4967 
4968 	if (of_property_read_bool(dn, "nand-is-boot-medium"))
4969 		chip->options |= NAND_IS_BOOT_MEDIUM;
4970 
4971 	if (of_get_nand_on_flash_bbt(dn))
4972 		chip->bbt_options |= NAND_BBT_USE_FLASH;
4973 
4974 	ecc_mode = of_get_nand_ecc_mode(dn);
4975 	ecc_algo = of_get_nand_ecc_algo(dn);
4976 	ecc_strength = of_get_nand_ecc_strength(dn);
4977 	ecc_step = of_get_nand_ecc_step_size(dn);
4978 
4979 	if (ecc_mode >= 0)
4980 		chip->ecc.mode = ecc_mode;
4981 
4982 	if (ecc_algo >= 0)
4983 		chip->ecc.algo = ecc_algo;
4984 
4985 	if (ecc_strength >= 0)
4986 		chip->ecc.strength = ecc_strength;
4987 
4988 	if (ecc_step > 0)
4989 		chip->ecc.size = ecc_step;
4990 
4991 	if (of_property_read_bool(dn, "nand-ecc-maximize"))
4992 		chip->ecc.options |= NAND_ECC_MAXIMIZE;
4993 
4994 	return 0;
4995 }
4996 
4997 /**
4998  * nand_scan_ident - Scan for the NAND device
4999  * @chip: NAND chip object
5000  * @maxchips: number of chips to scan for
5001  * @table: alternative NAND ID table
5002  *
5003  * This is the first phase of the normal nand_scan() function. It reads the
5004  * flash ID and sets up MTD fields accordingly.
5005  *
5006  * This helper used to be called directly from controller drivers that needed
5007  * to tweak some ECC-related parameters before nand_scan_tail(). This separation
5008  * prevented dynamic allocations during this phase which was unconvenient and
5009  * as been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
5010  */
5011 static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
5012 			   struct nand_flash_dev *table)
5013 {
5014 	struct mtd_info *mtd = nand_to_mtd(chip);
5015 	int nand_maf_id, nand_dev_id;
5016 	unsigned int i;
5017 	int ret;
5018 
5019 	/* Assume all dies are deselected when we enter nand_scan_ident(). */
5020 	chip->cur_cs = -1;
5021 
5022 	/* Enforce the right timings for reset/detection */
5023 	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
5024 
5025 	ret = nand_dt_init(chip);
5026 	if (ret)
5027 		return ret;
5028 
5029 	if (!mtd->name && mtd->dev.parent)
5030 		mtd->name = dev_name(mtd->dev.parent);
5031 
5032 	/*
5033 	 * Start with chips->numchips = maxchips to let nand_select_target() do
5034 	 * its job. chip->numchips will be adjusted after.
5035 	 */
5036 	chip->numchips = maxchips;
5037 
5038 	/* Set the default functions */
5039 	nand_set_defaults(chip);
5040 
5041 	ret = nand_legacy_check_hooks(chip);
5042 	if (ret)
5043 		return ret;
5044 
5045 	/* Read the flash type */
5046 	ret = nand_detect(chip, table);
5047 	if (ret) {
5048 		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
5049 			pr_warn("No NAND device found\n");
5050 		nand_deselect_target(chip);
5051 		return ret;
5052 	}
5053 
5054 	nand_maf_id = chip->id.data[0];
5055 	nand_dev_id = chip->id.data[1];
5056 
5057 	nand_deselect_target(chip);
5058 
5059 	/* Check for a chip array */
5060 	for (i = 1; i < maxchips; i++) {
5061 		u8 id[2];
5062 
5063 		/* See comment in nand_get_flash_type for reset */
5064 		nand_reset(chip, i);
5065 
5066 		nand_select_target(chip, i);
5067 		/* Send the command for reading device ID */
5068 		nand_readid_op(chip, 0, id, sizeof(id));
5069 		/* Read manufacturer and device IDs */
5070 		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
5071 			nand_deselect_target(chip);
5072 			break;
5073 		}
5074 		nand_deselect_target(chip);
5075 	}
5076 	if (i > 1)
5077 		pr_info("%d chips detected\n", i);
5078 
5079 	/* Store the number of chips and calc total size for mtd */
5080 	chip->numchips = i;
5081 	mtd->size = i * chip->chipsize;
5082 
5083 	return 0;
5084 }
5085 
/* Free the identification data allocated during nand_scan_ident() */
static void nand_scan_ident_cleanup(struct nand_chip *chip)
{
	kfree(chip->parameters.model);
	kfree(chip->parameters.onfi);
}
5091 
/*
 * Wire up the software ECC implementation (Hamming or BCH) selected in
 * chip->ecc.algo: install the calculate/correct/read/write hooks and fill
 * in default step size and strength. Returns 0 on success, -EINVAL on an
 * unsupported configuration.
 */
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	/* Caller must have selected software ECC */
	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_HAMMING:
		ecc->calculate = nand_calculate_ecc;
		ecc->correct = nand_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		/* Hamming: 3 ECC bytes per 256-byte step, 1 bit correctable */
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		return 0;
	case NAND_ECC_BCH:
		if (!mtd_nand_has_bch()) {
			WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = nand_bch_calculate_ecc;
		ecc->correct = nand_bch_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * Board driver should supply ecc.size and ecc.strength
		 * values to select how many bits are correctable.
		 * Otherwise, default to 4 bits for large page devices.
		 */
		if (!ecc->size && (mtd->oobsize >= 64)) {
			ecc->size = 512;
			ecc->strength = 4;
		}

		/*
		 * if no ecc placement scheme was provided pickup the default
		 * large page one.
		 */
		if (!mtd->ooblayout) {
			/* handle large page devices only */
			if (mtd->oobsize < 64) {
				WARN(1, "OOB layout is required when using software BCH on small pages\n");
				return -EINVAL;
			}

			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);

		}

		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
		    ecc->options & NAND_ECC_MAXIMIZE) {
			int steps, bytes;

			/* Always prefer 1k blocks over 512bytes ones */
			ecc->size = 1024;
			steps = mtd->writesize / ecc->size;

			/* Reserve 2 bytes for the BBM */
			bytes = (mtd->oobsize - 2) / steps;
			ecc->strength = bytes * 8 / fls(8 * ecc->size);
		}

		/* See nand_bch_init() for details. */
		ecc->bytes = 0;
		ecc->priv = nand_bch_init(mtd);
		if (!ecc->priv) {
			WARN(1, "BCH ECC initialization failed!\n");
			return -EINVAL;
		}
		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}
5191 
5192 /**
5193  * nand_check_ecc_caps - check the sanity of preset ECC settings
5194  * @chip: nand chip info structure
5195  * @caps: ECC caps info structure
5196  * @oobavail: OOB size that the ECC engine can use
5197  *
5198  * When ECC step size and strength are already set, check if they are supported
5199  * by the controller and the calculated ECC bytes fit within the chip's OOB.
5200  * On success, the calculated ECC bytes is set.
5201  */
5202 static int
5203 nand_check_ecc_caps(struct nand_chip *chip,
5204 		    const struct nand_ecc_caps *caps, int oobavail)
5205 {
5206 	struct mtd_info *mtd = nand_to_mtd(chip);
5207 	const struct nand_ecc_step_info *stepinfo;
5208 	int preset_step = chip->ecc.size;
5209 	int preset_strength = chip->ecc.strength;
5210 	int ecc_bytes, nsteps = mtd->writesize / preset_step;
5211 	int i, j;
5212 
5213 	for (i = 0; i < caps->nstepinfos; i++) {
5214 		stepinfo = &caps->stepinfos[i];
5215 
5216 		if (stepinfo->stepsize != preset_step)
5217 			continue;
5218 
5219 		for (j = 0; j < stepinfo->nstrengths; j++) {
5220 			if (stepinfo->strengths[j] != preset_strength)
5221 				continue;
5222 
5223 			ecc_bytes = caps->calc_ecc_bytes(preset_step,
5224 							 preset_strength);
5225 			if (WARN_ON_ONCE(ecc_bytes < 0))
5226 				return ecc_bytes;
5227 
5228 			if (ecc_bytes * nsteps > oobavail) {
5229 				pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
5230 				       preset_step, preset_strength);
5231 				return -ENOSPC;
5232 			}
5233 
5234 			chip->ecc.bytes = ecc_bytes;
5235 
5236 			return 0;
5237 		}
5238 	}
5239 
5240 	pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
5241 	       preset_step, preset_strength);
5242 
5243 	return -ENOTSUPP;
5244 }
5245 
5246 /**
5247  * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
5248  * @chip: nand chip info structure
5249  * @caps: ECC engine caps info structure
5250  * @oobavail: OOB size that the ECC engine can use
5251  *
5252  * If a chip's ECC requirement is provided, try to meet it with the least
5253  * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
5254  * On success, the chosen ECC settings are set.
5255  */
5256 static int
5257 nand_match_ecc_req(struct nand_chip *chip,
5258 		   const struct nand_ecc_caps *caps, int oobavail)
5259 {
5260 	struct mtd_info *mtd = nand_to_mtd(chip);
5261 	const struct nand_ecc_step_info *stepinfo;
5262 	int req_step = chip->ecc_step_ds;
5263 	int req_strength = chip->ecc_strength_ds;
5264 	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
5265 	int best_step, best_strength, best_ecc_bytes;
5266 	int best_ecc_bytes_total = INT_MAX;
5267 	int i, j;
5268 
5269 	/* No information provided by the NAND chip */
5270 	if (!req_step || !req_strength)
5271 		return -ENOTSUPP;
5272 
5273 	/* number of correctable bits the chip requires in a page */
5274 	req_corr = mtd->writesize / req_step * req_strength;
5275 
5276 	for (i = 0; i < caps->nstepinfos; i++) {
5277 		stepinfo = &caps->stepinfos[i];
5278 		step_size = stepinfo->stepsize;
5279 
5280 		for (j = 0; j < stepinfo->nstrengths; j++) {
5281 			strength = stepinfo->strengths[j];
5282 
5283 			/*
5284 			 * If both step size and strength are smaller than the
5285 			 * chip's requirement, it is not easy to compare the
5286 			 * resulted reliability.
5287 			 */
5288 			if (step_size < req_step && strength < req_strength)
5289 				continue;
5290 
5291 			if (mtd->writesize % step_size)
5292 				continue;
5293 
5294 			nsteps = mtd->writesize / step_size;
5295 
5296 			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5297 			if (WARN_ON_ONCE(ecc_bytes < 0))
5298 				continue;
5299 			ecc_bytes_total = ecc_bytes * nsteps;
5300 
5301 			if (ecc_bytes_total > oobavail ||
5302 			    strength * nsteps < req_corr)
5303 				continue;
5304 
5305 			/*
5306 			 * We assume the best is to meet the chip's requrement
5307 			 * with the least number of ECC bytes.
5308 			 */
5309 			if (ecc_bytes_total < best_ecc_bytes_total) {
5310 				best_ecc_bytes_total = ecc_bytes_total;
5311 				best_step = step_size;
5312 				best_strength = strength;
5313 				best_ecc_bytes = ecc_bytes;
5314 			}
5315 		}
5316 	}
5317 
5318 	if (best_ecc_bytes_total == INT_MAX)
5319 		return -ENOTSUPP;
5320 
5321 	chip->ecc.size = best_step;
5322 	chip->ecc.strength = best_strength;
5323 	chip->ecc.bytes = best_ecc_bytes;
5324 
5325 	return 0;
5326 }
5327 
5328 /**
5329  * nand_maximize_ecc - choose the max ECC strength available
5330  * @chip: nand chip info structure
5331  * @caps: ECC engine caps info structure
5332  * @oobavail: OOB size that the ECC engine can use
5333  *
5334  * Choose the max ECC strength that is supported on the controller, and can fit
5335  * within the chip's OOB.  On success, the chosen ECC settings are set.
5336  */
5337 static int
5338 nand_maximize_ecc(struct nand_chip *chip,
5339 		  const struct nand_ecc_caps *caps, int oobavail)
5340 {
5341 	struct mtd_info *mtd = nand_to_mtd(chip);
5342 	const struct nand_ecc_step_info *stepinfo;
5343 	int step_size, strength, nsteps, ecc_bytes, corr;
5344 	int best_corr = 0;
5345 	int best_step = 0;
5346 	int best_strength, best_ecc_bytes;
5347 	int i, j;
5348 
5349 	for (i = 0; i < caps->nstepinfos; i++) {
5350 		stepinfo = &caps->stepinfos[i];
5351 		step_size = stepinfo->stepsize;
5352 
5353 		/* If chip->ecc.size is already set, respect it */
5354 		if (chip->ecc.size && step_size != chip->ecc.size)
5355 			continue;
5356 
5357 		for (j = 0; j < stepinfo->nstrengths; j++) {
5358 			strength = stepinfo->strengths[j];
5359 
5360 			if (mtd->writesize % step_size)
5361 				continue;
5362 
5363 			nsteps = mtd->writesize / step_size;
5364 
5365 			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5366 			if (WARN_ON_ONCE(ecc_bytes < 0))
5367 				continue;
5368 
5369 			if (ecc_bytes * nsteps > oobavail)
5370 				continue;
5371 
5372 			corr = strength * nsteps;
5373 
5374 			/*
5375 			 * If the number of correctable bits is the same,
5376 			 * bigger step_size has more reliability.
5377 			 */
5378 			if (corr > best_corr ||
5379 			    (corr == best_corr && step_size > best_step)) {
5380 				best_corr = corr;
5381 				best_step = step_size;
5382 				best_strength = strength;
5383 				best_ecc_bytes = ecc_bytes;
5384 			}
5385 		}
5386 	}
5387 
5388 	if (!best_corr)
5389 		return -ENOTSUPP;
5390 
5391 	chip->ecc.size = best_step;
5392 	chip->ecc.strength = best_strength;
5393 	chip->ecc.bytes = best_ecc_bytes;
5394 
5395 	return 0;
5396 }
5397 
5398 /**
5399  * nand_ecc_choose_conf - Set the ECC strength and ECC step size
5400  * @chip: nand chip info structure
5401  * @caps: ECC engine caps info structure
5402  * @oobavail: OOB size that the ECC engine can use
5403  *
5404  * Choose the ECC configuration according to following logic
5405  *
5406  * 1. If both ECC step size and ECC strength are already set (usually by DT)
5407  *    then check if it is supported by this controller.
5408  * 2. If NAND_ECC_MAXIMIZE is set, then select maximum ECC strength.
5409  * 3. Otherwise, try to match the ECC step size and ECC strength closest
5410  *    to the chip's requirement. If available OOB size can't fit the chip
5411  *    requirement then fallback to the maximum ECC step size and ECC strength.
5412  *
5413  * On success, the chosen ECC settings are set.
5414  */
5415 int nand_ecc_choose_conf(struct nand_chip *chip,
5416 			 const struct nand_ecc_caps *caps, int oobavail)
5417 {
5418 	struct mtd_info *mtd = nand_to_mtd(chip);
5419 
5420 	if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
5421 		return -EINVAL;
5422 
5423 	if (chip->ecc.size && chip->ecc.strength)
5424 		return nand_check_ecc_caps(chip, caps, oobavail);
5425 
5426 	if (chip->ecc.options & NAND_ECC_MAXIMIZE)
5427 		return nand_maximize_ecc(chip, caps, oobavail);
5428 
5429 	if (!nand_match_ecc_req(chip, caps, oobavail))
5430 		return 0;
5431 
5432 	return nand_maximize_ecc(chip, caps, oobavail);
5433 }
5434 EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
5435 
5436 /*
5437  * Check if the chip configuration meet the datasheet requirements.
5438 
5439  * If our configuration corrects A bits per B bytes and the minimum
5440  * required correction level is X bits per Y bytes, then we must ensure
5441  * both of the following are true:
5442  *
5443  * (1) A / B >= X / Y
5444  * (2) A >= X
5445  *
5446  * Requirement (1) ensures we can correct for the required bitflip density.
5447  * Requirement (2) ensures we can correct even when all bitflips are clumped
5448  * in the same sector.
5449  */
5450 static bool nand_ecc_strength_good(struct nand_chip *chip)
5451 {
5452 	struct mtd_info *mtd = nand_to_mtd(chip);
5453 	struct nand_ecc_ctrl *ecc = &chip->ecc;
5454 	int corr, ds_corr;
5455 
5456 	if (ecc->size == 0 || chip->ecc_step_ds == 0)
5457 		/* Not enough information */
5458 		return true;
5459 
5460 	/*
5461 	 * We get the number of corrected bits per page to compare
5462 	 * the correction density.
5463 	 */
5464 	corr = (mtd->writesize * ecc->strength) / ecc->size;
5465 	ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
5466 
5467 	return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
5468 }
5469 
5470 /**
5471  * nand_scan_tail - Scan for the NAND device
5472  * @chip: NAND chip object
5473  *
5474  * This is the second phase of the normal nand_scan() function. It fills out
5475  * all the uninitialized function pointers with the defaults and scans for a
5476  * bad block table if appropriate.
5477  */
static int nand_scan_tail(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
		   !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
		return -EINVAL;
	}

	/* One buffer holds both a full page of data and its OOB area */
	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!chip->data_buf)
		return -ENOMEM;

	/*
	 * FIXME: some NAND manufacturer drivers expect the first die to be
	 * selected when manufacturer->init() is called. They should be fixed
	 * to explicitly select the relevant die when interacting with the NAND
	 * chip.
	 */
	nand_select_target(chip, 0);
	ret = nand_manufacturer_init(chip);
	nand_deselect_target(chip);
	if (ret)
		goto err_free_buf;

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->data_buf + mtd->writesize;

	/*
	 * If no default placement scheme is given, select an appropriate one.
	 */
	if (!mtd->ooblayout &&
	    !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
		switch (mtd->oobsize) {
		case 8:
		case 16:
			mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
			break;
		case 64:
		case 128:
			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
			break;
		default:
			/*
			 * Expose the whole OOB area to users if ECC_NONE
			 * is passed. We could do that for all kind of
			 * ->oobsize, but we must keep the old large/small
			 * page with ECC layout when ->oobsize <= 128 for
			 * compatibility reasons.
			 */
			if (ecc->mode == NAND_ECC_NONE) {
				mtd_set_ooblayout(mtd,
						&nand_ooblayout_lp_ops);
				break;
			}

			WARN(1, "No oob scheme defined for oobsize %d\n",
				mtd->oobsize);
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
	}

	/*
	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
	 * selected and we have 256 byte pagesize fallback to software ECC
	 */

	switch (ecc->mode) {
	case NAND_ECC_HW_OOB_FIRST:
		/* Similar to NAND_ECC_HW, but a separate read_page handle */
		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc_oob_first;

		/* fall through: share the remaining HW ECC defaults */
	case NAND_ECC_HW:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;

		/* fall through: sanity checks below apply to all HW modes */
	case NAND_ECC_HW_SYNDROME:
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				ret = -EINVAL;
				goto err_nand_manuf_cleanup;
			}
			break;
		}
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->mode = NAND_ECC_SOFT;
		ecc->algo = NAND_ECC_HAMMING;

		/* fall through: finish setting up the SW ECC fallback */
	case NAND_ECC_SOFT:
		ret = nand_set_ecc_soft_ops(chip);
		if (ret) {
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		break;

	case NAND_ECC_ON_DIE:
		if (!ecc->read_page || !ecc->write_page) {
			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		break;

	case NAND_ECC_NONE:
		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/* Scratch buffers for ECC computation/comparison, one OOB each */
	if (ecc->correct || ecc->calculate) {
		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!ecc->calc_buf || !ecc->code_buf) {
			ret = -ENOMEM;
			goto err_nand_manuf_cleanup;
		}
	}

	/* For many systems, the standard OOB write also works for raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* propagate ecc info to mtd_info */
	mtd->ecc_strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode.
	 */
	ecc->steps = mtd->writesize / ecc->size;
	if (ecc->steps * ecc->size != mtd->writesize) {
		WARN(1, "Invalid ECC parameters\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}
	ecc->total = ecc->steps * ecc->bytes;
	if (ecc->total > mtd->oobsize) {
		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area.
	 */
	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		ret = 0;

	mtd->oobavail = ret;

	/* ECC sanity check: warn if it's too weak */
	if (!nand_ecc_strength_good(chip))
		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
			mtd->name);

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Initialize state */
	chip->state = FL_READY;

	/* Invalidate the pagebuffer reference */
	chip->pagebuf = -1;

	/* Large page NAND with SOFT_ECC should support subpage reads */
	switch (ecc->mode) {
	case NAND_ECC_SOFT:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	/* Fill in remaining MTD driver data */
	mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
						MTD_CAP_NANDFLASH;
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = NULL;
	mtd->_unlock = NULL;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_reboot = nand_shutdown;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->_max_bad_blocks = nand_max_bad_blocks;
	mtd->writebufsize = mtd->writesize;

	/*
	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
	 * properly set.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	/* Initialize the ->data_interface field. */
	ret = nand_init_data_interface(chip);
	if (ret)
		goto err_nand_manuf_cleanup;

	/* Enter fastest possible mode on all dies. */
	for (i = 0; i < chip->numchips; i++) {
		ret = nand_setup_data_interface(chip, i);
		if (ret)
			goto err_nand_manuf_cleanup;
	}

	/* Check, if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build bad block table */
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_nand_manuf_cleanup;

	return 0;


err_nand_manuf_cleanup:
	nand_manufacturer_cleanup(chip);

err_free_buf:
	/* code_buf/calc_buf may still be NULL here; kfree(NULL) is a no-op */
	kfree(chip->data_buf);
	kfree(ecc->code_buf);
	kfree(ecc->calc_buf);

	return ret;
}
5802 
5803 static int nand_attach(struct nand_chip *chip)
5804 {
5805 	if (chip->controller->ops && chip->controller->ops->attach_chip)
5806 		return chip->controller->ops->attach_chip(chip);
5807 
5808 	return 0;
5809 }
5810 
5811 static void nand_detach(struct nand_chip *chip)
5812 {
5813 	if (chip->controller->ops && chip->controller->ops->detach_chip)
5814 		chip->controller->ops->detach_chip(chip);
5815 }
5816 
5817 /**
5818  * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
5819  * @chip: NAND chip object
5820  * @maxchips: number of chips to scan for.
5821  * @ids: optional flash IDs table
5822  *
5823  * This fills out all the uninitialized function pointers with the defaults.
5824  * The flash ID is read and the mtd/chip structures are filled with the
5825  * appropriate values.
5826  */
5827 int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
5828 		       struct nand_flash_dev *ids)
5829 {
5830 	int ret;
5831 
5832 	if (!maxchips)
5833 		return -EINVAL;
5834 
5835 	ret = nand_scan_ident(chip, maxchips, ids);
5836 	if (ret)
5837 		return ret;
5838 
5839 	ret = nand_attach(chip);
5840 	if (ret)
5841 		goto cleanup_ident;
5842 
5843 	ret = nand_scan_tail(chip);
5844 	if (ret)
5845 		goto detach_chip;
5846 
5847 	return 0;
5848 
5849 detach_chip:
5850 	nand_detach(chip);
5851 cleanup_ident:
5852 	nand_scan_ident_cleanup(chip);
5853 
5854 	return ret;
5855 }
5856 EXPORT_SYMBOL(nand_scan_with_ids);
5857 
5858 /**
5859  * nand_cleanup - [NAND Interface] Free resources held by the NAND device
5860  * @chip: NAND chip object
5861  */
void nand_cleanup(struct nand_chip *chip)
{
	/* ecc.priv was allocated by nand_bch_init() for soft BCH only */
	if (chip->ecc.mode == NAND_ECC_SOFT &&
	    chip->ecc.algo == NAND_ECC_BCH)
		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);

	/* Free bad block table memory */
	kfree(chip->bbt);
	/* Buffers allocated in nand_scan_tail(); kfree(NULL) is a no-op */
	kfree(chip->data_buf);
	kfree(chip->ecc.code_buf);
	kfree(chip->ecc.calc_buf);

	/* Free bad block descriptor memory (only if dynamically allocated) */
	if (chip->badblock_pattern && chip->badblock_pattern->options
			& NAND_BBT_DYNAMICSTRUCT)
		kfree(chip->badblock_pattern);

	/* Free manufacturer priv data. */
	nand_manufacturer_cleanup(chip);

	/* Free controller specific allocations after chip identification */
	nand_detach(chip);

	/* Free identification phase allocations */
	nand_scan_ident_cleanup(chip);
}
5888 
5889 EXPORT_SYMBOL_GPL(nand_cleanup);
5890 
5891 /**
5892  * nand_release - [NAND Interface] Unregister the MTD device and free resources
5893  *		  held by the NAND device
5894  * @chip: NAND chip object
5895  */
void nand_release(struct nand_chip *chip)
{
	/* Deregister from the MTD core, then undo everything nand_scan() did */
	mtd_device_unregister(nand_to_mtd(chip));
	nand_cleanup(chip);
}
5901 EXPORT_SYMBOL_GPL(nand_release);
5902 
5903 MODULE_LICENSE("GPL");
5904 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
5905 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
5906 MODULE_DESCRIPTION("Generic NAND flash driver code");
5907