xref: /openbmc/linux/drivers/mtd/nand/raw/nand_base.c (revision e33bbe69149b802c0c77bfb822685772f85388ca)
1 /*
2  *  Overview:
3  *   This is the generic MTD driver for NAND flash devices. It should be
4  *   capable of working with almost all NAND chips currently available.
5  *
6  *	Additional technical information is available on
7  *	http://www.linux-mtd.infradead.org/doc/nand.html
8  *
9  *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
10  *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
11  *
12  *  Credits:
13  *	David Woodhouse for adding multichip support
14  *
15  *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
16  *	rework for 2K page size chips
17  *
18  *  TODO:
19  *	Enable cached programming for 2k page size chips
20  *	Check, if mtd->ecctype should be set to MTD_ECC_HW
21  *	if we have HW ECC support.
22  *	BBT table is not serialized, has to be fixed
23  *
24  * This program is free software; you can redistribute it and/or modify
25  * it under the terms of the GNU General Public License version 2 as
26  * published by the Free Software Foundation.
27  *
28  */
29 
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 
32 #include <linux/module.h>
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/err.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/mm.h>
39 #include <linux/nmi.h>
40 #include <linux/types.h>
41 #include <linux/mtd/mtd.h>
42 #include <linux/mtd/rawnand.h>
43 #include <linux/mtd/nand_ecc.h>
44 #include <linux/mtd/nand_bch.h>
45 #include <linux/interrupt.h>
46 #include <linux/bitops.h>
47 #include <linux/io.h>
48 #include <linux/mtd/partitions.h>
49 #include <linux/of.h>
50 
/* Forward declarations: used by the bad-block helpers before their bodies */
static int nand_get_device(struct mtd_info *mtd, int new_state);

static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops);
55 
56 /* Define default oob placement schemes for large and small page devices */
57 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
58 				 struct mtd_oob_region *oobregion)
59 {
60 	struct nand_chip *chip = mtd_to_nand(mtd);
61 	struct nand_ecc_ctrl *ecc = &chip->ecc;
62 
63 	if (section > 1)
64 		return -ERANGE;
65 
66 	if (!section) {
67 		oobregion->offset = 0;
68 		if (mtd->oobsize == 16)
69 			oobregion->length = 4;
70 		else
71 			oobregion->length = 3;
72 	} else {
73 		if (mtd->oobsize == 8)
74 			return -ERANGE;
75 
76 		oobregion->offset = 6;
77 		oobregion->length = ecc->total - 4;
78 	}
79 
80 	return 0;
81 }
82 
83 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
84 				  struct mtd_oob_region *oobregion)
85 {
86 	if (section > 1)
87 		return -ERANGE;
88 
89 	if (mtd->oobsize == 16) {
90 		if (section)
91 			return -ERANGE;
92 
93 		oobregion->length = 8;
94 		oobregion->offset = 8;
95 	} else {
96 		oobregion->length = 2;
97 		if (!section)
98 			oobregion->offset = 3;
99 		else
100 			oobregion->offset = 6;
101 	}
102 
103 	return 0;
104 }
105 
106 const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
107 	.ecc = nand_ooblayout_ecc_sp,
108 	.free = nand_ooblayout_free_sp,
109 };
110 EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
111 
112 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
113 				 struct mtd_oob_region *oobregion)
114 {
115 	struct nand_chip *chip = mtd_to_nand(mtd);
116 	struct nand_ecc_ctrl *ecc = &chip->ecc;
117 
118 	if (section || !ecc->total)
119 		return -ERANGE;
120 
121 	oobregion->length = ecc->total;
122 	oobregion->offset = mtd->oobsize - oobregion->length;
123 
124 	return 0;
125 }
126 
127 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
128 				  struct mtd_oob_region *oobregion)
129 {
130 	struct nand_chip *chip = mtd_to_nand(mtd);
131 	struct nand_ecc_ctrl *ecc = &chip->ecc;
132 
133 	if (section)
134 		return -ERANGE;
135 
136 	oobregion->length = mtd->oobsize - ecc->total - 2;
137 	oobregion->offset = 2;
138 
139 	return 0;
140 }
141 
142 const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
143 	.ecc = nand_ooblayout_ecc_lp,
144 	.free = nand_ooblayout_free_lp,
145 };
146 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
147 
148 /*
149  * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
150  * are placed at a fixed offset.
151  */
152 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
153 					 struct mtd_oob_region *oobregion)
154 {
155 	struct nand_chip *chip = mtd_to_nand(mtd);
156 	struct nand_ecc_ctrl *ecc = &chip->ecc;
157 
158 	if (section)
159 		return -ERANGE;
160 
161 	switch (mtd->oobsize) {
162 	case 64:
163 		oobregion->offset = 40;
164 		break;
165 	case 128:
166 		oobregion->offset = 80;
167 		break;
168 	default:
169 		return -EINVAL;
170 	}
171 
172 	oobregion->length = ecc->total;
173 	if (oobregion->offset + oobregion->length > mtd->oobsize)
174 		return -ERANGE;
175 
176 	return 0;
177 }
178 
179 static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
180 					  struct mtd_oob_region *oobregion)
181 {
182 	struct nand_chip *chip = mtd_to_nand(mtd);
183 	struct nand_ecc_ctrl *ecc = &chip->ecc;
184 	int ecc_offset = 0;
185 
186 	if (section < 0 || section > 1)
187 		return -ERANGE;
188 
189 	switch (mtd->oobsize) {
190 	case 64:
191 		ecc_offset = 40;
192 		break;
193 	case 128:
194 		ecc_offset = 80;
195 		break;
196 	default:
197 		return -EINVAL;
198 	}
199 
200 	if (section == 0) {
201 		oobregion->offset = 2;
202 		oobregion->length = ecc_offset - 2;
203 	} else {
204 		oobregion->offset = ecc_offset + ecc->total;
205 		oobregion->length = mtd->oobsize - oobregion->offset;
206 	}
207 
208 	return 0;
209 }

/* Legacy fixed-offset Hamming OOB layout; internal, not exported */
static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};
215 
216 static int check_offs_len(struct mtd_info *mtd,
217 					loff_t ofs, uint64_t len)
218 {
219 	struct nand_chip *chip = mtd_to_nand(mtd);
220 	int ret = 0;
221 
222 	/* Start address must align on block boundary */
223 	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
224 		pr_debug("%s: unaligned address\n", __func__);
225 		ret = -EINVAL;
226 	}
227 
228 	/* Length must align on block boundary */
229 	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
230 		pr_debug("%s: length not block aligned\n", __func__);
231 		ret = -EINVAL;
232 	}
233 
234 	return ret;
235 }
236 
/**
 * nand_release_device - [GENERIC] release chip
 * @mtd: MTD device structure
 *
 * Release chip lock and wake up anyone waiting on the device.
 */
static void nand_release_device(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Release the controller and the chip */
	spin_lock(&chip->controller->lock);
	/* Clear ownership and mark ready under the controller lock ... */
	chip->controller->active = NULL;
	chip->state = FL_READY;
	/* ... then wake waiters parked in nand_get_device() */
	wake_up(&chip->controller->wq);
	spin_unlock(&chip->controller->lock);
}
254 
255 /**
256  * nand_read_byte - [DEFAULT] read one byte from the chip
257  * @mtd: MTD device structure
258  *
259  * Default read function for 8bit buswidth
260  */
261 static uint8_t nand_read_byte(struct mtd_info *mtd)
262 {
263 	struct nand_chip *chip = mtd_to_nand(mtd);
264 	return readb(chip->IO_ADDR_R);
265 }
266 
/**
 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
 * @mtd: MTD device structure
 *
 * Default read function for 16bit buswidth with endianness conversion.
 *
 */
static uint8_t nand_read_byte16(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	/*
	 * NOTE(review): cpu_to_le16() on a value already read by readw()
	 * looks redundant on LE hosts and presumably exists so that the
	 * low byte of the bus word is returned on BE hosts as well —
	 * confirm before "simplifying" this expression.
	 */
	return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
}
279 
280 /**
281  * nand_read_word - [DEFAULT] read one word from the chip
282  * @mtd: MTD device structure
283  *
284  * Default read function for 16bit buswidth without endianness conversion.
285  */
286 static u16 nand_read_word(struct mtd_info *mtd)
287 {
288 	struct nand_chip *chip = mtd_to_nand(mtd);
289 	return readw(chip->IO_ADDR_R);
290 }
291 
292 /**
293  * nand_select_chip - [DEFAULT] control CE line
294  * @mtd: MTD device structure
295  * @chipnr: chipnumber to select, -1 for deselect
296  *
297  * Default select function for 1 chip devices.
298  */
299 static void nand_select_chip(struct mtd_info *mtd, int chipnr)
300 {
301 	struct nand_chip *chip = mtd_to_nand(mtd);
302 
303 	switch (chipnr) {
304 	case -1:
305 		chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
306 		break;
307 	case 0:
308 		break;
309 
310 	default:
311 		BUG();
312 	}
313 }
314 
315 /**
316  * nand_write_byte - [DEFAULT] write single byte to chip
317  * @mtd: MTD device structure
318  * @byte: value to write
319  *
320  * Default function to write a byte to I/O[7:0]
321  */
322 static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
323 {
324 	struct nand_chip *chip = mtd_to_nand(mtd);
325 
326 	chip->write_buf(mtd, &byte, 1);
327 }
328 
329 /**
330  * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
331  * @mtd: MTD device structure
332  * @byte: value to write
333  *
334  * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
335  */
336 static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
337 {
338 	struct nand_chip *chip = mtd_to_nand(mtd);
339 	uint16_t word = byte;
340 
341 	/*
342 	 * It's not entirely clear what should happen to I/O[15:8] when writing
343 	 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
344 	 *
345 	 *    When the host supports a 16-bit bus width, only data is
346 	 *    transferred at the 16-bit width. All address and command line
347 	 *    transfers shall use only the lower 8-bits of the data bus. During
348 	 *    command transfers, the host may place any value on the upper
349 	 *    8-bits of the data bus. During address transfers, the host shall
350 	 *    set the upper 8-bits of the data bus to 00h.
351 	 *
352 	 * One user of the write_byte callback is nand_set_features. The
353 	 * four parameters are specified to be written to I/O[7:0], but this is
354 	 * neither an address nor a command transfer. Let's assume a 0 on the
355 	 * upper I/O lines is OK.
356 	 */
357 	chip->write_buf(mtd, (uint8_t *)&word, 2);
358 }
359 
360 /**
361  * nand_write_buf - [DEFAULT] write buffer to chip
362  * @mtd: MTD device structure
363  * @buf: data buffer
364  * @len: number of bytes to write
365  *
366  * Default write function for 8bit buswidth.
367  */
368 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
369 {
370 	struct nand_chip *chip = mtd_to_nand(mtd);
371 
372 	iowrite8_rep(chip->IO_ADDR_W, buf, len);
373 }
374 
375 /**
376  * nand_read_buf - [DEFAULT] read chip data into buffer
377  * @mtd: MTD device structure
378  * @buf: buffer to store date
379  * @len: number of bytes to read
380  *
381  * Default read function for 8bit buswidth.
382  */
383 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
384 {
385 	struct nand_chip *chip = mtd_to_nand(mtd);
386 
387 	ioread8_rep(chip->IO_ADDR_R, buf, len);
388 }
389 
390 /**
391  * nand_write_buf16 - [DEFAULT] write buffer to chip
392  * @mtd: MTD device structure
393  * @buf: data buffer
394  * @len: number of bytes to write
395  *
396  * Default write function for 16bit buswidth.
397  */
398 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
399 {
400 	struct nand_chip *chip = mtd_to_nand(mtd);
401 	u16 *p = (u16 *) buf;
402 
403 	iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
404 }
405 
406 /**
407  * nand_read_buf16 - [DEFAULT] read chip data into buffer
408  * @mtd: MTD device structure
409  * @buf: buffer to store date
410  * @len: number of bytes to read
411  *
412  * Default read function for 16bit buswidth.
413  */
414 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
415 {
416 	struct nand_chip *chip = mtd_to_nand(mtd);
417 	u16 *p = (u16 *) buf;
418 
419 	ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
420 }
421 
/**
 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Check, if the block is bad.
 */
static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	int page, page_end, res;
	struct nand_chip *chip = mtd_to_nand(mtd);
	u8 bad;

	/* Some chips place the factory marker in the last page of the block */
	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
		ofs += mtd->erasesize - mtd->writesize;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	/* Optionally scan a second page of the block as well */
	page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);

	for (; page < page_end; page++) {
		res = chip->ecc.read_oob(mtd, chip, page);
		if (res)
			return res;

		bad = chip->oob_poi[chip->badblockpos];

		/*
		 * Default: any value != 0xFF means bad. With badblockbits < 8
		 * a set-bit count below the threshold marks the block bad,
		 * tolerating a few flipped bits in the marker.
		 */
		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			res = hweight8(bad) < chip->badblockbits;
		if (res)
			return res;
	}

	return 0;
}
458 
/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver. It provides the details for writing a bad block marker to a
 * block.
 */
static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, i = 0;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* On 16-bit chips write a full, 2-byte-aligned marker word */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	/* Write to first/last page(s) if necessary */
	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
		ofs += mtd->erasesize - mtd->writesize;
	do {
		res = nand_do_write_oob(mtd, ofs, &ops);
		/* Remember the first failure but still try the other page */
		if (!ret)
			ret = res;

		i++;
		ofs += mtd->writesize;
	} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);

	return ret;
}
500 
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->block_markbad).
 *
 * We try operations in the following order:
 *
 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 *  (2) write bad block marker to OOB area of affected block (unless flag
 *      NAND_BBT_NO_OOB_BBM is present)
 *  (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
*/
static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		/* Erase failure is ignored: the OOB write below still runs */
		nand_erase_nand(mtd, &einfo, 0);

		/* Write bad block marker to OOB */
		nand_get_device(mtd, FL_WRITING);
		ret = chip->block_markbad(mtd, ofs);
		nand_release_device(mtd);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(mtd, ofs);
		/* Keep the first error from the marker write, if any */
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
552 
553 /**
554  * nand_check_wp - [GENERIC] check if the chip is write protected
555  * @mtd: MTD device structure
556  *
557  * Check, if the device is write protected. The function expects, that the
558  * device is already selected.
559  */
560 static int nand_check_wp(struct mtd_info *mtd)
561 {
562 	struct nand_chip *chip = mtd_to_nand(mtd);
563 	u8 status;
564 	int ret;
565 
566 	/* Broken xD cards report WP despite being writable */
567 	if (chip->options & NAND_BROKEN_XD)
568 		return 0;
569 
570 	/* Check the WP bit */
571 	ret = nand_status_op(chip, &status);
572 	if (ret)
573 		return ret;
574 
575 	return status & NAND_STATUS_WP ? 0 : 1;
576 }
577 
578 /**
579  * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
580  * @mtd: MTD device structure
581  * @ofs: offset from device start
582  *
583  * Check if the block is marked as reserved.
584  */
585 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
586 {
587 	struct nand_chip *chip = mtd_to_nand(mtd);
588 
589 	if (!chip->bbt)
590 		return 0;
591 	/* Return info from the table */
592 	return nand_isreserved_bbt(mtd, ofs);
593 }
594 
595 /**
596  * nand_block_checkbad - [GENERIC] Check if a block is marked bad
597  * @mtd: MTD device structure
598  * @ofs: offset from device start
599  * @allowbbt: 1, if its allowed to access the bbt area
600  *
601  * Check, if the block is bad. Either by reading the bad block table or
602  * calling of the scan function.
603  */
604 static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
605 {
606 	struct nand_chip *chip = mtd_to_nand(mtd);
607 
608 	if (!chip->bbt)
609 		return chip->block_bad(mtd, ofs);
610 
611 	/* Return info from the table */
612 	return nand_isbad_bbt(mtd, ofs, allowbbt);
613 }
614 
615 /**
616  * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
617  * @mtd: MTD device structure
618  * @timeo: Timeout
619  *
620  * Helper function for nand_wait_ready used when needing to wait in interrupt
621  * context.
622  */
623 static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
624 {
625 	struct nand_chip *chip = mtd_to_nand(mtd);
626 	int i;
627 
628 	/* Wait for the device to get ready */
629 	for (i = 0; i < timeo; i++) {
630 		if (chip->dev_ready(mtd))
631 			break;
632 		touch_softlockup_watchdog();
633 		mdelay(1);
634 	}
635 }
636 
/**
 * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
 * @mtd: MTD device structure
 *
 * Wait for the ready pin after a command, and warn if a timeout occurs.
 */
void nand_wait_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	unsigned long timeo = 400;

	/* Cannot sleep in interrupt/panic context: busy-wait instead */
	if (in_interrupt() || oops_in_progress)
		return panic_nand_wait_ready(mtd, timeo);

	/* Wait until command is processed or timeout occurs */
	timeo = jiffies + msecs_to_jiffies(timeo);
	do {
		if (chip->dev_ready(mtd))
			return;
		cond_resched();
	} while (time_before(jiffies, timeo));

	/* Re-check once: the chip may have become ready during a reschedule */
	if (!chip->dev_ready(mtd))
		pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
}
EXPORT_SYMBOL_GPL(nand_wait_ready);
663 
664 /**
665  * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
666  * @mtd: MTD device structure
667  * @timeo: Timeout in ms
668  *
669  * Wait for status ready (i.e. command done) or timeout.
670  */
671 static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
672 {
673 	register struct nand_chip *chip = mtd_to_nand(mtd);
674 	int ret;
675 
676 	timeo = jiffies + msecs_to_jiffies(timeo);
677 	do {
678 		u8 status;
679 
680 		ret = nand_read_data_op(chip, &status, sizeof(status), true);
681 		if (ret)
682 			return;
683 
684 		if (status & NAND_STATUS_READY)
685 			break;
686 		touch_softlockup_watchdog();
687 	} while (time_before(jiffies, timeo));
688 };
689 
690 /**
691  * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
692  * @chip: NAND chip structure
693  * @timeout_ms: Timeout in ms
694  *
695  * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
696  * If that does not happen whitin the specified timeout, -ETIMEDOUT is
697  * returned.
698  *
699  * This helper is intended to be used when the controller does not have access
700  * to the NAND R/B pin.
701  *
702  * Be aware that calling this helper from an ->exec_op() implementation means
703  * ->exec_op() must be re-entrant.
704  *
705  * Return 0 if the NAND chip is ready, a negative error otherwise.
706  */
707 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
708 {
709 	u8 status = 0;
710 	int ret;
711 
712 	if (!chip->exec_op)
713 		return -ENOTSUPP;
714 
715 	ret = nand_status_op(chip, NULL);
716 	if (ret)
717 		return ret;
718 
719 	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
720 	do {
721 		ret = nand_read_data_op(chip, &status, sizeof(status), true);
722 		if (ret)
723 			break;
724 
725 		if (status & NAND_STATUS_READY)
726 			break;
727 
728 		/*
729 		 * Typical lowest execution time for a tR on most NANDs is 10us,
730 		 * use this as polling delay before doing something smarter (ie.
731 		 * deriving a delay from the timeout value, timeout_ms/ratio).
732 		 */
733 		udelay(10);
734 	} while	(time_before(jiffies, timeout_ms));
735 
736 	/*
737 	 * We have to exit READ_STATUS mode in order to read real data on the
738 	 * bus in case the WAITRDY instruction is preceding a DATA_IN
739 	 * instruction.
740 	 */
741 	nand_exit_status_op(chip);
742 
743 	if (ret)
744 		return ret;
745 
746 	return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
747 };
748 EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
749 
/**
 * nand_command - [DEFAULT] Send command to NAND device
 * @mtd: MTD device structure
 * @command: the command to be sent
 * @column: the column address for this command, -1 if none
 * @page_addr: the page address for this command, -1 if none
 *
 * Send command to NAND device. This function is used for small page devices
 * (512 Bytes per page).
 */
static void nand_command(struct mtd_info *mtd, unsigned int command,
			 int column, int page_addr)
{
	register struct nand_chip *chip = mtd_to_nand(mtd);
	int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;

	/* Write out the command to the device */
	if (command == NAND_CMD_SEQIN) {
		/*
		 * Small page chips have no random-access write: a preceding
		 * read command selects which 256-byte half (or the OOB area)
		 * the following SEQIN targets.
		 */
		int readcmd;

		if (column >= mtd->writesize) {
			/* OOB area */
			column -= mtd->writesize;
			readcmd = NAND_CMD_READOOB;
		} else if (column < 256) {
			/* First 256 bytes --> READ0 */
			readcmd = NAND_CMD_READ0;
		} else {
			column -= 256;
			readcmd = NAND_CMD_READ1;
		}
		chip->cmd_ctrl(mtd, readcmd, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
	}
	if (command != NAND_CMD_NONE)
		chip->cmd_ctrl(mtd, command, ctrl);

	/* Address cycle, when necessary */
	ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
	/* Serially input address */
	if (column != -1) {
		/* Adjust columns for 16 bit buswidth */
		if (chip->options & NAND_BUSWIDTH_16 &&
				!nand_opcode_8bits(command))
			column >>= 1;
		chip->cmd_ctrl(mtd, column, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
	}
	if (page_addr != -1) {
		/* Row address: 2 cycles, plus a third on large chips */
		chip->cmd_ctrl(mtd, page_addr, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
		chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
		if (chip->options & NAND_ROW_ADDR_3)
			chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
	}
	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);

	/*
	 * Program and erase have their own busy handlers status and sequential
	 * in needs no delay
	 */
	switch (command) {

	case NAND_CMD_NONE:
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_SEQIN:
	case NAND_CMD_STATUS:
	case NAND_CMD_READID:
	case NAND_CMD_SET_FEATURES:
		return;

	case NAND_CMD_RESET:
		if (chip->dev_ready)
			break;
		udelay(chip->chip_delay);
		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
			       NAND_CTRL_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd,
			       NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
		/* EZ-NAND can take upto 250ms as per ONFi v4.0 */
		nand_wait_status_ready(mtd, 250);
		return;

		/* This applies to read commands */
	case NAND_CMD_READ0:
		/*
		 * READ0 is sometimes used to exit GET STATUS mode. When this
		 * is the case no address cycles are requested, and we can use
		 * this information to detect that we should not wait for the
		 * device to be ready.
		 */
		if (column == -1 && page_addr == -1)
			return;

		/* fall through - real reads wait for the device below */
	default:
		/*
		 * If we don't have access to the busy pin, we apply the given
		 * command delay
		 */
		if (!chip->dev_ready) {
			udelay(chip->chip_delay);
			return;
		}
	}
	/*
	 * Apply this short delay always to ensure that we do wait tWB in
	 * any case on any machine.
	 */
	ndelay(100);

	nand_wait_ready(mtd);
}
864 
865 static void nand_ccs_delay(struct nand_chip *chip)
866 {
867 	/*
868 	 * The controller already takes care of waiting for tCCS when the RNDIN
869 	 * or RNDOUT command is sent, return directly.
870 	 */
871 	if (!(chip->options & NAND_WAIT_TCCS))
872 		return;
873 
874 	/*
875 	 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
876 	 * (which should be safe for all NANDs).
877 	 */
878 	if (chip->setup_data_interface)
879 		ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
880 	else
881 		ndelay(500);
882 }
883 
/**
 * nand_command_lp - [DEFAULT] Send command to NAND large page device
 * @mtd: MTD device structure
 * @command: the command to be sent
 * @column: the column address for this command, -1 if none
 * @page_addr: the page address for this command, -1 if none
 *
 * Send command to NAND device. This is the version for the new large page
 * devices. We don't have the separate regions as we have in the small page
 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
 */
static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
			    int column, int page_addr)
{
	register struct nand_chip *chip = mtd_to_nand(mtd);

	/* Emulate NAND_CMD_READOOB */
	if (command == NAND_CMD_READOOB) {
		/* OOB follows the data area on large page chips */
		column += mtd->writesize;
		command = NAND_CMD_READ0;
	}

	/* Command latch cycle */
	if (command != NAND_CMD_NONE)
		chip->cmd_ctrl(mtd, command,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);

	if (column != -1 || page_addr != -1) {
		int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;

		/* Serially input address */
		if (column != -1) {
			/* Adjust columns for 16 bit buswidth */
			if (chip->options & NAND_BUSWIDTH_16 &&
					!nand_opcode_8bits(command))
				column >>= 1;
			chip->cmd_ctrl(mtd, column, ctrl);
			ctrl &= ~NAND_CTRL_CHANGE;

			/* Only output a single addr cycle for 8bits opcodes. */
			if (!nand_opcode_8bits(command))
				chip->cmd_ctrl(mtd, column >> 8, ctrl);
		}
		if (page_addr != -1) {
			/* Row address: 2 cycles, plus a third on large chips */
			chip->cmd_ctrl(mtd, page_addr, ctrl);
			chip->cmd_ctrl(mtd, page_addr >> 8,
				       NAND_NCE | NAND_ALE);
			if (chip->options & NAND_ROW_ADDR_3)
				chip->cmd_ctrl(mtd, page_addr >> 16,
					       NAND_NCE | NAND_ALE);
		}
	}
	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);

	/*
	 * Program and erase have their own busy handlers status, sequential
	 * in and status need no delay.
	 */
	switch (command) {

	case NAND_CMD_NONE:
	case NAND_CMD_CACHEDPROG:
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_SEQIN:
	case NAND_CMD_STATUS:
	case NAND_CMD_READID:
	case NAND_CMD_SET_FEATURES:
		return;

	case NAND_CMD_RNDIN:
		nand_ccs_delay(chip);
		return;

	case NAND_CMD_RESET:
		if (chip->dev_ready)
			break;
		udelay(chip->chip_delay);
		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
			       NAND_NCE | NAND_CTRL_CHANGE);
		/* EZ-NAND can take upto 250ms as per ONFi v4.0 */
		nand_wait_status_ready(mtd, 250);
		return;

	case NAND_CMD_RNDOUT:
		/* No ready / busy check necessary */
		chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
			       NAND_NCE | NAND_CTRL_CHANGE);

		nand_ccs_delay(chip);
		return;

	case NAND_CMD_READ0:
		/*
		 * READ0 is sometimes used to exit GET STATUS mode. When this
		 * is the case no address cycles are requested, and we can use
		 * this information to detect that READSTART should not be
		 * issued.
		 */
		if (column == -1 && page_addr == -1)
			return;

		chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
			       NAND_NCE | NAND_CTRL_CHANGE);

		/* fall through - wait for the device like other reads */
		/* This applies to read commands */
	default:
		/*
		 * If we don't have access to the busy pin, we apply the given
		 * command delay.
		 */
		if (!chip->dev_ready) {
			udelay(chip->chip_delay);
			return;
		}
	}

	/*
	 * Apply this short delay always to ensure that we do wait tWB in
	 * any case on any machine.
	 */
	ndelay(100);

	nand_wait_ready(mtd);
}
1016 
/**
 * panic_nand_get_device - [GENERIC] Get chip for selected access
 * @chip: the nand chip descriptor
 * @mtd: MTD device structure
 * @new_state: the state which is requested
 *
 * Used when in panic, no locks are taken.
 */
static void panic_nand_get_device(struct nand_chip *chip,
		      struct mtd_info *mtd, int new_state)
{
	/* Hardware controller shared among independent devices */
	/* Claim the controller unconditionally: in panic nobody can race us */
	chip->controller->active = chip;
	chip->state = new_state;
}
1032 
/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @mtd: MTD device structure
 * @new_state: the state which is requested
 *
 * Get the device and lock it for exclusive access
 *
 * Returns 0 once this chip owns the (possibly shared) controller and has
 * entered @new_state; sleeps on the controller waitqueue otherwise.
 */
static int
nand_get_device(struct mtd_info *mtd, int new_state)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	spinlock_t *lock = &chip->controller->lock;
	wait_queue_head_t *wq = &chip->controller->wq;
	DECLARE_WAITQUEUE(wait, current);
retry:
	spin_lock(lock);

	/* Hardware controller shared among independent devices */
	if (!chip->controller->active)
		chip->controller->active = chip;

	if (chip->controller->active == chip && chip->state == FL_READY) {
		chip->state = new_state;
		spin_unlock(lock);
		return 0;
	}
	/* Suspend may proceed while another chip on the controller sleeps */
	if (new_state == FL_PM_SUSPENDED) {
		if (chip->controller->active->state == FL_PM_SUSPENDED) {
			chip->state = FL_PM_SUSPENDED;
			spin_unlock(lock);
			return 0;
		}
	}
	/* Busy: park on the waitqueue until nand_release_device() wakes us */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(wq, &wait);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
	goto retry;
}
1073 
/**
 * panic_nand_wait - [GENERIC] wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND chip structure
 * @timeo: timeout
 *
 * Wait for command done. This is a helper function for nand_wait used when
 * we are in interrupt context. May happen when in panic and trying to write
 * an oops through mtdoops.
 */
static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
			    unsigned long timeo)
{
	int i;
	/* Busy-poll: either the R/B pin or the status register */
	for (i = 0; i < timeo; i++) {
		if (chip->dev_ready) {
			if (chip->dev_ready(mtd))
				break;
		} else {
			int ret;
			u8 status;

			ret = nand_read_data_op(chip, &status, sizeof(status),
						true);
			if (ret)
				return;

			if (status & NAND_STATUS_READY)
				break;
		}
		mdelay(1);
	}
}
1107 
1108 /**
1109  * nand_wait - [DEFAULT] wait until the command is done
1110  * @mtd: MTD device structure
1111  * @chip: NAND chip structure
1112  *
1113  * Wait for command done. This applies to erase and program only.
1114  */
static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{

	unsigned long timeo = 400;
	u8 status;
	int ret;

	/*
	 * Apply this short delay always to ensure that we do wait tWB in any
	 * case on any machine.
	 */
	ndelay(100);

	/* Put the chip in status-read mode (status byte read back below) */
	ret = nand_status_op(chip, NULL);
	if (ret)
		return ret;

	/* Cannot sleep in atomic/panic context: fall back to busy polling */
	if (in_interrupt() || oops_in_progress)
		panic_nand_wait(mtd, chip, timeo);
	else {
		timeo = jiffies + msecs_to_jiffies(timeo);
		do {
			if (chip->dev_ready) {
				/* Controller provides a R/B# line helper */
				if (chip->dev_ready(mtd))
					break;
			} else {
				/* No R/B# helper: poll the status byte */
				ret = nand_read_data_op(chip, &status,
							sizeof(status), true);
				if (ret)
					return ret;

				if (status & NAND_STATUS_READY)
					break;
			}
			cond_resched();
		} while (time_before(jiffies, timeo));
	}

	/* Final status read: this byte is also the function's return value */
	ret = nand_read_data_op(chip, &status, sizeof(status), true);
	if (ret)
		return ret;

	/* This can happen if in case of timeout or buggy dev_ready */
	WARN_ON(!(status & NAND_STATUS_READY));
	return status;
}
1161 
1162 static bool nand_supports_get_features(struct nand_chip *chip, int addr)
1163 {
1164 	return (chip->parameters.supports_set_get_features &&
1165 		test_bit(addr, chip->parameters.get_feature_list));
1166 }
1167 
1168 static bool nand_supports_set_features(struct nand_chip *chip, int addr)
1169 {
1170 	return (chip->parameters.supports_set_get_features &&
1171 		test_bit(addr, chip->parameters.set_feature_list));
1172 }
1173 
1174 /**
1175  * nand_get_features - wrapper to perform a GET_FEATURE
1176  * @chip: NAND chip info structure
1177  * @addr: feature address
1178  * @subfeature_param: the subfeature parameters, a four bytes array
1179  *
1180  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
1181  * operation cannot be handled.
1182  */
1183 int nand_get_features(struct nand_chip *chip, int addr,
1184 		      u8 *subfeature_param)
1185 {
1186 	struct mtd_info *mtd = nand_to_mtd(chip);
1187 
1188 	if (!nand_supports_get_features(chip, addr))
1189 		return -ENOTSUPP;
1190 
1191 	return chip->get_features(mtd, chip, addr, subfeature_param);
1192 }
1193 EXPORT_SYMBOL_GPL(nand_get_features);
1194 
1195 /**
1196  * nand_set_features - wrapper to perform a SET_FEATURE
1197  * @chip: NAND chip info structure
1198  * @addr: feature address
1199  * @subfeature_param: the subfeature parameters, a four bytes array
1200  *
1201  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
1202  * operation cannot be handled.
1203  */
1204 int nand_set_features(struct nand_chip *chip, int addr,
1205 		      u8 *subfeature_param)
1206 {
1207 	struct mtd_info *mtd = nand_to_mtd(chip);
1208 
1209 	if (!nand_supports_set_features(chip, addr))
1210 		return -ENOTSUPP;
1211 
1212 	return chip->set_features(mtd, chip, addr, subfeature_param);
1213 }
1214 EXPORT_SYMBOL_GPL(nand_set_features);
1215 
1216 /**
1217  * nand_reset_data_interface - Reset data interface and timings
1218  * @chip: The NAND chip
1219  * @chipnr: Internal die id
1220  *
1221  * Reset the Data interface and timings to ONFI mode 0.
1222  *
1223  * Returns 0 for success or negative error code otherwise.
1224  */
static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* Nothing to do when the controller does not support timing setup */
	if (!chip->setup_data_interface)
		return 0;

	/*
	 * The ONFI specification says:
	 * "
	 * To transition from NV-DDR or NV-DDR2 to the SDR data
	 * interface, the host shall use the Reset (FFh) command
	 * using SDR timing mode 0. A device in any timing mode is
	 * required to recognize Reset (FFh) command issued in SDR
	 * timing mode 0.
	 * "
	 *
	 * Configure the data interface in SDR mode and set the
	 * timings to timing mode 0.
	 */

	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
	/* Only the controller side is reconfigured here */
	ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}
1254 
1255 /**
1256  * nand_setup_data_interface - Setup the best data interface and timings
1257  * @chip: The NAND chip
1258  * @chipnr: Internal die id
1259  *
1260  * Find and configure the best data interface and NAND timings supported by
1261  * the chip and the driver.
1262  * First tries to retrieve supported timing modes from ONFI information,
1263  * and if the NAND chip does not support ONFI, relies on the
1264  * ->onfi_timing_mode_default specified in the nand_ids table.
1265  *
1266  * Returns 0 for success or negative error code otherwise.
1267  */
static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
		chip->onfi_timing_mode_default,
	};
	int ret;

	/* Nothing to do when the controller does not support timing setup */
	if (!chip->setup_data_interface)
		return 0;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		chip->select_chip(mtd, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		chip->select_chip(mtd, -1);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		return 0;

	/* Read back the timing mode to verify the chip really switched */
	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	chip->select_chip(mtd, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	chip->select_chip(mtd, -1);
	if (ret)
		goto err_reset_chip;

	if (tmode_param[0] != chip->onfi_timing_mode_default) {
		pr_warn("timing mode %d not acknowledged by the NAND chip\n",
			chip->onfi_timing_mode_default);
		goto err_reset_chip;
	}

	return 0;

err_reset_chip:
	/*
	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
	 * timing mode.
	 */
	nand_reset_data_interface(chip, chipnr);
	chip->select_chip(mtd, chipnr);
	nand_reset_op(chip);
	chip->select_chip(mtd, -1);

	return ret;
}
1326 
1327 /**
1328  * nand_init_data_interface - find the best data interface and timings
1329  * @chip: The NAND chip
1330  *
1331  * Find the best data interface and NAND timings supported by the chip
1332  * and the driver.
1333  * First tries to retrieve supported timing modes from ONFI information,
1334  * and if the NAND chip does not support ONFI, relies on the
1335  * ->onfi_timing_mode_default specified in the nand_ids table. After this
1336  * function nand_chip->data_interface is initialized with the best timing mode
1337  * available.
1338  *
1339  * Returns 0 for success or negative error code otherwise.
1340  */
static int nand_init_data_interface(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int modes, mode, ret;

	/* Nothing to do when the controller does not support timing setup */
	if (!chip->setup_data_interface)
		return 0;

	/*
	 * First try to identify the best timings from ONFI parameters and
	 * if the NAND does not support ONFI, fallback to the default ONFI
	 * timing mode.
	 */
	modes = onfi_get_async_timing_mode(chip);
	if (modes == ONFI_TIMING_MODE_UNKNOWN) {
		if (!chip->onfi_timing_mode_default)
			return 0;

		/* Build a mask of all modes up to the default one */
		modes = GENMASK(chip->onfi_timing_mode_default, 0);
	}


	/* Try the fastest advertised mode first, then degrade gracefully */
	for (mode = fls(modes) - 1; mode >= 0; mode--) {
		ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
		if (ret)
			continue;

		/*
		 * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
		 * controller supports the requested timings.
		 */
		ret = chip->setup_data_interface(mtd,
						 NAND_DATA_IFACE_CHECK_ONLY,
						 &chip->data_interface);
		if (!ret) {
			chip->onfi_timing_mode_default = mode;
			break;
		}
	}

	/* Finding no suitable mode is not fatal: mode 0 remains in effect */
	return 0;
}
1383 
1384 /**
1385  * nand_fill_column_cycles - fill the column cycles of an address
1386  * @chip: The NAND chip
1387  * @addrs: Array of address cycles to fill
1388  * @offset_in_page: The offset in the page
1389  *
1390  * Fills the first or the first two bytes of the @addrs field depending
1391  * on the NAND bus width and the page size.
1392  *
1393  * Returns the number of cycles needed to encode the column, or a negative
1394  * error code in case one of the arguments is invalid.
1395  */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Make sure the offset is less than the actual page size. */
	if (offset_in_page > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/*
	 * On small page NANDs, there's a dedicated command to access the OOB
	 * area, and the column address is relative to the start of the OOB
	 * area, not the start of the page. Adjust the address accordingly.
	 */
	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
		offset_in_page -= mtd->writesize;

	/*
	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
	 * wide, then it must be divided by 2.
	 */
	if (chip->options & NAND_BUSWIDTH_16) {
		/* Odd byte offsets cannot be expressed on a 16-bit bus */
		if (WARN_ON(offset_in_page % 2))
			return -EINVAL;

		offset_in_page /= 2;
	}

	addrs[0] = offset_in_page;

	/*
	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
	 * need 2
	 */
	if (mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}
1437 
/*
 * READ PAGE helper for small page (<= 512 byte) chips on ->exec_op()
 * controllers: picks READ0/READ1/READOOB depending on the column, issues
 * 3 (or 4) address cycles, waits for the chip to be ready, then reads out
 * @len bytes.
 */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/* Small pages need a different read command depending on the column */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	/* One column cycle in addrs[0] (return value 1 for small pages) */
	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row address cycles follow the column cycle */
	addrs[1] = page;
	addrs[2] = page >> 8;

	/* A third row cycle is needed on larger density chips */
	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1480 
/*
 * READ PAGE helper for large page (> 512 byte) chips on ->exec_op()
 * controllers: READ0 + 4 (or 5) address cycles + READSTART, then waits for
 * the chip to be ready and reads out @len bytes.
 */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/* Two column cycles filled in addrs[0] and addrs[1] */
	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row address cycles follow the two column cycles */
	addrs[2] = page;
	addrs[3] = page >> 8;

	/* A third row cycle is needed on larger density chips */
	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1517 
1518 /**
1519  * nand_read_page_op - Do a READ PAGE operation
1520  * @chip: The NAND chip
1521  * @page: page to read
1522  * @offset_in_page: offset within the page
1523  * @buf: buffer used to store the data
1524  * @len: length of the buffer
1525  *
1526  * This function issues a READ PAGE operation.
1527  * This function does not select/unselect the CS line.
1528  *
1529  * Returns 0 on success, a negative error code otherwise.
1530  */
1531 int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1532 		      unsigned int offset_in_page, void *buf, unsigned int len)
1533 {
1534 	struct mtd_info *mtd = nand_to_mtd(chip);
1535 
1536 	if (len && !buf)
1537 		return -EINVAL;
1538 
1539 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1540 		return -EINVAL;
1541 
1542 	if (chip->exec_op) {
1543 		if (mtd->writesize > 512)
1544 			return nand_lp_exec_read_page_op(chip, page,
1545 							 offset_in_page, buf,
1546 							 len);
1547 
1548 		return nand_sp_exec_read_page_op(chip, page, offset_in_page,
1549 						 buf, len);
1550 	}
1551 
1552 	chip->cmdfunc(mtd, NAND_CMD_READ0, offset_in_page, page);
1553 	if (len)
1554 		chip->read_buf(mtd, buf, len);
1555 
1556 	return 0;
1557 }
1558 EXPORT_SYMBOL_GPL(nand_read_page_op);
1559 
1560 /**
1561  * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
1562  * @chip: The NAND chip
1563  * @page: parameter page to read
1564  * @buf: buffer used to store the data
1565  * @len: length of the buffer
1566  *
1567  * This function issues a READ PARAMETER PAGE operation.
1568  * This function does not select/unselect the CS line.
1569  *
1570  * Returns 0 on success, a negative error code otherwise.
1571  */
static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
				   unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int i;
	u8 *p = buf;

	/* A non-zero length requires a destination buffer */
	if (len && !buf)
		return -EINVAL;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PARAM, 0),
			NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			/* Parameter page data is forced to 8-bit accesses */
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: issue the command, then read bytes one at a time */
	chip->cmdfunc(mtd, NAND_CMD_PARAM, page, -1);
	for (i = 0; i < len; i++)
		p[i] = chip->read_byte(mtd);

	return 0;
}
1607 
1608 /**
1609  * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
1610  * @chip: The NAND chip
1611  * @offset_in_page: offset within the page
1612  * @buf: buffer used to store the data
1613  * @len: length of the buffer
1614  * @force_8bit: force 8-bit bus access
1615  *
1616  * This function issues a CHANGE READ COLUMN operation.
1617  * This function does not select/unselect the CS line.
1618  *
1619  * Returns 0 on success, a negative error code otherwise.
1620  */
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* A non-zero length requires a destination buffer */
	if (len && !buf)
		return -EINVAL;

	/* The requested span must fit in page + OOB */
	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		u8 addrs[2] = {};
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
				    PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);
		int ret;

		/* Fill the two column cycles for the new offset */
		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		/*
		 * Writing instrs[3] is safe even when DATA_IN was dropped
		 * above: dropping only decrements op.ninstrs, the array
		 * entry itself still exists.
		 */
		instrs[3].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path */
	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset_in_page, -1);
	if (len)
		chip->read_buf(mtd, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1671 
1672 /**
1673  * nand_read_oob_op - Do a READ OOB operation
1674  * @chip: The NAND chip
1675  * @page: page to read
1676  * @offset_in_oob: offset within the OOB area
1677  * @buf: buffer used to store the data
1678  * @len: length of the buffer
1679  *
1680  * This function issues a READ OOB operation.
1681  * This function does not select/unselect the CS line.
1682  *
1683  * Returns 0 on success, a negative error code otherwise.
1684  */
1685 int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1686 		     unsigned int offset_in_oob, void *buf, unsigned int len)
1687 {
1688 	struct mtd_info *mtd = nand_to_mtd(chip);
1689 
1690 	if (len && !buf)
1691 		return -EINVAL;
1692 
1693 	if (offset_in_oob + len > mtd->oobsize)
1694 		return -EINVAL;
1695 
1696 	if (chip->exec_op)
1697 		return nand_read_page_op(chip, page,
1698 					 mtd->writesize + offset_in_oob,
1699 					 buf, len);
1700 
1701 	chip->cmdfunc(mtd, NAND_CMD_READOOB, offset_in_oob, page);
1702 	if (len)
1703 		chip->read_buf(mtd, buf, len);
1704 
1705 	return 0;
1706 }
1707 EXPORT_SYMBOL_GPL(nand_read_oob_op);
1708 
/*
 * PROG PAGE helper for ->exec_op() controllers. Issues SEQIN + address
 * cycles + data out and, when @prog is true, terminates with PAGEPROG,
 * waits for the chip to be ready and returns the NAND status byte (or a
 * negative error code). When @prog is false only the first half is sent,
 * so the caller can push more data before programming.
 */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(instrs);
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
	int ret;
	u8 status;

	if (naddrs < 0)
		return naddrs;

	/* Append the row cycles after the column cycle(s) */
	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	ret = nand_exec_op(chip, &op);
	/* When only the first half was sent, there is no status to report */
	if (!prog || ret)
		return ret;

	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	/* On success the NAND status byte is the return value */
	return status;
}
1783 
1784 /**
1785  * nand_prog_page_begin_op - starts a PROG PAGE operation
1786  * @chip: The NAND chip
1787  * @page: page to write
1788  * @offset_in_page: offset within the page
1789  * @buf: buffer containing the data to write to the page
1790  * @len: length of the buffer
1791  *
1792  * This function issues the first half of a PROG PAGE operation.
1793  * This function does not select/unselect the CS line.
1794  *
1795  * Returns 0 on success, a negative error code otherwise.
1796  */
1797 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1798 			    unsigned int offset_in_page, const void *buf,
1799 			    unsigned int len)
1800 {
1801 	struct mtd_info *mtd = nand_to_mtd(chip);
1802 
1803 	if (len && !buf)
1804 		return -EINVAL;
1805 
1806 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1807 		return -EINVAL;
1808 
1809 	if (chip->exec_op)
1810 		return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1811 					      len, false);
1812 
1813 	chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
1814 
1815 	if (buf)
1816 		chip->write_buf(mtd, buf, len);
1817 
1818 	return 0;
1819 }
1820 EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1821 
1822 /**
1823  * nand_prog_page_end_op - ends a PROG PAGE operation
1824  * @chip: The NAND chip
1825  *
1826  * This function issues the second half of a PROG PAGE operation.
1827  * This function does not select/unselect the CS line.
1828  *
1829  * Returns 0 on success, a negative error code otherwise.
1830  */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;
	u8 status;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Fetch the NAND status to detect program failures */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		/* Legacy path: PAGEPROG + ->waitfunc() returns the status */
		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
		ret = chip->waitfunc(mtd, chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1869 
1870 /**
1871  * nand_prog_page_op - Do a full PROG PAGE operation
1872  * @chip: The NAND chip
1873  * @page: page to write
1874  * @offset_in_page: offset within the page
1875  * @buf: buffer containing the data to write to the page
1876  * @len: length of the buffer
1877  *
1878  * This function issues a full PROG PAGE operation.
1879  * This function does not select/unselect the CS line.
1880  *
1881  * Returns 0 on success, a negative error code otherwise.
1882  */
1883 int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1884 		      unsigned int offset_in_page, const void *buf,
1885 		      unsigned int len)
1886 {
1887 	struct mtd_info *mtd = nand_to_mtd(chip);
1888 	int status;
1889 
1890 	if (!len || !buf)
1891 		return -EINVAL;
1892 
1893 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1894 		return -EINVAL;
1895 
1896 	if (chip->exec_op) {
1897 		status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1898 						len, true);
1899 	} else {
1900 		chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
1901 		chip->write_buf(mtd, buf, len);
1902 		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1903 		status = chip->waitfunc(mtd, chip);
1904 	}
1905 
1906 	if (status & NAND_STATUS_FAIL)
1907 		return -EIO;
1908 
1909 	return 0;
1910 }
1911 EXPORT_SYMBOL_GPL(nand_prog_page_op);
1912 
1913 /**
1914  * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
1915  * @chip: The NAND chip
1916  * @offset_in_page: offset within the page
1917  * @buf: buffer containing the data to send to the NAND
1918  * @len: length of the buffer
1919  * @force_8bit: force 8-bit bus access
1920  *
1921  * This function issues a CHANGE WRITE COLUMN operation.
1922  * This function does not select/unselect the CS line.
1923  *
1924  * Returns 0 on success, a negative error code otherwise.
1925  */
int nand_change_write_column_op(struct nand_chip *chip,
				unsigned int offset_in_page,
				const void *buf, unsigned int len,
				bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* A non-zero length requires a source buffer */
	if (len && !buf)
		return -EINVAL;

	/* The requested span must fit in page + OOB */
	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		u8 addrs[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);
		int ret;

		/* Fill the two column cycles for the new offset */
		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/*
		 * Writing instrs[2] is safe even if DATA_OUT is dropped
		 * below: dropping only decrements op.ninstrs, the array
		 * entry itself still exists.
		 */
		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path */
	chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->write_buf(mtd, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1975 
1976 /**
1977  * nand_readid_op - Do a READID operation
1978  * @chip: The NAND chip
1979  * @addr: address cycle to pass after the READID command
1980  * @buf: buffer used to store the ID
1981  * @len: length of the buffer
1982  *
1983  * This function sends a READID command and reads back the ID returned by the
1984  * NAND.
1985  * This function does not select/unselect the CS line.
1986  *
1987  * Returns 0 on success, a negative error code otherwise.
1988  */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int i;
	u8 *id = buf;

	/* A non-zero length requires a destination buffer */
	if (len && !buf)
		return -EINVAL;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
			/* ID bytes are forced to 8-bit accesses */
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: issue the command, then read bytes one at a time */
	chip->cmdfunc(mtd, NAND_CMD_READID, addr, -1);

	for (i = 0; i < len; i++)
		id[i] = chip->read_byte(mtd);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);
2024 
2025 /**
2026  * nand_status_op - Do a STATUS operation
2027  * @chip: The NAND chip
2028  * @status: out variable to store the NAND status
2029  *
2030  * This function sends a STATUS command and reads back the status returned by
2031  * the NAND.
2032  * This function does not select/unselect the CS line.
2033  *
2034  * Returns 0 on success, a negative error code otherwise.
2035  */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    PSEC_TO_NSEC(sdr->tADL_min)),
			/* Status is forced to an 8-bit access */
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		/* A NULL @status only puts the chip in status-read mode */
		if (!status)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path */
	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->read_byte(mtd);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);
2063 
2064 /**
2065  * nand_exit_status_op - Exit a STATUS operation
2066  * @chip: The NAND chip
2067  *
2068  * This function sends a READ0 command to cancel the effect of the STATUS
2069  * command to avoid reading only the status until a new read command is sent.
2070  *
2071  * This function does not select/unselect the CS line.
2072  *
2073  * Returns 0 on success, a negative error code otherwise.
2074  */
2075 int nand_exit_status_op(struct nand_chip *chip)
2076 {
2077 	struct mtd_info *mtd = nand_to_mtd(chip);
2078 
2079 	if (chip->exec_op) {
2080 		struct nand_op_instr instrs[] = {
2081 			NAND_OP_CMD(NAND_CMD_READ0, 0),
2082 		};
2083 		struct nand_operation op = NAND_OPERATION(instrs);
2084 
2085 		return nand_exec_op(chip, &op);
2086 	}
2087 
2088 	chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
2089 
2090 	return 0;
2091 }
2092 EXPORT_SYMBOL_GPL(nand_exit_status_op);
2093 
2094 /**
2095  * nand_erase_op - Do an erase operation
2096  * @chip: The NAND chip
2097  * @eraseblock: block to erase
2098  *
2099  * This function sends an ERASE command and waits for the NAND to be ready
2100  * before returning.
2101  * This function does not select/unselect the CS line.
2102  *
2103  * Returns 0 on success, a negative error code otherwise.
2104  */
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	/* Convert the eraseblock index into the first page of that block. */
	unsigned int page = eraseblock <<
			    (chip->phys_erase_shift - chip->page_shift);
	int ret;
	u8 status;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		/* Row address cycles, least-significant byte first. */
		u8 addrs[3] = {	page, page >> 8, page >> 16 };
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_ERASE1, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_ERASE2,
				    PSEC_TO_MSEC(sdr->tWB_max)),
			/* tBERS_max: worst-case block erase time. */
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		/* Chips needing a third row-address cycle send all 3 bytes. */
		if (chip->options & NAND_ROW_ADDR_3)
			instrs[1].ctx.addr.naddrs++;

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Read the status register to know if the erase succeeded. */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		/* Legacy path: ->cmdfunc() + ->waitfunc(). */
		chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
		chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);

		ret = chip->waitfunc(mtd, chip);
		if (ret < 0)
			return ret;

		/* ->waitfunc() returns the chip status byte on success. */
		status = ret;
	}

	/* The FAIL status bit flags an erase that did not complete. */
	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_erase_op);
2153 
2154 /**
2155  * nand_set_features_op - Do a SET FEATURES operation
2156  * @chip: The NAND chip
2157  * @feature: feature id
2158  * @data: 4 bytes of data
2159  *
2160  * This function sends a SET FEATURES command and waits for the NAND to be
2161  * ready before returning.
2162  * This function does not select/unselect the CS line.
2163  *
2164  * Returns 0 on success, a negative error code otherwise.
2165  */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const u8 *params = data;
	int i, ret;
	u8 status;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			/* tADL_min: minimum address-to-data delay. */
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
			/* Feature parameters are always sent 8-bit wide. */
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Read the status register to detect a failed operation. */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		/* Legacy path: ->cmdfunc() + byte-by-byte parameter output. */
		chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
		for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
			chip->write_byte(mtd, params[i]);

		ret = chip->waitfunc(mtd, chip);
		if (ret < 0)
			return ret;

		/* ->waitfunc() returns the chip status byte on success. */
		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
2210 
2211 /**
2212  * nand_get_features_op - Do a GET FEATURES operation
2213  * @chip: The NAND chip
2214  * @feature: feature id
2215  * @data: 4 bytes of data
2216  *
2217  * This function sends a GET FEATURES command and waits for the NAND to be
2218  * ready before returning.
2219  * This function does not select/unselect the CS line.
2220  *
2221  * Returns 0 on success, a negative error code otherwise.
2222  */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *params = data;
	int i;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
			/* tRR_min: delay between R/B# high and first read. */
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			/* Feature parameters are always read 8-bit wide. */
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: ->cmdfunc() + byte-by-byte parameter input. */
	chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->read_byte(mtd);

	return 0;
}
2252 
2253 /**
2254  * nand_reset_op - Do a reset operation
2255  * @chip: The NAND chip
2256  *
2257  * This function sends a RESET command and waits for the NAND to be ready
2258  * before returning.
2259  * This function does not select/unselect the CS line.
2260  *
2261  * Returns 0 on success, a negative error code otherwise.
2262  */
int nand_reset_op(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			/* tWB_max: command to R/B# low; tRST_max: reset time. */
			NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: ->cmdfunc() handles the wait internally. */
	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset_op);
2284 
2285 /**
2286  * nand_read_data_op - Read data from the NAND
2287  * @chip: The NAND chip
2288  * @buf: buffer used to store the data
2289  * @len: length of the buffer
2290  * @force_8bit: force 8-bit bus access
2291  *
2292  * This function does a raw data read on the bus. Usually used after launching
2293  * another NAND operation like nand_read_page_op().
2294  * This function does not select/unselect the CS line.
2295  *
2296  * Returns 0 on success, a negative error code otherwise.
2297  */
2298 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
2299 		      bool force_8bit)
2300 {
2301 	struct mtd_info *mtd = nand_to_mtd(chip);
2302 
2303 	if (!len || !buf)
2304 		return -EINVAL;
2305 
2306 	if (chip->exec_op) {
2307 		struct nand_op_instr instrs[] = {
2308 			NAND_OP_DATA_IN(len, buf, 0),
2309 		};
2310 		struct nand_operation op = NAND_OPERATION(instrs);
2311 
2312 		instrs[0].ctx.data.force_8bit = force_8bit;
2313 
2314 		return nand_exec_op(chip, &op);
2315 	}
2316 
2317 	if (force_8bit) {
2318 		u8 *p = buf;
2319 		unsigned int i;
2320 
2321 		for (i = 0; i < len; i++)
2322 			p[i] = chip->read_byte(mtd);
2323 	} else {
2324 		chip->read_buf(mtd, buf, len);
2325 	}
2326 
2327 	return 0;
2328 }
2329 EXPORT_SYMBOL_GPL(nand_read_data_op);
2330 
2331 /**
 * nand_write_data_op - Write data to the NAND
2333  * @chip: The NAND chip
2334  * @buf: buffer containing the data to send on the bus
2335  * @len: length of the buffer
2336  * @force_8bit: force 8-bit bus access
2337  *
2338  * This function does a raw data write on the bus. Usually used after launching
2339  * another NAND operation like nand_write_page_begin_op().
2340  * This function does not select/unselect the CS line.
2341  *
2342  * Returns 0 on success, a negative error code otherwise.
2343  */
2344 int nand_write_data_op(struct nand_chip *chip, const void *buf,
2345 		       unsigned int len, bool force_8bit)
2346 {
2347 	struct mtd_info *mtd = nand_to_mtd(chip);
2348 
2349 	if (!len || !buf)
2350 		return -EINVAL;
2351 
2352 	if (chip->exec_op) {
2353 		struct nand_op_instr instrs[] = {
2354 			NAND_OP_DATA_OUT(len, buf, 0),
2355 		};
2356 		struct nand_operation op = NAND_OPERATION(instrs);
2357 
2358 		instrs[0].ctx.data.force_8bit = force_8bit;
2359 
2360 		return nand_exec_op(chip, &op);
2361 	}
2362 
2363 	if (force_8bit) {
2364 		const u8 *p = buf;
2365 		unsigned int i;
2366 
2367 		for (i = 0; i < len; i++)
2368 			chip->write_byte(mtd, p[i]);
2369 	} else {
2370 		chip->write_buf(mtd, buf, len);
2371 	}
2372 
2373 	return 0;
2374 }
2375 EXPORT_SYMBOL_GPL(nand_write_data_op);
2376 
2377 /**
2378  * struct nand_op_parser_ctx - Context used by the parser
2379  * @instrs: array of all the instructions that must be addressed
2380  * @ninstrs: length of the @instrs array
2381  * @subop: Sub-operation to be passed to the NAND controller
2382  *
2383  * This structure is used by the core to split NAND operations into
2384  * sub-operations that can be handled by the NAND controller.
2385  */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;	/* full instruction array of the operation */
	unsigned int ninstrs;			/* number of entries in @instrs */
	struct nand_subop subop;		/* current sub-operation window into @instrs */
};
2391 
2392 /**
2393  * nand_op_parser_must_split_instr - Checks if an instruction must be split
2394  * @pat: the parser pattern element that matches @instr
2395  * @instr: pointer to the instruction to check
2396  * @start_offset: this is an in/out parameter. If @instr has already been
2397  *		  split, then @start_offset is the offset from which to start
2398  *		  (either an address cycle or an offset in the data buffer).
2399  *		  Conversely, if the function returns true (ie. instr must be
2400  *		  split), this parameter is updated to point to the first
2401  *		  data/address cycle that has not been taken care of.
2402  *
2403  * Some NAND controllers are limited and cannot send X address cycles with a
2404  * unique operation, or cannot read/write more than Y bytes at the same time.
2405  * In this case, split the instruction that does not fit in a single
2406  * controller-operation into two or more chunks.
2407  *
2408  * Returns true if the instruction must be split, false otherwise.
2409  * The @start_offset parameter is also updated to the offset at which the next
2410  * bundle of instruction must start (if an address or a data instruction).
2411  */
2412 static bool
2413 nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
2414 				const struct nand_op_instr *instr,
2415 				unsigned int *start_offset)
2416 {
2417 	switch (pat->type) {
2418 	case NAND_OP_ADDR_INSTR:
2419 		if (!pat->ctx.addr.maxcycles)
2420 			break;
2421 
2422 		if (instr->ctx.addr.naddrs - *start_offset >
2423 		    pat->ctx.addr.maxcycles) {
2424 			*start_offset += pat->ctx.addr.maxcycles;
2425 			return true;
2426 		}
2427 		break;
2428 
2429 	case NAND_OP_DATA_IN_INSTR:
2430 	case NAND_OP_DATA_OUT_INSTR:
2431 		if (!pat->ctx.data.maxlen)
2432 			break;
2433 
2434 		if (instr->ctx.data.len - *start_offset >
2435 		    pat->ctx.data.maxlen) {
2436 			*start_offset += pat->ctx.data.maxlen;
2437 			return true;
2438 		}
2439 		break;
2440 
2441 	default:
2442 		break;
2443 	}
2444 
2445 	return false;
2446 }
2447 
2448 /**
2449  * nand_op_parser_match_pat - Checks if a pattern matches the instructions
2450  *			      remaining in the parser context
2451  * @pat: the pattern to test
2452  * @ctx: the parser context structure to match with the pattern @pat
2453  *
2454  * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
 * Returns true if this is the case, false otherwise. When true is returned,
2456  * @ctx->subop is updated with the set of instructions to be passed to the
2457  * controller driver.
2458  */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	/* Resume inside the first instruction if it was previously split. */
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and continue
		 * to the next one. If the element is mandatory, there's no
		 * match and we can return false directly.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * The last_instr_end_off value comes back updated to point to
		 * the position where we have to split the instruction (the
		 * start of the next subop chunk).
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			/* Count the partially-covered instruction and stop. */
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		/* Only the first instruction may start at a non-zero offset. */
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	/* Non-zero only when the last instruction was split. */
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}
2531 
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = "      ";
	unsigned int i;

	pr_debug("executing subop:\n");

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Instructions inside the current subop get a "->" marker. */
		if (instr == &ctx->subop.instrs[0])
			prefix = "    ->";

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			pr_debug("%sCMD      [0x%02x]\n", prefix,
				 instr->ctx.cmd.opcode);
			break;
		case NAND_OP_ADDR_INSTR:
			/* Clamp the dump: %*ph prints at most 64 bytes. */
			pr_debug("%sADDR     [%d cyc: %*ph]\n", prefix,
				 instr->ctx.addr.naddrs,
				 instr->ctx.addr.naddrs < 64 ?
				 instr->ctx.addr.naddrs : 64,
				 instr->ctx.addr.addrs);
			break;
		case NAND_OP_DATA_IN_INSTR:
			pr_debug("%sDATA_IN  [%d B%s]\n", prefix,
				 instr->ctx.data.len,
				 instr->ctx.data.force_8bit ?
				 ", force 8-bit" : "");
			break;
		case NAND_OP_DATA_OUT_INSTR:
			pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
				 instr->ctx.data.len,
				 instr->ctx.data.force_8bit ?
				 ", force 8-bit" : "");
			break;
		case NAND_OP_WAITRDY_INSTR:
			pr_debug("%sWAITRDY  [max %d ms]\n", prefix,
				 instr->ctx.waitrdy.timeout_ms);
			break;
		}

		/* Drop the marker once past the end of the current subop. */
		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = "      ";
	}
}
#else
/* Tracing is compiled out when debugging is disabled. */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif
2587 
2588 /**
2589  * nand_op_parser_exec_op - exec_op parser
2590  * @chip: the NAND chip
2591  * @parser: patterns description provided by the controller driver
2592  * @op: the NAND operation to address
2593  * @check_only: when true, the function only checks if @op can be handled but
2594  *		does not execute the operation
2595  *
2596  * Helper function designed to ease integration of NAND controller drivers that
2597  * only support a limited set of instruction sequences. The supported sequences
2598  * are described in @parser, and the framework takes care of splitting @op into
2599  * multiple sub-operations (if required) and pass them back to the ->exec()
2600  * callback of the matching pattern if @check_only is set to false.
2601  *
2602  * NAND controller drivers should call this function from their own ->exec_op()
2603  * implementation.
2604  *
2605  * Returns 0 on success, a negative error code otherwise. A failure can be
2606  * caused by an unsupported operation (none of the supported patterns is able
2607  * to handle the requested operation), or an error returned by one of the
2608  * matching pattern->exec() hook.
2609  */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	/* Loop until every instruction has been assigned to a subop. */
	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		int ret;

		/* Try each supported pattern in declaration order. */
		for (i = 0; i < parser->npatterns; i++) {
			const struct nand_op_parser_pattern *pattern;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &ctx))
				continue;

			nand_op_parser_trace(&ctx);

			/* In check-only mode, matching is all that matters. */
			if (check_only)
				break;

			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;

			break;
		}

		/* No pattern matched: the operation is not supported. */
		if (i == parser->npatterns) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		/*
		 * A non-zero last_instr_end_off means the last instruction
		 * was split: it must also open the next subop.
		 */
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2662 
2663 static bool nand_instr_is_data(const struct nand_op_instr *instr)
2664 {
2665 	return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2666 			 instr->type == NAND_OP_DATA_OUT_INSTR);
2667 }
2668 
2669 static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2670 				      unsigned int instr_idx)
2671 {
2672 	return subop && instr_idx < subop->ninstrs;
2673 }
2674 
2675 static int nand_subop_get_start_off(const struct nand_subop *subop,
2676 				    unsigned int instr_idx)
2677 {
2678 	if (instr_idx)
2679 		return 0;
2680 
2681 	return subop->first_instr_start_off;
2682 }
2683 
2684 /**
2685  * nand_subop_get_addr_start_off - Get the start offset in an address array
2686  * @subop: The entire sub-operation
2687  * @instr_idx: Index of the instruction inside the sub-operation
2688  *
2689  * During driver development, one could be tempted to directly use the
2690  * ->addr.addrs field of address instructions. This is wrong as address
2691  * instructions might be split.
2692  *
2693  * Given an address instruction, returns the offset of the first cycle to issue.
2694  */
2695 int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2696 				  unsigned int instr_idx)
2697 {
2698 	if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2699 	    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
2700 		return -EINVAL;
2701 
2702 	return nand_subop_get_start_off(subop, instr_idx);
2703 }
2704 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2705 
2706 /**
2707  * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2708  * @subop: The entire sub-operation
2709  * @instr_idx: Index of the instruction inside the sub-operation
2710  *
2711  * During driver development, one could be tempted to directly use the
 * ->addr->naddrs field of an address instruction. This is wrong as
 * instructions might be split.
 *
 * Given an address instruction, returns the number of address cycles to issue.
2716  */
2717 int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2718 				unsigned int instr_idx)
2719 {
2720 	int start_off, end_off;
2721 
2722 	if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2723 	    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
2724 		return -EINVAL;
2725 
2726 	start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2727 
2728 	if (instr_idx == subop->ninstrs - 1 &&
2729 	    subop->last_instr_end_off)
2730 		end_off = subop->last_instr_end_off;
2731 	else
2732 		end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2733 
2734 	return end_off - start_off;
2735 }
2736 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2737 
2738 /**
2739  * nand_subop_get_data_start_off - Get the start offset in a data array
2740  * @subop: The entire sub-operation
2741  * @instr_idx: Index of the instruction inside the sub-operation
2742  *
2743  * During driver development, one could be tempted to directly use the
2744  * ->data->buf.{in,out} field of data instructions. This is wrong as data
2745  * instructions might be split.
2746  *
2747  * Given a data instruction, returns the offset to start from.
2748  */
2749 int nand_subop_get_data_start_off(const struct nand_subop *subop,
2750 				  unsigned int instr_idx)
2751 {
2752 	if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2753 	    !nand_instr_is_data(&subop->instrs[instr_idx]))
2754 		return -EINVAL;
2755 
2756 	return nand_subop_get_start_off(subop, instr_idx);
2757 }
2758 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2759 
2760 /**
2761  * nand_subop_get_data_len - Get the number of bytes to retrieve
2762  * @subop: The entire sub-operation
2763  * @instr_idx: Index of the instruction inside the sub-operation
2764  *
2765  * During driver development, one could be tempted to directly use the
2766  * ->data->len field of a data instruction. This is wrong as data instructions
2767  * might be split.
2768  *
2769  * Returns the length of the chunk of data to send/receive.
2770  */
2771 int nand_subop_get_data_len(const struct nand_subop *subop,
2772 			    unsigned int instr_idx)
2773 {
2774 	int start_off = 0, end_off;
2775 
2776 	if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2777 	    !nand_instr_is_data(&subop->instrs[instr_idx]))
2778 		return -EINVAL;
2779 
2780 	start_off = nand_subop_get_data_start_off(subop, instr_idx);
2781 
2782 	if (instr_idx == subop->ninstrs - 1 &&
2783 	    subop->last_instr_end_off)
2784 		end_off = subop->last_instr_end_off;
2785 	else
2786 		end_off = subop->instrs[instr_idx].ctx.data.len;
2787 
2788 	return end_off - start_off;
2789 }
2790 EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2791 
2792 /**
2793  * nand_reset - Reset and initialize a NAND device
2794  * @chip: The NAND chip
2795  * @chipnr: Internal die id
2796  *
2797  * Save the timings data structure, then apply SDR timings mode 0 (see
2798  * nand_reset_data_interface for details), do the reset operation, and
2799  * apply back the previous timings.
2800  *
2801  * Returns 0 on success, a negative error code otherwise.
2802  */
int nand_reset(struct nand_chip *chip, int chipnr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	/* Keep a copy of the timings to restore them after the reset. */
	struct nand_data_interface saved_data_intf = chip->data_interface;
	int ret;

	ret = nand_reset_data_interface(chip, chipnr);
	if (ret)
		return ret;

	/*
	 * The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this weird ->select_chip() dance.
	 */
	chip->select_chip(mtd, chipnr);
	ret = nand_reset_op(chip);
	chip->select_chip(mtd, -1);
	if (ret)
		return ret;

	/*
	 * A nand_reset_data_interface() put both the NAND chip and the NAND
	 * controller in timings mode 0. If the default mode for this chip is
	 * also 0, no need to proceed to the change again. Plus, at probe time,
	 * nand_setup_data_interface() uses ->set/get_features() which would
	 * fail anyway as the parameter page is not available yet.
	 */
	if (!chip->onfi_timing_mode_default)
		return 0;

	/* Restore the saved timings on both the chip and the controller. */
	chip->data_interface = saved_data_intf;
	ret = nand_setup_data_interface(chip, chipnr);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset);
2841 
2842 /**
2843  * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
2844  * @buf: buffer to test
2845  * @len: buffer length
2846  * @bitflips_threshold: maximum number of bitflips
2847  *
2848  * Check if a buffer contains only 0xff, which means the underlying region
2849  * has been erased and is ready to be programmed.
2850  * The bitflips_threshold specify the maximum number of bitflips before
2851  * considering the region is not erased.
2852  * Note: The logic of this function has been extracted from the memweight
2853  * implementation, except that nand_check_erased_buf function exit before
2854  * testing the whole buffer if the number of bitflips exceed the
2855  * bitflips_threshold value.
2856  *
2857  * Returns a positive number of bitflips less than or equal to
2858  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2859  * threshold.
2860  */
2861 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
2862 {
2863 	const unsigned char *bitmap = buf;
2864 	int bitflips = 0;
2865 	int weight;
2866 
2867 	for (; len && ((uintptr_t)bitmap) % sizeof(long);
2868 	     len--, bitmap++) {
2869 		weight = hweight8(*bitmap);
2870 		bitflips += BITS_PER_BYTE - weight;
2871 		if (unlikely(bitflips > bitflips_threshold))
2872 			return -EBADMSG;
2873 	}
2874 
2875 	for (; len >= sizeof(long);
2876 	     len -= sizeof(long), bitmap += sizeof(long)) {
2877 		unsigned long d = *((unsigned long *)bitmap);
2878 		if (d == ~0UL)
2879 			continue;
2880 		weight = hweight_long(d);
2881 		bitflips += BITS_PER_LONG - weight;
2882 		if (unlikely(bitflips > bitflips_threshold))
2883 			return -EBADMSG;
2884 	}
2885 
2886 	for (; len > 0; len--, bitmap++) {
2887 		weight = hweight8(*bitmap);
2888 		bitflips += BITS_PER_BYTE - weight;
2889 		if (unlikely(bitflips > bitflips_threshold))
2890 			return -EBADMSG;
2891 	}
2892 
2893 	return bitflips;
2894 }
2895 
2896 /**
2897  * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
2898  *				 0xff data
2899  * @data: data buffer to test
2900  * @datalen: data length
2901  * @ecc: ECC buffer
2902  * @ecclen: ECC length
2903  * @extraoob: extra OOB buffer
2904  * @extraooblen: extra OOB length
2905  * @bitflips_threshold: maximum number of bitflips
2906  *
2907  * Check if a data buffer and its associated ECC and OOB data contains only
2908  * 0xff pattern, which means the underlying region has been erased and is
2909  * ready to be programmed.
2910  * The bitflips_threshold specify the maximum number of bitflips before
2911  * considering the region as not erased.
2912  *
2913  * Note:
2914  * 1/ ECC algorithms are working on pre-defined block sizes which are usually
2915  *    different from the NAND page size. When fixing bitflips, ECC engines will
2916  *    report the number of errors per chunk, and the NAND core infrastructure
2917  *    expect you to return the maximum number of bitflips for the whole page.
2918  *    This is why you should always use this function on a single chunk and
2919  *    not on the whole page. After checking each chunk you should update your
2920  *    max_bitflips value accordingly.
2921  * 2/ When checking for bitflips in erased pages you should not only check
2922  *    the payload data but also their associated ECC data, because a user might
2923  *    have programmed almost all bits to 1 but a few. In this case, we
2924  *    shouldn't consider the chunk as erased, and checking ECC bytes prevent
2925  *    this case.
2926  * 3/ The extraoob argument is optional, and should be used if some of your OOB
2927  *    data are protected by the ECC engine.
2928  *    It could also be used if you support subpages and want to attach some
2929  *    extra OOB data to an ECC chunk.
2930  *
2931  * Returns a positive number of bitflips less than or equal to
2932  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2933  * threshold. In case of success, the passed buffers are filled with 0xff.
2934  */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	void *bufs[3] = { data, ecc, extraoob };
	int lens[3] = { datalen, ecclen, extraooblen };
	int flips[3];
	int i, total = 0;

	/*
	 * Check the three regions in order, shrinking the remaining bitflip
	 * budget as flips are found, and bail out as soon as it is exceeded.
	 */
	for (i = 0; i < 3; i++) {
		flips[i] = nand_check_erased_buf(bufs[i], lens[i],
						 bitflips_threshold - total);
		if (flips[i] < 0)
			return flips[i];

		total += flips[i];
	}

	/*
	 * The chunk is considered erased: rewrite the all-ones pattern in
	 * every buffer that contained bitflips.
	 */
	for (i = 0; i < 3; i++) {
		if (flips[i])
			memset(bufs[i], 0xff, lens[i]);
	}

	return total;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2972 
2973 /**
2974  * nand_read_page_raw - [INTERN] read raw page data without ecc
2975  * @mtd: mtd info structure
2976  * @chip: nand chip info structure
2977  * @buf: buffer to store read data
2978  * @oob_required: caller requires OOB data read to chip->oob_poi
2979  * @page: page number to read
2980  *
2981  * Not for syndrome calculating ECC controllers, which use a special oob layout.
2982  */
2983 int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
2984 		       uint8_t *buf, int oob_required, int page)
2985 {
2986 	int ret;
2987 
2988 	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2989 	if (ret)
2990 		return ret;
2991 
2992 	if (oob_required) {
2993 		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2994 					false);
2995 		if (ret)
2996 			return ret;
2997 	}
2998 
2999 	return 0;
3000 }
3001 EXPORT_SYMBOL(nand_read_page_raw);
3002 
3003 /**
3004  * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
3005  * @mtd: mtd info structure
3006  * @chip: nand chip info structure
3007  * @buf: buffer to store read data
3008  * @oob_required: caller requires OOB data read to chip->oob_poi
3009  * @page: page number to read
3010  *
3011  * We need a special oob layout and handling even when OOB isn't used.
3012  */
static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
				       struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	/*
	 * Note: @oob_required is ignored here, the OOB bytes are always read
	 * because they are interleaved with the data.
	 */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Syndrome layout interleaves data and OOB on flash:
	 * each ECC step is data | prepad | ECC | postpad.
	 */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_read_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_read_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Read whatever OOB bytes remain after the last ECC step. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_read_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return 0;
}
3067 
3068 /**
3069  * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
3070  * @mtd: mtd info structure
3071  * @chip: nand chip info structure
3072  * @buf: buffer to store read data
3073  * @oob_required: caller requires OOB data read to chip->oob_poi
3074  * @page: page number to read
3075  */
3076 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
3077 				uint8_t *buf, int oob_required, int page)
3078 {
3079 	int i, eccsize = chip->ecc.size, ret;
3080 	int eccbytes = chip->ecc.bytes;
3081 	int eccsteps = chip->ecc.steps;
3082 	uint8_t *p = buf;
3083 	uint8_t *ecc_calc = chip->ecc.calc_buf;
3084 	uint8_t *ecc_code = chip->ecc.code_buf;
3085 	unsigned int max_bitflips = 0;
3086 
3087 	chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
3088 
3089 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
3090 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
3091 
3092 	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
3093 					 chip->ecc.total);
3094 	if (ret)
3095 		return ret;
3096 
3097 	eccsteps = chip->ecc.steps;
3098 	p = buf;
3099 
3100 	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3101 		int stat;
3102 
3103 		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
3104 		if (stat < 0) {
3105 			mtd->ecc_stats.failed++;
3106 		} else {
3107 			mtd->ecc_stats.corrected += stat;
3108 			max_bitflips = max_t(unsigned int, max_bitflips, stat);
3109 		}
3110 	}
3111 	return max_bitflips;
3112 }
3113 
3114 /**
3115  * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
3116  * @mtd: mtd info structure
3117  * @chip: nand chip info structure
3118  * @data_offs: offset of requested data within the page
3119  * @readlen: data length
3120  * @bufpoi: buffer to store read data
3121  * @page: page number to read
3122  */
static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
			uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
			int page)
{
	int start_step, end_step, num_steps, ret;
	uint8_t *p;
	int data_col_addr, i, gaps = 0;
	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
	int index, section = 0;
	unsigned int max_bitflips = 0;
	struct mtd_oob_region oobregion = { };

	/* Column address within the page aligned to ECC size (256bytes) */
	start_step = data_offs / chip->ecc.size;
	end_step = (data_offs + readlen - 1) / chip->ecc.size;
	num_steps = end_step - start_step + 1;
	index = start_step * chip->ecc.bytes;

	/* Data size aligned to ECC ecc.size */
	datafrag_len = num_steps * chip->ecc.size;
	eccfrag_len = num_steps * chip->ecc.bytes;

	data_col_addr = start_step * chip->ecc.size;
	/* If we read not a page aligned data */
	p = bufpoi + data_col_addr;
	ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
	if (ret)
		return ret;

	/* Calculate ECC */
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
		chip->ecc.calculate(mtd, p, &chip->ecc.calc_buf[i]);

	/*
	 * The performance is faster if we position offsets according to
	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
	 */
	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
	if (ret)
		return ret;

	/* A fragmented ECC layout forces us to read the whole OOB area */
	if (oobregion.length < eccfrag_len)
		gaps = 1;

	if (gaps) {
		ret = nand_change_read_column_op(chip, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 false);
		if (ret)
			return ret;
	} else {
		/*
		 * Send the command to read the particular ECC bytes take care
		 * about buswidth alignment in read_buf.
		 */
		aligned_pos = oobregion.offset & ~(busw - 1);
		aligned_len = eccfrag_len;
		if (oobregion.offset & (busw - 1))
			aligned_len++;
		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
		    (busw - 1))
			aligned_len++;

		ret = nand_change_read_column_op(chip,
						 mtd->writesize + aligned_pos,
						 &chip->oob_poi[aligned_pos],
						 aligned_len, false);
		if (ret)
			return ret;
	}

	/* Pull the stored ECC for just the steps we actually read */
	ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
					 chip->oob_poi, index, eccfrag_len);
	if (ret)
		return ret;

	/* Correct each step; -EBADMSG may still be an erased (0xFF) chunk */
	p = bufpoi + data_col_addr;
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
		int stat;

		stat = chip->ecc.correct(mtd, p, &chip->ecc.code_buf[i],
					 &chip->ecc.calc_buf[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
						&chip->ecc.code_buf[i],
						chip->ecc.bytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	/* Max bitflips seen in any single ECC step (MTD read contract) */
	return max_bitflips;
}
3225 
3226 /**
3227  * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
3228  * @mtd: mtd info structure
3229  * @chip: nand chip info structure
3230  * @buf: buffer to store read data
3231  * @oob_required: caller requires OOB data read to chip->oob_poi
3232  * @page: page number to read
3233  *
3234  * Not for syndrome calculating ECC controllers which need a special oob layout.
3235  */
static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Arm the ECC engine before each data chunk so the hardware
	 * accumulates the syndrome while the chunk is transferred.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(mtd, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
	}

	/* OOB (which holds the stored ECC bytes) follows the data area */
	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* Second pass: compare stored vs. computed ECC per step */
	eccsteps = chip->ecc.steps;
	p = buf;

	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
						&ecc_code[i], eccbytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	/* Max bitflips in any single ECC step (MTD read contract) */
	return max_bitflips;
}
3295 
3296 /**
3297  * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
3298  * @mtd: mtd info structure
3299  * @chip: nand chip info structure
3300  * @buf: buffer to store read data
3301  * @oob_required: caller requires OOB data read to chip->oob_poi
3302  * @page: page number to read
3303  *
3304  * Hardware ECC for large page chips, require OOB to be read first. For this
3305  * ECC mode, the write_page method is re-used from ECC_HW. These methods
3306  * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
3307  * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
3308  * the data area, by overwriting the NAND manufacturer bad block markings.
3309  */
3310 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
3311 	struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
3312 {
3313 	int i, eccsize = chip->ecc.size, ret;
3314 	int eccbytes = chip->ecc.bytes;
3315 	int eccsteps = chip->ecc.steps;
3316 	uint8_t *p = buf;
3317 	uint8_t *ecc_code = chip->ecc.code_buf;
3318 	uint8_t *ecc_calc = chip->ecc.calc_buf;
3319 	unsigned int max_bitflips = 0;
3320 
3321 	/* Read the OOB area first */
3322 	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3323 	if (ret)
3324 		return ret;
3325 
3326 	ret = nand_read_page_op(chip, page, 0, NULL, 0);
3327 	if (ret)
3328 		return ret;
3329 
3330 	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
3331 					 chip->ecc.total);
3332 	if (ret)
3333 		return ret;
3334 
3335 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3336 		int stat;
3337 
3338 		chip->ecc.hwctl(mtd, NAND_ECC_READ);
3339 
3340 		ret = nand_read_data_op(chip, p, eccsize, false);
3341 		if (ret)
3342 			return ret;
3343 
3344 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
3345 
3346 		stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
3347 		if (stat == -EBADMSG &&
3348 		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3349 			/* check for empty pages with bitflips */
3350 			stat = nand_check_erased_ecc_chunk(p, eccsize,
3351 						&ecc_code[i], eccbytes,
3352 						NULL, 0,
3353 						chip->ecc.strength);
3354 		}
3355 
3356 		if (stat < 0) {
3357 			mtd->ecc_stats.failed++;
3358 		} else {
3359 			mtd->ecc_stats.corrected += stat;
3360 			max_bitflips = max_t(unsigned int, max_bitflips, stat);
3361 		}
3362 	}
3363 	return max_bitflips;
3364 }
3365 
3366 /**
3367  * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
3368  * @mtd: mtd info structure
3369  * @chip: nand chip info structure
3370  * @buf: buffer to store read data
3371  * @oob_required: caller requires OOB data read to chip->oob_poi
3372  * @page: page number to read
3373  *
3374  * The hw generator calculates the error syndrome automatically. Therefore we
3375  * need a special oob layout and handling.
3376  */
static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
				   uint8_t *buf, int oob_required, int page)
{
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Flash layout per step: data | prepad | ecc | postpad. The engine
	 * is armed before the data and switched to READSYN before the ECC
	 * bytes, so it can compute the syndrome in hardware.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(mtd, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		chip->ecc.hwctl(mtd, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(mtd, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			/* oob already advanced past this step: back up by
			 * eccpadbytes to cover prepad+ecc+postpad */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	/* Max bitflips in any single ECC step (MTD read contract) */
	return max_bitflips;
}
3457 
3458 /**
3459  * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3460  * @mtd: mtd info structure
3461  * @oob: oob destination address
3462  * @ops: oob ops structure
3463  * @len: size of oob to transfer
3464  */
3465 static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
3466 				  struct mtd_oob_ops *ops, size_t len)
3467 {
3468 	struct nand_chip *chip = mtd_to_nand(mtd);
3469 	int ret;
3470 
3471 	switch (ops->mode) {
3472 
3473 	case MTD_OPS_PLACE_OOB:
3474 	case MTD_OPS_RAW:
3475 		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
3476 		return oob + len;
3477 
3478 	case MTD_OPS_AUTO_OOB:
3479 		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3480 						  ops->ooboffs, len);
3481 		BUG_ON(ret);
3482 		return oob + len;
3483 
3484 	default:
3485 		BUG();
3486 	}
3487 	return NULL;
3488 }
3489 
3490 /**
3491  * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3492  * @mtd: MTD device structure
3493  * @retry_mode: the retry mode to use
3494  *
3495  * Some vendors supply a special command to shift the Vt threshold, to be used
3496  * when there are too many bitflips in a page (i.e., ECC error). After setting
3497  * a new threshold, the host should retry reading the page.
3498  */
3499 static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
3500 {
3501 	struct nand_chip *chip = mtd_to_nand(mtd);
3502 
3503 	pr_debug("setting READ RETRY mode %d\n", retry_mode);
3504 
3505 	if (retry_mode >= chip->read_retries)
3506 		return -EINVAL;
3507 
3508 	if (!chip->setup_read_retry)
3509 		return -EOPNOTSUPP;
3510 
3511 	return chip->setup_read_retry(mtd, retry_mode);
3512 }
3513 
3514 /**
3515  * nand_do_read_ops - [INTERN] Read data with ECC
3516  * @mtd: MTD device structure
3517  * @from: offset to read from
3518  * @ops: oob ops structure
3519  *
3520  * Internal function. Called with chip held.
3521  */
static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);

	uint8_t *bufpoi, *oob, *buf;
	int use_bufpoi;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	chipnr = (int)(from >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	/* One iteration per page (or partial page at the edges) */
	while (1) {
		unsigned int ecc_failures = mtd->ecc_stats.failed;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/*
		 * Use the internal bounce buffer for partial pages, or for
		 * full pages when the caller's buffer is unsuitable for DMA
		 * (not directly mapped or misaligned).
		 */
		if (!aligned)
			use_bufpoi = 1;
		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
			use_bufpoi = !virt_addr_valid(buf) ||
				     !IS_ALIGNED((unsigned long)buf,
						 chip->buf_align);
		else
			use_bufpoi = 0;

		/* Is the current page in the buffer? */
		if (realpage != chip->pagebuf || oob) {
			bufpoi = use_bufpoi ? chip->data_buf : buf;

			if (use_bufpoi && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
						 __func__, buf);

read_retry:
			/*
			 * Now read the page into the buffer.  Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(mtd, chip,
							col, bytes, bufpoi,
							page);
			else
				ret = chip->ecc.read_page(mtd, chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bufpoi)
					/* Invalidate page cache */
					chip->pagebuf = -1;
				break;
			}

			/* Transfer not aligned data */
			if (use_bufpoi) {
				/*
				 * Cache the page only if it was read fully,
				 * with ECC applied and without new failures;
				 * otherwise the cached copy would be stale
				 * or incomplete.
				 */
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_failures) &&
				    (ops->mode != MTD_OPS_RAW)) {
					chip->pagebuf = realpage;
					chip->pagebuf_bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagebuf = -1;
				}
				memcpy(buf, chip->data_buf + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(mtd,
						oob, ops, toread);
					oobreadlen -= toread;
				}
			}

			if (chip->options & NAND_NEED_READRDY) {
				/* Apply delay or wait for ready/busy pin */
				if (!chip->dev_ready)
					udelay(chip->chip_delay);
				else
					nand_wait_ready(mtd);
			}

			/*
			 * On uncorrectable errors, step through the vendor
			 * read-retry (Vt shift) modes and re-read the page
			 * before declaring a real failure.
			 */
			if (mtd->ecc_stats.failed - ecc_failures) {
				if (retry_mode + 1 < chip->read_retries) {
					retry_mode++;
					ret = nand_setup_read_retry(mtd,
							retry_mode);
					if (ret < 0)
						break;

					/* Reset failures; retry */
					mtd->ecc_stats.failed = ecc_failures;
					goto read_retry;
				} else {
					/* No more retry modes; real failure */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Page is already cached: serve it from data_buf */
			memcpy(buf, chip->data_buf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagebuf_bitflips);
		}

		readlen -= bytes;

		/* Reset to retry mode 0 */
		if (retry_mode) {
			ret = nand_setup_read_retry(mtd, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			chip->select_chip(mtd, -1);
			chip->select_chip(mtd, chipnr);
		}
	}
	chip->select_chip(mtd, -1);

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	/* Max bitflips in any single ECC step (MTD read contract) */
	return max_bitflips;
}
3695 
3696 /**
3697  * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
3698  * @mtd: mtd info structure
3699  * @chip: nand chip info structure
3700  * @page: page number to read
3701  */
3702 int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
3703 {
3704 	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3705 }
3706 EXPORT_SYMBOL(nand_read_oob_std);
3707 
3708 /**
3709  * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3710  *			    with syndromes
3711  * @mtd: mtd info structure
3712  * @chip: nand chip info structure
3713  * @page: page number to read
3714  */
3715 int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
3716 			   int page)
3717 {
3718 	int length = mtd->oobsize;
3719 	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3720 	int eccsize = chip->ecc.size;
3721 	uint8_t *bufpoi = chip->oob_poi;
3722 	int i, toread, sndrnd = 0, pos, ret;
3723 
3724 	ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3725 	if (ret)
3726 		return ret;
3727 
3728 	for (i = 0; i < chip->ecc.steps; i++) {
3729 		if (sndrnd) {
3730 			int ret;
3731 
3732 			pos = eccsize + i * (eccsize + chunk);
3733 			if (mtd->writesize > 512)
3734 				ret = nand_change_read_column_op(chip, pos,
3735 								 NULL, 0,
3736 								 false);
3737 			else
3738 				ret = nand_read_page_op(chip, page, pos, NULL,
3739 							0);
3740 
3741 			if (ret)
3742 				return ret;
3743 		} else
3744 			sndrnd = 1;
3745 		toread = min_t(int, length, chunk);
3746 
3747 		ret = nand_read_data_op(chip, bufpoi, toread, false);
3748 		if (ret)
3749 			return ret;
3750 
3751 		bufpoi += toread;
3752 		length -= toread;
3753 	}
3754 	if (length > 0) {
3755 		ret = nand_read_data_op(chip, bufpoi, length, false);
3756 		if (ret)
3757 			return ret;
3758 	}
3759 
3760 	return 0;
3761 }
3762 EXPORT_SYMBOL(nand_read_oob_syndrome);
3763 
3764 /**
3765  * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
3766  * @mtd: mtd info structure
3767  * @chip: nand chip info structure
3768  * @page: page number to write
3769  */
3770 int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
3771 {
3772 	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3773 				 mtd->oobsize);
3774 }
3775 EXPORT_SYMBOL(nand_write_oob_std);
3776 
3777 /**
3778  * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
3779  *			     with syndrome - only for large page flash
3780  * @mtd: mtd info structure
3781  * @chip: nand chip info structure
3782  * @page: page number to write
3783  */
int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * data-ecc-data-ecc ... ecc-oob
	 * or
	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
	 */
	/* Without padding all OOB sits at the end: write it in one go */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
	if (ret)
		return ret;

	/* Walk the interleaved OOB chunks, skipping the data in between */
	for (i = 0; i < steps; i++) {
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				/*
				 * Small-page chips cannot reposition the
				 * write column: pad the skipped data area
				 * with 0xFF (no-op for NAND programming).
				 */
				uint32_t fill = 0xFFFFFFFF;

				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);

					ret = nand_write_data_op(chip, &fill,
								 num, false);
					if (ret)
						return ret;

					len -= num;
				}
			} else {
				pos = eccsize + i * (eccsize + chunk);
				ret = nand_change_write_column_op(chip, pos,
								  NULL, 0,
								  false);
				if (ret)
					return ret;
			}
		} else
			sndcmd = 1;
		len = min_t(int, length, chunk);

		ret = nand_write_data_op(chip, bufpoi, len, false);
		if (ret)
			return ret;

		bufpoi += len;
		length -= len;
	}
	/* Trailing OOB bytes after the last interleaved chunk, if any */
	if (length > 0) {
		ret = nand_write_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_oob_syndrome);
3851 
3852 /**
3853  * nand_do_read_oob - [INTERN] NAND read out-of-band
3854  * @mtd: MTD device structure
3855  * @from: offset to read from
3856  * @ops: oob operations description structure
3857  *
3858  * NAND read out-of-band data from the spare area.
3859  */
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	unsigned int max_bitflips = 0;
	int page, realpage, chipnr;
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	pr_debug("%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);

	/* Snapshot ECC stats so new failures can be detected at the end */
	stats = mtd->ecc_stats;

	len = mtd_oobavail(mtd, ops);

	chipnr = (int)(from >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* One page's worth of OOB per iteration */
	while (1) {
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(mtd, chip, page);
		else
			ret = chip->ecc.read_oob(mtd, chip, page);

		if (ret < 0)
			break;

		len = min(len, readlen);
		buf = nand_transfer_oob(mtd, buf, ops, len);

		if (chip->options & NAND_NEED_READRDY) {
			/* Apply delay or wait for ready/busy pin */
			if (!chip->dev_ready)
				udelay(chip->chip_delay);
			else
				nand_wait_ready(mtd);
		}

		max_bitflips = max_t(unsigned int, max_bitflips, ret);

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			chip->select_chip(mtd, -1);
			chip->select_chip(mtd, chipnr);
		}
	}
	chip->select_chip(mtd, -1);

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	/* Any new uncorrectable failure during this read => -EBADMSG */
	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	/* Max bitflips in any single page read (MTD read contract) */
	return max_bitflips;
}
3935 
3936 /**
3937  * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3938  * @mtd: MTD device structure
3939  * @from: offset to read from
3940  * @ops: oob operation description structure
3941  *
3942  * NAND read data and/or out-of-band data.
3943  */
3944 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3945 			 struct mtd_oob_ops *ops)
3946 {
3947 	int ret;
3948 
3949 	ops->retlen = 0;
3950 
3951 	if (ops->mode != MTD_OPS_PLACE_OOB &&
3952 	    ops->mode != MTD_OPS_AUTO_OOB &&
3953 	    ops->mode != MTD_OPS_RAW)
3954 		return -ENOTSUPP;
3955 
3956 	nand_get_device(mtd, FL_READING);
3957 
3958 	if (!ops->datbuf)
3959 		ret = nand_do_read_oob(mtd, from, ops);
3960 	else
3961 		ret = nand_do_read_ops(mtd, from, ops);
3962 
3963 	nand_release_device(mtd);
3964 	return ret;
3965 }
3966 
3967 
3968 /**
3969  * nand_write_page_raw - [INTERN] raw page write function
3970  * @mtd: mtd info structure
3971  * @chip: nand chip info structure
3972  * @buf: data buffer
3973  * @oob_required: must write chip->oob_poi to OOB
3974  * @page: page number to write
3975  *
3976  * Not for syndrome calculating ECC controllers, which use a special oob layout.
3977  */
3978 int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
3979 			const uint8_t *buf, int oob_required, int page)
3980 {
3981 	int ret;
3982 
3983 	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3984 	if (ret)
3985 		return ret;
3986 
3987 	if (oob_required) {
3988 		ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3989 					 false);
3990 		if (ret)
3991 			return ret;
3992 	}
3993 
3994 	return nand_prog_page_end_op(chip);
3995 }
3996 EXPORT_SYMBOL(nand_write_page_raw);
3997 
3998 /**
3999  * nand_write_page_raw_syndrome - [INTERN] raw page write function
4000  * @mtd: mtd info structure
4001  * @chip: nand chip info structure
4002  * @buf: data buffer
4003  * @oob_required: must write chip->oob_poi to OOB
4004  * @page: page number to write
4005  *
4006  * We need a special oob layout and handling even when ECC isn't checked.
4007  */
static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
					struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Syndrome layout interleaves OOB with data: each ECC step on flash
	 * is data[eccsize] | prepad | ecc[eccbytes] | postpad. Write the
	 * page in that order, pulling data from buf and the rest from
	 * oob_poi.
	 */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_write_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Write whatever OOB bytes remain after the last interleaved chunk */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_write_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
4063 /**
4064  * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
4065  * @mtd: mtd info structure
4066  * @chip: nand chip info structure
4067  * @buf: data buffer
4068  * @oob_required: must write chip->oob_poi to OOB
4069  * @page: page number to write
4070  */
4071 static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
4072 				 const uint8_t *buf, int oob_required,
4073 				 int page)
4074 {
4075 	int i, eccsize = chip->ecc.size, ret;
4076 	int eccbytes = chip->ecc.bytes;
4077 	int eccsteps = chip->ecc.steps;
4078 	uint8_t *ecc_calc = chip->ecc.calc_buf;
4079 	const uint8_t *p = buf;
4080 
4081 	/* Software ECC calculation */
4082 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
4083 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
4084 
4085 	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
4086 					 chip->ecc.total);
4087 	if (ret)
4088 		return ret;
4089 
4090 	return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
4091 }
4092 
4093 /**
4094  * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
4095  * @mtd: mtd info structure
4096  * @chip: nand chip info structure
4097  * @buf: data buffer
4098  * @oob_required: must write chip->oob_poi to OOB
4099  * @page: page number to write
4100  */
static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				  const uint8_t *buf, int oob_required,
				  int page)
{
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Arm the ECC engine before each data chunk so the hardware can
	 * compute the ECC while the chunk is transferred.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
	}

	/* Place the computed ECC at its layout positions in the OOB */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
4136 
4137 
4138 /**
4139  * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
4140  * @mtd:	mtd info structure
4141  * @chip:	nand chip info structure
4142  * @offset:	column address of subpage within the page
4143  * @data_len:	data length
4144  * @buf:	data buffer
4145  * @oob_required: must write chip->oob_poi to OOB
4146  * @page: page number to write
4147  */
4148 static int nand_write_subpage_hwecc(struct mtd_info *mtd,
4149 				struct nand_chip *chip, uint32_t offset,
4150 				uint32_t data_len, const uint8_t *buf,
4151 				int oob_required, int page)
4152 {
4153 	uint8_t *oob_buf  = chip->oob_poi;
4154 	uint8_t *ecc_calc = chip->ecc.calc_buf;
4155 	int ecc_size      = chip->ecc.size;
4156 	int ecc_bytes     = chip->ecc.bytes;
4157 	int ecc_steps     = chip->ecc.steps;
4158 	uint32_t start_step = offset / ecc_size;
4159 	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
4160 	int oob_bytes       = mtd->oobsize / ecc_steps;
4161 	int step, ret;
4162 
4163 	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4164 	if (ret)
4165 		return ret;
4166 
4167 	for (step = 0; step < ecc_steps; step++) {
4168 		/* configure controller for WRITE access */
4169 		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
4170 
4171 		/* write data (untouched subpages already masked by 0xFF) */
4172 		ret = nand_write_data_op(chip, buf, ecc_size, false);
4173 		if (ret)
4174 			return ret;
4175 
4176 		/* mask ECC of un-touched subpages by padding 0xFF */
4177 		if ((step < start_step) || (step > end_step))
4178 			memset(ecc_calc, 0xff, ecc_bytes);
4179 		else
4180 			chip->ecc.calculate(mtd, buf, ecc_calc);
4181 
4182 		/* mask OOB of un-touched subpages by padding 0xFF */
4183 		/* if oob_required, preserve OOB metadata of written subpage */
4184 		if (!oob_required || (step < start_step) || (step > end_step))
4185 			memset(oob_buf, 0xff, oob_bytes);
4186 
4187 		buf += ecc_size;
4188 		ecc_calc += ecc_bytes;
4189 		oob_buf  += oob_bytes;
4190 	}
4191 
4192 	/* copy calculated ECC for whole page to chip->buffer->oob */
4193 	/* this include masked-value(0xFF) for unwritten subpages */
4194 	ecc_calc = chip->ecc.calc_buf;
4195 	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
4196 					 chip->ecc.total);
4197 	if (ret)
4198 		return ret;
4199 
4200 	/* write OOB buffer to NAND device */
4201 	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
4202 	if (ret)
4203 		return ret;
4204 
4205 	return nand_prog_page_end_op(chip);
4206 }
4207 
4208 
4209 /**
4210  * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
4211  * @mtd: mtd info structure
4212  * @chip: nand chip info structure
4213  * @buf: data buffer
4214  * @oob_required: must write chip->oob_poi to OOB
4215  * @page: page number to write
4216  *
4217  * The hw generator calculates the error syndrome automatically. Therefore we
4218  * need a special oob layout and handling.
4219  */
4220 static int nand_write_page_syndrome(struct mtd_info *mtd,
4221 				    struct nand_chip *chip,
4222 				    const uint8_t *buf, int oob_required,
4223 				    int page)
4224 {
4225 	int i, eccsize = chip->ecc.size;
4226 	int eccbytes = chip->ecc.bytes;
4227 	int eccsteps = chip->ecc.steps;
4228 	const uint8_t *p = buf;
4229 	uint8_t *oob = chip->oob_poi;
4230 	int ret;
4231 
4232 	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4233 	if (ret)
4234 		return ret;
4235 
4236 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
4237 		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
4238 
4239 		ret = nand_write_data_op(chip, p, eccsize, false);
4240 		if (ret)
4241 			return ret;
4242 
4243 		if (chip->ecc.prepad) {
4244 			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
4245 						 false);
4246 			if (ret)
4247 				return ret;
4248 
4249 			oob += chip->ecc.prepad;
4250 		}
4251 
4252 		chip->ecc.calculate(mtd, p, oob);
4253 
4254 		ret = nand_write_data_op(chip, oob, eccbytes, false);
4255 		if (ret)
4256 			return ret;
4257 
4258 		oob += eccbytes;
4259 
4260 		if (chip->ecc.postpad) {
4261 			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
4262 						 false);
4263 			if (ret)
4264 				return ret;
4265 
4266 			oob += chip->ecc.postpad;
4267 		}
4268 	}
4269 
4270 	/* Calculate remaining oob bytes */
4271 	i = mtd->oobsize - (oob - chip->oob_poi);
4272 	if (i) {
4273 		ret = nand_write_data_op(chip, oob, i, false);
4274 		if (ret)
4275 			return ret;
4276 	}
4277 
4278 	return nand_prog_page_end_op(chip);
4279 }
4280 
4281 /**
4282  * nand_write_page - write one page
4283  * @mtd: MTD device structure
4284  * @chip: NAND chip descriptor
4285  * @offset: address offset within the page
4286  * @data_len: length of actual data to be written
4287  * @buf: the data to write
4288  * @oob_required: must write chip->oob_poi to OOB
4289  * @page: page number to write
4290  * @raw: use _raw version of write_page
4291  */
4292 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
4293 		uint32_t offset, int data_len, const uint8_t *buf,
4294 		int oob_required, int page, int raw)
4295 {
4296 	int status, subpage;
4297 
4298 	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
4299 		chip->ecc.write_subpage)
4300 		subpage = offset || (data_len < mtd->writesize);
4301 	else
4302 		subpage = 0;
4303 
4304 	if (unlikely(raw))
4305 		status = chip->ecc.write_page_raw(mtd, chip, buf,
4306 						  oob_required, page);
4307 	else if (subpage)
4308 		status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
4309 						 buf, oob_required, page);
4310 	else
4311 		status = chip->ecc.write_page(mtd, chip, buf, oob_required,
4312 					      page);
4313 
4314 	if (status < 0)
4315 		return status;
4316 
4317 	return 0;
4318 }
4319 
4320 /**
4321  * nand_fill_oob - [INTERN] Transfer client buffer to oob
4322  * @mtd: MTD device structure
4323  * @oob: oob data buffer
4324  * @len: oob data write length
4325  * @ops: oob ops structure
4326  */
4327 static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
4328 			      struct mtd_oob_ops *ops)
4329 {
4330 	struct nand_chip *chip = mtd_to_nand(mtd);
4331 	int ret;
4332 
4333 	/*
4334 	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
4335 	 * data from a previous OOB read.
4336 	 */
4337 	memset(chip->oob_poi, 0xff, mtd->oobsize);
4338 
4339 	switch (ops->mode) {
4340 
4341 	case MTD_OPS_PLACE_OOB:
4342 	case MTD_OPS_RAW:
4343 		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
4344 		return oob + len;
4345 
4346 	case MTD_OPS_AUTO_OOB:
4347 		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
4348 						  ops->ooboffs, len);
4349 		BUG_ON(ret);
4350 		return oob + len;
4351 
4352 	default:
4353 		BUG();
4354 	}
4355 	return NULL;
4356 }
4357 
/*
 * True when @x is not aligned to the chip's subpage size. The parameter is
 * fully parenthesized so arbitrary expressions expand safely; note the
 * expansion references a local variable named "chip" at the use site.
 */
#define NOTALIGNED(x)	(((x) & (chip->subpagesize - 1)) != 0)
4359 
4360 /**
4361  * nand_do_write_ops - [INTERN] NAND write with ECC
4362  * @mtd: MTD device structure
4363  * @to: offset to write to
4364  * @ops: oob operations description structure
4365  *
4366  * NAND write with ECC.
4367  */
4368 static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
4369 			     struct mtd_oob_ops *ops)
4370 {
4371 	int chipnr, realpage, page, column;
4372 	struct nand_chip *chip = mtd_to_nand(mtd);
4373 	uint32_t writelen = ops->len;
4374 
4375 	uint32_t oobwritelen = ops->ooblen;
4376 	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
4377 
4378 	uint8_t *oob = ops->oobbuf;
4379 	uint8_t *buf = ops->datbuf;
4380 	int ret;
4381 	int oob_required = oob ? 1 : 0;
4382 
4383 	ops->retlen = 0;
4384 	if (!writelen)
4385 		return 0;
4386 
4387 	/* Reject writes, which are not page aligned */
4388 	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
4389 		pr_notice("%s: attempt to write non page aligned data\n",
4390 			   __func__);
4391 		return -EINVAL;
4392 	}
4393 
4394 	column = to & (mtd->writesize - 1);
4395 
4396 	chipnr = (int)(to >> chip->chip_shift);
4397 	chip->select_chip(mtd, chipnr);
4398 
4399 	/* Check, if it is write protected */
4400 	if (nand_check_wp(mtd)) {
4401 		ret = -EIO;
4402 		goto err_out;
4403 	}
4404 
4405 	realpage = (int)(to >> chip->page_shift);
4406 	page = realpage & chip->pagemask;
4407 
4408 	/* Invalidate the page cache, when we write to the cached page */
4409 	if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
4410 	    ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
4411 		chip->pagebuf = -1;
4412 
4413 	/* Don't allow multipage oob writes with offset */
4414 	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
4415 		ret = -EINVAL;
4416 		goto err_out;
4417 	}
4418 
4419 	while (1) {
4420 		int bytes = mtd->writesize;
4421 		uint8_t *wbuf = buf;
4422 		int use_bufpoi;
4423 		int part_pagewr = (column || writelen < mtd->writesize);
4424 
4425 		if (part_pagewr)
4426 			use_bufpoi = 1;
4427 		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
4428 			use_bufpoi = !virt_addr_valid(buf) ||
4429 				     !IS_ALIGNED((unsigned long)buf,
4430 						 chip->buf_align);
4431 		else
4432 			use_bufpoi = 0;
4433 
4434 		/* Partial page write?, or need to use bounce buffer */
4435 		if (use_bufpoi) {
4436 			pr_debug("%s: using write bounce buffer for buf@%p\n",
4437 					 __func__, buf);
4438 			if (part_pagewr)
4439 				bytes = min_t(int, bytes - column, writelen);
4440 			chip->pagebuf = -1;
4441 			memset(chip->data_buf, 0xff, mtd->writesize);
4442 			memcpy(&chip->data_buf[column], buf, bytes);
4443 			wbuf = chip->data_buf;
4444 		}
4445 
4446 		if (unlikely(oob)) {
4447 			size_t len = min(oobwritelen, oobmaxlen);
4448 			oob = nand_fill_oob(mtd, oob, len, ops);
4449 			oobwritelen -= len;
4450 		} else {
4451 			/* We still need to erase leftover OOB data */
4452 			memset(chip->oob_poi, 0xff, mtd->oobsize);
4453 		}
4454 
4455 		ret = nand_write_page(mtd, chip, column, bytes, wbuf,
4456 				      oob_required, page,
4457 				      (ops->mode == MTD_OPS_RAW));
4458 		if (ret)
4459 			break;
4460 
4461 		writelen -= bytes;
4462 		if (!writelen)
4463 			break;
4464 
4465 		column = 0;
4466 		buf += bytes;
4467 		realpage++;
4468 
4469 		page = realpage & chip->pagemask;
4470 		/* Check, if we cross a chip boundary */
4471 		if (!page) {
4472 			chipnr++;
4473 			chip->select_chip(mtd, -1);
4474 			chip->select_chip(mtd, chipnr);
4475 		}
4476 	}
4477 
4478 	ops->retlen = ops->len - writelen;
4479 	if (unlikely(oob))
4480 		ops->oobretlen = ops->ooblen;
4481 
4482 err_out:
4483 	chip->select_chip(mtd, -1);
4484 	return ret;
4485 }
4486 
4487 /**
4488  * panic_nand_write - [MTD Interface] NAND write with ECC
4489  * @mtd: MTD device structure
4490  * @to: offset to write to
4491  * @len: number of bytes to write
4492  * @retlen: pointer to variable to store the number of written bytes
4493  * @buf: the data to write
4494  *
4495  * NAND write with ECC. Used when performing writes in interrupt context, this
4496  * may for example be called by mtdoops when writing an oops while in panic.
4497  */
4498 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4499 			    size_t *retlen, const uint8_t *buf)
4500 {
4501 	struct nand_chip *chip = mtd_to_nand(mtd);
4502 	int chipnr = (int)(to >> chip->chip_shift);
4503 	struct mtd_oob_ops ops;
4504 	int ret;
4505 
4506 	/* Grab the device */
4507 	panic_nand_get_device(chip, mtd, FL_WRITING);
4508 
4509 	chip->select_chip(mtd, chipnr);
4510 
4511 	/* Wait for the device to get ready */
4512 	panic_nand_wait(mtd, chip, 400);
4513 
4514 	memset(&ops, 0, sizeof(ops));
4515 	ops.len = len;
4516 	ops.datbuf = (uint8_t *)buf;
4517 	ops.mode = MTD_OPS_PLACE_OOB;
4518 
4519 	ret = nand_do_write_ops(mtd, to, &ops);
4520 
4521 	*retlen = ops.retlen;
4522 	return ret;
4523 }
4524 
4525 /**
4526  * nand_do_write_oob - [MTD Interface] NAND write out-of-band
4527  * @mtd: MTD device structure
4528  * @to: offset to write to
4529  * @ops: oob operation description structure
4530  *
4531  * NAND write out-of-band.
4532  */
4533 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
4534 			     struct mtd_oob_ops *ops)
4535 {
4536 	int chipnr, page, status, len;
4537 	struct nand_chip *chip = mtd_to_nand(mtd);
4538 
4539 	pr_debug("%s: to = 0x%08x, len = %i\n",
4540 			 __func__, (unsigned int)to, (int)ops->ooblen);
4541 
4542 	len = mtd_oobavail(mtd, ops);
4543 
4544 	/* Do not allow write past end of page */
4545 	if ((ops->ooboffs + ops->ooblen) > len) {
4546 		pr_debug("%s: attempt to write past end of page\n",
4547 				__func__);
4548 		return -EINVAL;
4549 	}
4550 
4551 	chipnr = (int)(to >> chip->chip_shift);
4552 
4553 	/*
4554 	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
4555 	 * of my DiskOnChip 2000 test units) will clear the whole data page too
4556 	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
4557 	 * it in the doc2000 driver in August 1999.  dwmw2.
4558 	 */
4559 	nand_reset(chip, chipnr);
4560 
4561 	chip->select_chip(mtd, chipnr);
4562 
4563 	/* Shift to get page */
4564 	page = (int)(to >> chip->page_shift);
4565 
4566 	/* Check, if it is write protected */
4567 	if (nand_check_wp(mtd)) {
4568 		chip->select_chip(mtd, -1);
4569 		return -EROFS;
4570 	}
4571 
4572 	/* Invalidate the page cache, if we write to the cached page */
4573 	if (page == chip->pagebuf)
4574 		chip->pagebuf = -1;
4575 
4576 	nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
4577 
4578 	if (ops->mode == MTD_OPS_RAW)
4579 		status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
4580 	else
4581 		status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
4582 
4583 	chip->select_chip(mtd, -1);
4584 
4585 	if (status)
4586 		return status;
4587 
4588 	ops->oobretlen = ops->ooblen;
4589 
4590 	return 0;
4591 }
4592 
4593 /**
4594  * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
4595  * @mtd: MTD device structure
4596  * @to: offset to write to
4597  * @ops: oob operation description structure
4598  */
4599 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4600 			  struct mtd_oob_ops *ops)
4601 {
4602 	int ret = -ENOTSUPP;
4603 
4604 	ops->retlen = 0;
4605 
4606 	nand_get_device(mtd, FL_WRITING);
4607 
4608 	switch (ops->mode) {
4609 	case MTD_OPS_PLACE_OOB:
4610 	case MTD_OPS_AUTO_OOB:
4611 	case MTD_OPS_RAW:
4612 		break;
4613 
4614 	default:
4615 		goto out;
4616 	}
4617 
4618 	if (!ops->datbuf)
4619 		ret = nand_do_write_oob(mtd, to, ops);
4620 	else
4621 		ret = nand_do_write_ops(mtd, to, ops);
4622 
4623 out:
4624 	nand_release_device(mtd);
4625 	return ret;
4626 }
4627 
4628 /**
4629  * single_erase - [GENERIC] NAND standard block erase command function
4630  * @mtd: MTD device structure
4631  * @page: the page address of the block which will be erased
4632  *
4633  * Standard erase command for NAND chips. Returns NAND status.
4634  */
4635 static int single_erase(struct mtd_info *mtd, int page)
4636 {
4637 	struct nand_chip *chip = mtd_to_nand(mtd);
4638 	unsigned int eraseblock;
4639 
4640 	/* Send commands to erase a block */
4641 	eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);
4642 
4643 	return nand_erase_op(chip, eraseblock);
4644 }
4645 
4646 /**
4647  * nand_erase - [MTD Interface] erase block(s)
4648  * @mtd: MTD device structure
4649  * @instr: erase instruction
4650  *
4651  * Erase one ore more blocks.
4652  */
4653 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
4654 {
4655 	return nand_erase_nand(mtd, instr, 0);
4656 }
4657 
4658 /**
4659  * nand_erase_nand - [INTERN] erase block(s)
4660  * @mtd: MTD device structure
4661  * @instr: erase instruction
4662  * @allowbbt: allow erasing the bbt area
4663  *
4664  * Erase one ore more blocks.
4665  */
4666 int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
4667 		    int allowbbt)
4668 {
4669 	int page, status, pages_per_block, ret, chipnr;
4670 	struct nand_chip *chip = mtd_to_nand(mtd);
4671 	loff_t len;
4672 
4673 	pr_debug("%s: start = 0x%012llx, len = %llu\n",
4674 			__func__, (unsigned long long)instr->addr,
4675 			(unsigned long long)instr->len);
4676 
4677 	if (check_offs_len(mtd, instr->addr, instr->len))
4678 		return -EINVAL;
4679 
4680 	/* Grab the lock and see if the device is available */
4681 	nand_get_device(mtd, FL_ERASING);
4682 
4683 	/* Shift to get first page */
4684 	page = (int)(instr->addr >> chip->page_shift);
4685 	chipnr = (int)(instr->addr >> chip->chip_shift);
4686 
4687 	/* Calculate pages in each block */
4688 	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
4689 
4690 	/* Select the NAND device */
4691 	chip->select_chip(mtd, chipnr);
4692 
4693 	/* Check, if it is write protected */
4694 	if (nand_check_wp(mtd)) {
4695 		pr_debug("%s: device is write protected!\n",
4696 				__func__);
4697 		ret = -EIO;
4698 		goto erase_exit;
4699 	}
4700 
4701 	/* Loop through the pages */
4702 	len = instr->len;
4703 
4704 	while (len) {
4705 		/* Check if we have a bad block, we do not erase bad blocks! */
4706 		if (nand_block_checkbad(mtd, ((loff_t) page) <<
4707 					chip->page_shift, allowbbt)) {
4708 			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
4709 				    __func__, page);
4710 			ret = -EIO;
4711 			goto erase_exit;
4712 		}
4713 
4714 		/*
4715 		 * Invalidate the page cache, if we erase the block which
4716 		 * contains the current cached page.
4717 		 */
4718 		if (page <= chip->pagebuf && chip->pagebuf <
4719 		    (page + pages_per_block))
4720 			chip->pagebuf = -1;
4721 
4722 		status = chip->erase(mtd, page & chip->pagemask);
4723 
4724 		/* See if block erase succeeded */
4725 		if (status) {
4726 			pr_debug("%s: failed erase, page 0x%08x\n",
4727 					__func__, page);
4728 			ret = -EIO;
4729 			instr->fail_addr =
4730 				((loff_t)page << chip->page_shift);
4731 			goto erase_exit;
4732 		}
4733 
4734 		/* Increment page address and decrement length */
4735 		len -= (1ULL << chip->phys_erase_shift);
4736 		page += pages_per_block;
4737 
4738 		/* Check, if we cross a chip boundary */
4739 		if (len && !(page & chip->pagemask)) {
4740 			chipnr++;
4741 			chip->select_chip(mtd, -1);
4742 			chip->select_chip(mtd, chipnr);
4743 		}
4744 	}
4745 
4746 	ret = 0;
4747 erase_exit:
4748 
4749 	/* Deselect and wake up anyone waiting on the device */
4750 	chip->select_chip(mtd, -1);
4751 	nand_release_device(mtd);
4752 
4753 	/* Return more or less happy */
4754 	return ret;
4755 }
4756 
4757 /**
4758  * nand_sync - [MTD Interface] sync
4759  * @mtd: MTD device structure
4760  *
4761  * Sync is actually a wait for chip ready function.
4762  */
4763 static void nand_sync(struct mtd_info *mtd)
4764 {
4765 	pr_debug("%s: called\n", __func__);
4766 
4767 	/* Grab the lock and see if the device is available */
4768 	nand_get_device(mtd, FL_SYNCING);
4769 	/* Release it and go back */
4770 	nand_release_device(mtd);
4771 }
4772 
4773 /**
4774  * nand_block_isbad - [MTD Interface] Check if block at offset is bad
4775  * @mtd: MTD device structure
4776  * @offs: offset relative to mtd start
4777  */
4778 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4779 {
4780 	struct nand_chip *chip = mtd_to_nand(mtd);
4781 	int chipnr = (int)(offs >> chip->chip_shift);
4782 	int ret;
4783 
4784 	/* Select the NAND device */
4785 	nand_get_device(mtd, FL_READING);
4786 	chip->select_chip(mtd, chipnr);
4787 
4788 	ret = nand_block_checkbad(mtd, offs, 0);
4789 
4790 	chip->select_chip(mtd, -1);
4791 	nand_release_device(mtd);
4792 
4793 	return ret;
4794 }
4795 
4796 /**
4797  * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
4798  * @mtd: MTD device structure
4799  * @ofs: offset relative to mtd start
4800  */
4801 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4802 {
4803 	int ret;
4804 
4805 	ret = nand_block_isbad(mtd, ofs);
4806 	if (ret) {
4807 		/* If it was bad already, return success and do nothing */
4808 		if (ret > 0)
4809 			return 0;
4810 		return ret;
4811 	}
4812 
4813 	return nand_block_markbad_lowlevel(mtd, ofs);
4814 }
4815 
4816 /**
4817  * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
4818  * @mtd: MTD device structure
4819  * @ofs: offset relative to mtd start
4820  * @len: length of mtd
4821  */
4822 static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
4823 {
4824 	struct nand_chip *chip = mtd_to_nand(mtd);
4825 	u32 part_start_block;
4826 	u32 part_end_block;
4827 	u32 part_start_die;
4828 	u32 part_end_die;
4829 
4830 	/*
4831 	 * max_bb_per_die and blocks_per_die used to determine
4832 	 * the maximum bad block count.
4833 	 */
4834 	if (!chip->max_bb_per_die || !chip->blocks_per_die)
4835 		return -ENOTSUPP;
4836 
4837 	/* Get the start and end of the partition in erase blocks. */
4838 	part_start_block = mtd_div_by_eb(ofs, mtd);
4839 	part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
4840 
4841 	/* Get the start and end LUNs of the partition. */
4842 	part_start_die = part_start_block / chip->blocks_per_die;
4843 	part_end_die = part_end_block / chip->blocks_per_die;
4844 
4845 	/*
4846 	 * Look up the bad blocks per unit and multiply by the number of units
4847 	 * that the partition spans.
4848 	 */
4849 	return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
4850 }
4851 
4852 /**
4853  * nand_default_set_features- [REPLACEABLE] set NAND chip features
4854  * @mtd: MTD device structure
4855  * @chip: nand chip info structure
4856  * @addr: feature address.
4857  * @subfeature_param: the subfeature parameters, a four bytes array.
4858  */
4859 static int nand_default_set_features(struct mtd_info *mtd,
4860 				     struct nand_chip *chip, int addr,
4861 				     uint8_t *subfeature_param)
4862 {
4863 	return nand_set_features_op(chip, addr, subfeature_param);
4864 }
4865 
4866 /**
4867  * nand_default_get_features- [REPLACEABLE] get NAND chip features
4868  * @mtd: MTD device structure
4869  * @chip: nand chip info structure
4870  * @addr: feature address.
4871  * @subfeature_param: the subfeature parameters, a four bytes array.
4872  */
4873 static int nand_default_get_features(struct mtd_info *mtd,
4874 				     struct nand_chip *chip, int addr,
4875 				     uint8_t *subfeature_param)
4876 {
4877 	return nand_get_features_op(chip, addr, subfeature_param);
4878 }
4879 
4880 /**
4881  * nand_get_set_features_notsupp - set/get features stub returning -ENOTSUPP
4882  * @mtd: MTD device structure
4883  * @chip: nand chip info structure
4884  * @addr: feature address.
4885  * @subfeature_param: the subfeature parameters, a four bytes array.
4886  *
4887  * Should be used by NAND controller drivers that do not support the SET/GET
4888  * FEATURES operations.
4889  */
4890 int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
4891 				  int addr, u8 *subfeature_param)
4892 {
4893 	return -ENOTSUPP;
4894 }
4895 EXPORT_SYMBOL(nand_get_set_features_notsupp);
4896 
4897 /**
4898  * nand_suspend - [MTD Interface] Suspend the NAND flash
4899  * @mtd: MTD device structure
4900  */
4901 static int nand_suspend(struct mtd_info *mtd)
4902 {
4903 	return nand_get_device(mtd, FL_PM_SUSPENDED);
4904 }
4905 
4906 /**
4907  * nand_resume - [MTD Interface] Resume the NAND flash
4908  * @mtd: MTD device structure
4909  */
4910 static void nand_resume(struct mtd_info *mtd)
4911 {
4912 	struct nand_chip *chip = mtd_to_nand(mtd);
4913 
4914 	if (chip->state == FL_PM_SUSPENDED)
4915 		nand_release_device(mtd);
4916 	else
4917 		pr_err("%s called for a chip which is not in suspended state\n",
4918 			__func__);
4919 }
4920 
4921 /**
4922  * nand_shutdown - [MTD Interface] Finish the current NAND operation and
4923  *                 prevent further operations
4924  * @mtd: MTD device structure
4925  */
4926 static void nand_shutdown(struct mtd_info *mtd)
4927 {
4928 	nand_get_device(mtd, FL_PM_SUSPENDED);
4929 }
4930 
4931 /* Set default functions */
4932 static void nand_set_defaults(struct nand_chip *chip)
4933 {
4934 	unsigned int busw = chip->options & NAND_BUSWIDTH_16;
4935 
4936 	/* check for proper chip_delay setup, set 20us if not */
4937 	if (!chip->chip_delay)
4938 		chip->chip_delay = 20;
4939 
4940 	/* check, if a user supplied command function given */
4941 	if (!chip->cmdfunc && !chip->exec_op)
4942 		chip->cmdfunc = nand_command;
4943 
4944 	/* check, if a user supplied wait function given */
4945 	if (chip->waitfunc == NULL)
4946 		chip->waitfunc = nand_wait;
4947 
4948 	if (!chip->select_chip)
4949 		chip->select_chip = nand_select_chip;
4950 
4951 	/* set for ONFI nand */
4952 	if (!chip->set_features)
4953 		chip->set_features = nand_default_set_features;
4954 	if (!chip->get_features)
4955 		chip->get_features = nand_default_get_features;
4956 
4957 	/* If called twice, pointers that depend on busw may need to be reset */
4958 	if (!chip->read_byte || chip->read_byte == nand_read_byte)
4959 		chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
4960 	if (!chip->read_word)
4961 		chip->read_word = nand_read_word;
4962 	if (!chip->block_bad)
4963 		chip->block_bad = nand_block_bad;
4964 	if (!chip->block_markbad)
4965 		chip->block_markbad = nand_default_block_markbad;
4966 	if (!chip->write_buf || chip->write_buf == nand_write_buf)
4967 		chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
4968 	if (!chip->write_byte || chip->write_byte == nand_write_byte)
4969 		chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
4970 	if (!chip->read_buf || chip->read_buf == nand_read_buf)
4971 		chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
4972 	if (!chip->scan_bbt)
4973 		chip->scan_bbt = nand_default_bbt;
4974 
4975 	if (!chip->controller) {
4976 		chip->controller = &chip->hwcontrol;
4977 		nand_hw_control_init(chip->controller);
4978 	}
4979 
4980 	if (!chip->buf_align)
4981 		chip->buf_align = 1;
4982 }
4983 
4984 /* Sanitize ONFI strings so we can safely print them */
4985 static void sanitize_string(uint8_t *s, size_t len)
4986 {
4987 	ssize_t i;
4988 
4989 	/* Null terminate */
4990 	s[len - 1] = 0;
4991 
4992 	/* Remove non printable chars */
4993 	for (i = 0; i < len - 1; i++) {
4994 		if (s[i] < ' ' || s[i] > 127)
4995 			s[i] = '?';
4996 	}
4997 
4998 	/* Remove trailing spaces */
4999 	strim(s);
5000 }
5001 
5002 static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
5003 {
5004 	int i;
5005 	while (len--) {
5006 		crc ^= *p++ << 8;
5007 		for (i = 0; i < 8; i++)
5008 			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
5009 	}
5010 
5011 	return crc;
5012 }
5013 
5014 /* Parse the Extended Parameter Page. */
5015 static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
5016 					    struct nand_onfi_params *p)
5017 {
5018 	struct onfi_ext_param_page *ep;
5019 	struct onfi_ext_section *s;
5020 	struct onfi_ext_ecc_info *ecc;
5021 	uint8_t *cursor;
5022 	int ret;
5023 	int len;
5024 	int i;
5025 
5026 	len = le16_to_cpu(p->ext_param_page_length) * 16;
5027 	ep = kmalloc(len, GFP_KERNEL);
5028 	if (!ep)
5029 		return -ENOMEM;
5030 
5031 	/* Send our own NAND_CMD_PARAM. */
5032 	ret = nand_read_param_page_op(chip, 0, NULL, 0);
5033 	if (ret)
5034 		goto ext_out;
5035 
5036 	/* Use the Change Read Column command to skip the ONFI param pages. */
5037 	ret = nand_change_read_column_op(chip,
5038 					 sizeof(*p) * p->num_of_param_pages,
5039 					 ep, len, true);
5040 	if (ret)
5041 		goto ext_out;
5042 
5043 	ret = -EINVAL;
5044 	if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
5045 		!= le16_to_cpu(ep->crc))) {
5046 		pr_debug("fail in the CRC.\n");
5047 		goto ext_out;
5048 	}
5049 
5050 	/*
5051 	 * Check the signature.
5052 	 * Do not strictly follow the ONFI spec, maybe changed in future.
5053 	 */
5054 	if (strncmp(ep->sig, "EPPS", 4)) {
5055 		pr_debug("The signature is invalid.\n");
5056 		goto ext_out;
5057 	}
5058 
5059 	/* find the ECC section. */
5060 	cursor = (uint8_t *)(ep + 1);
5061 	for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
5062 		s = ep->sections + i;
5063 		if (s->type == ONFI_SECTION_TYPE_2)
5064 			break;
5065 		cursor += s->length * 16;
5066 	}
5067 	if (i == ONFI_EXT_SECTION_MAX) {
5068 		pr_debug("We can not find the ECC section.\n");
5069 		goto ext_out;
5070 	}
5071 
5072 	/* get the info we want. */
5073 	ecc = (struct onfi_ext_ecc_info *)cursor;
5074 
5075 	if (!ecc->codeword_size) {
5076 		pr_debug("Invalid codeword size\n");
5077 		goto ext_out;
5078 	}
5079 
5080 	chip->ecc_strength_ds = ecc->ecc_bits;
5081 	chip->ecc_step_ds = 1 << ecc->codeword_size;
5082 	ret = 0;
5083 
5084 ext_out:
5085 	kfree(ep);
5086 	return ret;
5087 }
5088 
5089 /*
5090  * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
5091  */
5092 static int nand_flash_detect_onfi(struct nand_chip *chip)
5093 {
5094 	struct mtd_info *mtd = nand_to_mtd(chip);
5095 	struct nand_onfi_params *p;
5096 	char id[4];
5097 	int i, ret, val;
5098 
5099 	/* Try ONFI for unknown chip or LP */
5100 	ret = nand_readid_op(chip, 0x20, id, sizeof(id));
5101 	if (ret || strncmp(id, "ONFI", 4))
5102 		return 0;
5103 
5104 	/* ONFI chip: allocate a buffer to hold its parameter page */
5105 	p = kzalloc(sizeof(*p), GFP_KERNEL);
5106 	if (!p)
5107 		return -ENOMEM;
5108 
5109 	ret = nand_read_param_page_op(chip, 0, NULL, 0);
5110 	if (ret) {
5111 		ret = 0;
5112 		goto free_onfi_param_page;
5113 	}
5114 
5115 	for (i = 0; i < 3; i++) {
5116 		ret = nand_read_data_op(chip, p, sizeof(*p), true);
5117 		if (ret) {
5118 			ret = 0;
5119 			goto free_onfi_param_page;
5120 		}
5121 
5122 		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
5123 				le16_to_cpu(p->crc)) {
5124 			break;
5125 		}
5126 	}
5127 
5128 	if (i == 3) {
5129 		pr_err("Could not find valid ONFI parameter page; aborting\n");
5130 		goto free_onfi_param_page;
5131 	}
5132 
5133 	/* Check version */
5134 	val = le16_to_cpu(p->revision);
5135 	if (val & (1 << 5))
5136 		chip->parameters.onfi.version = 23;
5137 	else if (val & (1 << 4))
5138 		chip->parameters.onfi.version = 22;
5139 	else if (val & (1 << 3))
5140 		chip->parameters.onfi.version = 21;
5141 	else if (val & (1 << 2))
5142 		chip->parameters.onfi.version = 20;
5143 	else if (val & (1 << 1))
5144 		chip->parameters.onfi.version = 10;
5145 
5146 	if (!chip->parameters.onfi.version) {
5147 		pr_info("unsupported ONFI version: %d\n", val);
5148 		goto free_onfi_param_page;
5149 	} else {
5150 		ret = 1;
5151 	}
5152 
5153 	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
5154 	sanitize_string(p->model, sizeof(p->model));
5155 	strncpy(chip->parameters.model, p->model,
5156 		sizeof(chip->parameters.model) - 1);
5157 
5158 	mtd->writesize = le32_to_cpu(p->byte_per_page);
5159 
5160 	/*
5161 	 * pages_per_block and blocks_per_lun may not be a power-of-2 size
5162 	 * (don't ask me who thought of this...). MTD assumes that these
5163 	 * dimensions will be power-of-2, so just truncate the remaining area.
5164 	 */
5165 	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
5166 	mtd->erasesize *= mtd->writesize;
5167 
5168 	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
5169 
5170 	/* See erasesize comment */
5171 	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
5172 	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
5173 	chip->bits_per_cell = p->bits_per_cell;
5174 
5175 	chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
5176 	chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
5177 
5178 	if (le16_to_cpu(p->features) & ONFI_FEATURE_16_BIT_BUS)
5179 		chip->options |= NAND_BUSWIDTH_16;
5180 
5181 	if (p->ecc_bits != 0xff) {
5182 		chip->ecc_strength_ds = p->ecc_bits;
5183 		chip->ecc_step_ds = 512;
5184 	} else if (chip->parameters.onfi.version >= 21 &&
5185 		(le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
5186 
5187 		/*
5188 		 * The nand_flash_detect_ext_param_page() uses the
5189 		 * Change Read Column command which maybe not supported
5190 		 * by the chip->cmdfunc. So try to update the chip->cmdfunc
5191 		 * now. We do not replace user supplied command function.
5192 		 */
5193 		if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
5194 			chip->cmdfunc = nand_command_lp;
5195 
5196 		/* The Extended Parameter Page is supported since ONFI 2.1. */
5197 		if (nand_flash_detect_ext_param_page(chip, p))
5198 			pr_warn("Failed to detect ONFI extended param page\n");
5199 	} else {
5200 		pr_warn("Could not retrieve ONFI ECC requirements\n");
5201 	}
5202 
5203 	/* Save some parameters from the parameter page for future use */
5204 	if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_SET_GET_FEATURES) {
5205 		chip->parameters.supports_set_get_features = true;
5206 		bitmap_set(chip->parameters.get_feature_list,
5207 			   ONFI_FEATURE_ADDR_TIMING_MODE, 1);
5208 		bitmap_set(chip->parameters.set_feature_list,
5209 			   ONFI_FEATURE_ADDR_TIMING_MODE, 1);
5210 	}
5211 	chip->parameters.onfi.tPROG = le16_to_cpu(p->t_prog);
5212 	chip->parameters.onfi.tBERS = le16_to_cpu(p->t_bers);
5213 	chip->parameters.onfi.tR = le16_to_cpu(p->t_r);
5214 	chip->parameters.onfi.tCCS = le16_to_cpu(p->t_ccs);
5215 	chip->parameters.onfi.async_timing_mode =
5216 		le16_to_cpu(p->async_timing_mode);
5217 	chip->parameters.onfi.vendor_revision =
5218 		le16_to_cpu(p->vendor_revision);
5219 	memcpy(chip->parameters.onfi.vendor, p->vendor,
5220 	       sizeof(p->vendor));
5221 
5222 free_onfi_param_page:
5223 	kfree(p);
5224 	return ret;
5225 }
5226 
5227 /*
5228  * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
5229  */
5230 static int nand_flash_detect_jedec(struct nand_chip *chip)
5231 {
5232 	struct mtd_info *mtd = nand_to_mtd(chip);
5233 	struct nand_jedec_params *p;
5234 	struct jedec_ecc_info *ecc;
5235 	int jedec_version = 0;
5236 	char id[5];
5237 	int i, val, ret;
5238 
5239 	/* Try JEDEC for unknown chip or LP */
5240 	ret = nand_readid_op(chip, 0x40, id, sizeof(id));
5241 	if (ret || strncmp(id, "JEDEC", sizeof(id)))
5242 		return 0;
5243 
5244 	/* JEDEC chip: allocate a buffer to hold its parameter page */
5245 	p = kzalloc(sizeof(*p), GFP_KERNEL);
5246 	if (!p)
5247 		return -ENOMEM;
5248 
5249 	ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
5250 	if (ret) {
5251 		ret = 0;
5252 		goto free_jedec_param_page;
5253 	}
5254 
5255 	for (i = 0; i < 3; i++) {
5256 		ret = nand_read_data_op(chip, p, sizeof(*p), true);
5257 		if (ret) {
5258 			ret = 0;
5259 			goto free_jedec_param_page;
5260 		}
5261 
5262 		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
5263 				le16_to_cpu(p->crc))
5264 			break;
5265 	}
5266 
5267 	if (i == 3) {
5268 		pr_err("Could not find valid JEDEC parameter page; aborting\n");
5269 		goto free_jedec_param_page;
5270 	}
5271 
5272 	/* Check version */
5273 	val = le16_to_cpu(p->revision);
5274 	if (val & (1 << 2))
5275 		jedec_version = 10;
5276 	else if (val & (1 << 1))
5277 		jedec_version = 1; /* vendor specific version */
5278 
5279 	if (!jedec_version) {
5280 		pr_info("unsupported JEDEC version: %d\n", val);
5281 		goto free_jedec_param_page;
5282 	}
5283 
5284 	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
5285 	sanitize_string(p->model, sizeof(p->model));
5286 	strncpy(chip->parameters.model, p->model,
5287 		sizeof(chip->parameters.model) - 1);
5288 
5289 	mtd->writesize = le32_to_cpu(p->byte_per_page);
5290 
5291 	/* Please reference to the comment for nand_flash_detect_onfi. */
5292 	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
5293 	mtd->erasesize *= mtd->writesize;
5294 
5295 	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
5296 
5297 	/* Please reference to the comment for nand_flash_detect_onfi. */
5298 	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
5299 	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
5300 	chip->bits_per_cell = p->bits_per_cell;
5301 
5302 	if (le16_to_cpu(p->features) & JEDEC_FEATURE_16_BIT_BUS)
5303 		chip->options |= NAND_BUSWIDTH_16;
5304 
5305 	/* ECC info */
5306 	ecc = &p->ecc_info[0];
5307 
5308 	if (ecc->codeword_size >= 9) {
5309 		chip->ecc_strength_ds = ecc->ecc_bits;
5310 		chip->ecc_step_ds = 1 << ecc->codeword_size;
5311 	} else {
5312 		pr_warn("Invalid codeword size\n");
5313 	}
5314 
5315 free_jedec_param_page:
5316 	kfree(p);
5317 	return ret;
5318 }
5319 
5320 /*
5321  * nand_id_has_period - Check if an ID string has a given wraparound period
5322  * @id_data: the ID string
5323  * @arrlen: the length of the @id_data array
 * @period: the period of repetition
5325  *
5326  * Check if an ID string is repeated within a given sequence of bytes at
5327  * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
5328  * period of 3). This is a helper function for nand_id_len(). Returns non-zero
5329  * if the repetition has a period of @period; otherwise, returns zero.
5330  */
static int nand_id_has_period(u8 *id_data, int arrlen, int period)
{
	int idx;

	/* Every byte must equal the byte @period positions before it */
	for (idx = period; idx < arrlen; idx++) {
		if (id_data[idx] != id_data[idx - period])
			return 0;
	}

	return 1;
}
5340 
5341 /*
5342  * nand_id_len - Get the length of an ID string returned by CMD_READID
5343  * @id_data: the ID string
5344  * @arrlen: the length of the @id_data array
 *
5346  * Returns the length of the ID string, according to known wraparound/trailing
5347  * zero patterns. If no pattern exists, returns the length of the array.
5348  */
static int nand_id_len(u8 *id_data, int arrlen)
{
	int period, last_nonzero;

	/* Locate the last byte that is not zero */
	last_nonzero = arrlen - 1;
	while (last_nonzero >= 0 && !id_data[last_nonzero])
		last_nonzero--;

	/* The whole ID string is zero: nothing usable */
	if (last_nonzero < 0)
		return 0;

	/* Smallest period at which the ID repeats, if any */
	for (period = 1; period < arrlen; period++) {
		if (nand_id_has_period(id_data, arrlen, period))
			return period;
	}

	/* No wraparound: strip trailing zeros when present */
	if (last_nonzero + 1 < arrlen)
		return last_nonzero + 1;

	/* No pattern detected */
	return arrlen;
}
5378 
5379 /* Extract the bits of per cell from the 3rd byte of the extended ID */
5380 static int nand_get_bits_per_cell(u8 cellinfo)
5381 {
5382 	int bits;
5383 
5384 	bits = cellinfo & NAND_CI_CELLTYPE_MSK;
5385 	bits >>= NAND_CI_CELLTYPE_SHIFT;
5386 	return bits + 1;
5387 }
5388 
5389 /*
5390  * Many new NAND share similar device ID codes, which represent the size of the
5391  * chip. The rest of the parameters must be decoded according to generic or
5392  * manufacturer-specific "extended ID" decoding patterns.
5393  */
5394 void nand_decode_ext_id(struct nand_chip *chip)
5395 {
5396 	struct mtd_info *mtd = nand_to_mtd(chip);
5397 	int extid;
5398 	u8 *id_data = chip->id.data;
5399 	/* The 3rd id byte holds MLC / multichip data */
5400 	chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
5401 	/* The 4th id byte is the important one */
5402 	extid = id_data[3];
5403 
5404 	/* Calc pagesize */
5405 	mtd->writesize = 1024 << (extid & 0x03);
5406 	extid >>= 2;
5407 	/* Calc oobsize */
5408 	mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
5409 	extid >>= 2;
5410 	/* Calc blocksize. Blocksize is multiples of 64KiB */
5411 	mtd->erasesize = (64 * 1024) << (extid & 0x03);
5412 	extid >>= 2;
5413 	/* Get buswidth information */
5414 	if (extid & 0x1)
5415 		chip->options |= NAND_BUSWIDTH_16;
5416 }
5417 EXPORT_SYMBOL_GPL(nand_decode_ext_id);
5418 
5419 /*
5420  * Old devices have chip data hardcoded in the device ID table. nand_decode_id
5421  * decodes a matching ID table entry and assigns the MTD size parameters for
5422  * the chip.
5423  */
5424 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
5425 {
5426 	struct mtd_info *mtd = nand_to_mtd(chip);
5427 
5428 	mtd->erasesize = type->erasesize;
5429 	mtd->writesize = type->pagesize;
5430 	mtd->oobsize = mtd->writesize / 32;
5431 
5432 	/* All legacy ID NAND are small-page, SLC */
5433 	chip->bits_per_cell = 1;
5434 }
5435 
5436 /*
5437  * Set the bad block marker/indicator (BBM/BBI) patterns according to some
5438  * heuristic patterns using various detected parameters (e.g., manufacturer,
5439  * page size, cell-type information).
5440  */
5441 static void nand_decode_bbm_options(struct nand_chip *chip)
5442 {
5443 	struct mtd_info *mtd = nand_to_mtd(chip);
5444 
5445 	/* Set the bad block position */
5446 	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
5447 		chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
5448 	else
5449 		chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
5450 }
5451 
5452 static inline bool is_full_id_nand(struct nand_flash_dev *type)
5453 {
5454 	return type->id_len;
5455 }
5456 
5457 static bool find_full_id_nand(struct nand_chip *chip,
5458 			      struct nand_flash_dev *type)
5459 {
5460 	struct mtd_info *mtd = nand_to_mtd(chip);
5461 	u8 *id_data = chip->id.data;
5462 
5463 	if (!strncmp(type->id, id_data, type->id_len)) {
5464 		mtd->writesize = type->pagesize;
5465 		mtd->erasesize = type->erasesize;
5466 		mtd->oobsize = type->oobsize;
5467 
5468 		chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
5469 		chip->chipsize = (uint64_t)type->chipsize << 20;
5470 		chip->options |= type->options;
5471 		chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
5472 		chip->ecc_step_ds = NAND_ECC_STEP(type);
5473 		chip->onfi_timing_mode_default =
5474 					type->onfi_timing_mode_default;
5475 
5476 		strncpy(chip->parameters.model, type->name,
5477 			sizeof(chip->parameters.model) - 1);
5478 
5479 		return true;
5480 	}
5481 	return false;
5482 }
5483 
5484 /*
5485  * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
5486  * compliant and does not have a full-id or legacy-id entry in the nand_ids
5487  * table.
5488  */
5489 static void nand_manufacturer_detect(struct nand_chip *chip)
5490 {
5491 	/*
5492 	 * Try manufacturer detection if available and use
5493 	 * nand_decode_ext_id() otherwise.
5494 	 */
5495 	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
5496 	    chip->manufacturer.desc->ops->detect) {
5497 		/* The 3rd id byte holds MLC / multichip data */
5498 		chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
5499 		chip->manufacturer.desc->ops->detect(chip);
5500 	} else {
5501 		nand_decode_ext_id(chip);
5502 	}
5503 }
5504 
5505 /*
5506  * Manufacturer initialization. This function is called for all NANDs including
5507  * ONFI and JEDEC compliant ones.
5508  * Manufacturer drivers should put all their specific initialization code in
5509  * their ->init() hook.
5510  */
5511 static int nand_manufacturer_init(struct nand_chip *chip)
5512 {
5513 	if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
5514 	    !chip->manufacturer.desc->ops->init)
5515 		return 0;
5516 
5517 	return chip->manufacturer.desc->ops->init(chip);
5518 }
5519 
5520 /*
5521  * Manufacturer cleanup. This function is called for all NANDs including
5522  * ONFI and JEDEC compliant ones.
5523  * Manufacturer drivers should put all their specific cleanup code in their
5524  * ->cleanup() hook.
5525  */
5526 static void nand_manufacturer_cleanup(struct nand_chip *chip)
5527 {
5528 	/* Release manufacturer private data */
5529 	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
5530 	    chip->manufacturer.desc->ops->cleanup)
5531 		chip->manufacturer.desc->ops->cleanup(chip);
5532 }
5533 
5534 /*
5535  * Get the flash and manufacturer id and lookup if the type is supported.
5536  */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer *manufacturer;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int busw, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	ret = nand_reset(chip, 0);
	if (ret)
		return ret;

	/* Select the device */
	chip->select_chip(mtd, 0);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);
	if (ret)
		return ret;

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read entire ID string */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
	if (ret)
		return ret;

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	/* Trim wraparound/trailing-zero bytes off the raw ID string */
	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer = nand_get_manufacturer(maf_id);
	chip->manufacturer.desc = manufacturer;

	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	/* Full-id entries win outright; legacy entries match on dev_id only */
	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			break;
		}
	}

	chip->parameters.onfi.version = 0;
	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_flash_detect_onfi(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		ret = nand_flash_detect_jedec(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	strncpy(chip->parameters.model, type->name,
		sizeof(chip->parameters.model) - 1);

	chip->chipsize = (uint64_t)type->chipsize << 20;

	/* Entries without a pagesize need extended-ID/manufacturer decoding */
	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

ident_done:
	if (!mtd->name)
		mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
		return -EINVAL;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	/* ffs() on the low 32 bits only works when they are non-zero */
	if (chip->chipsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	/* More than 16 bits of row address means a 3rd row address cycle */
	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;
	chip->erase = single_erase;

	/* Do not replace user supplied command function! */
	if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
		chip->cmdfunc = nand_command_lp;

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
	return 0;
}
5705 
/*
 * Names accepted by the "nand-ecc-mode" DT property, indexed by the
 * corresponding nand_ecc_modes_t value.
 */
static const char * const nand_ecc_modes[] = {
	[NAND_ECC_NONE]		= "none",
	[NAND_ECC_SOFT]		= "soft",
	[NAND_ECC_HW]		= "hw",
	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
	[NAND_ECC_ON_DIE]	= "on-die",
};
5714 
5715 static int of_get_nand_ecc_mode(struct device_node *np)
5716 {
5717 	const char *pm;
5718 	int err, i;
5719 
5720 	err = of_property_read_string(np, "nand-ecc-mode", &pm);
5721 	if (err < 0)
5722 		return err;
5723 
5724 	for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
5725 		if (!strcasecmp(pm, nand_ecc_modes[i]))
5726 			return i;
5727 
5728 	/*
5729 	 * For backward compatibility we support few obsoleted values that don't
5730 	 * have their mappings into nand_ecc_modes_t anymore (they were merged
5731 	 * with other enums).
5732 	 */
5733 	if (!strcasecmp(pm, "soft_bch"))
5734 		return NAND_ECC_SOFT;
5735 
5736 	return -ENODEV;
5737 }
5738 
/*
 * Names accepted by the "nand-ecc-algo" DT property, indexed by the
 * corresponding nand_ecc_algo value.
 */
static const char * const nand_ecc_algos[] = {
	[NAND_ECC_HAMMING]	= "hamming",
	[NAND_ECC_BCH]		= "bch",
};
5743 
5744 static int of_get_nand_ecc_algo(struct device_node *np)
5745 {
5746 	const char *pm;
5747 	int err, i;
5748 
5749 	err = of_property_read_string(np, "nand-ecc-algo", &pm);
5750 	if (!err) {
5751 		for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
5752 			if (!strcasecmp(pm, nand_ecc_algos[i]))
5753 				return i;
5754 		return -ENODEV;
5755 	}
5756 
5757 	/*
5758 	 * For backward compatibility we also read "nand-ecc-mode" checking
5759 	 * for some obsoleted values that were specifying ECC algorithm.
5760 	 */
5761 	err = of_property_read_string(np, "nand-ecc-mode", &pm);
5762 	if (err < 0)
5763 		return err;
5764 
5765 	if (!strcasecmp(pm, "soft"))
5766 		return NAND_ECC_HAMMING;
5767 	else if (!strcasecmp(pm, "soft_bch"))
5768 		return NAND_ECC_BCH;
5769 
5770 	return -ENODEV;
5771 }
5772 
5773 static int of_get_nand_ecc_step_size(struct device_node *np)
5774 {
5775 	int ret;
5776 	u32 val;
5777 
5778 	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
5779 	return ret ? ret : val;
5780 }
5781 
5782 static int of_get_nand_ecc_strength(struct device_node *np)
5783 {
5784 	int ret;
5785 	u32 val;
5786 
5787 	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
5788 	return ret ? ret : val;
5789 }
5790 
5791 static int of_get_nand_bus_width(struct device_node *np)
5792 {
5793 	u32 val;
5794 
5795 	if (of_property_read_u32(np, "nand-bus-width", &val))
5796 		return 8;
5797 
5798 	switch (val) {
5799 	case 8:
5800 	case 16:
5801 		return val;
5802 	default:
5803 		return -EIO;
5804 	}
5805 }
5806 
/* Boolean DT property: presence means "store the bad block table on flash" */
static bool of_get_nand_on_flash_bbt(struct device_node *np)
{
	return of_property_read_bool(np, "nand-on-flash-bbt");
}
5811 
5812 static int nand_dt_init(struct nand_chip *chip)
5813 {
5814 	struct device_node *dn = nand_get_flash_node(chip);
5815 	int ecc_mode, ecc_algo, ecc_strength, ecc_step;
5816 
5817 	if (!dn)
5818 		return 0;
5819 
5820 	if (of_get_nand_bus_width(dn) == 16)
5821 		chip->options |= NAND_BUSWIDTH_16;
5822 
5823 	if (of_get_nand_on_flash_bbt(dn))
5824 		chip->bbt_options |= NAND_BBT_USE_FLASH;
5825 
5826 	ecc_mode = of_get_nand_ecc_mode(dn);
5827 	ecc_algo = of_get_nand_ecc_algo(dn);
5828 	ecc_strength = of_get_nand_ecc_strength(dn);
5829 	ecc_step = of_get_nand_ecc_step_size(dn);
5830 
5831 	if (ecc_mode >= 0)
5832 		chip->ecc.mode = ecc_mode;
5833 
5834 	if (ecc_algo >= 0)
5835 		chip->ecc.algo = ecc_algo;
5836 
5837 	if (ecc_strength >= 0)
5838 		chip->ecc.strength = ecc_strength;
5839 
5840 	if (ecc_step > 0)
5841 		chip->ecc.size = ecc_step;
5842 
5843 	if (of_property_read_bool(dn, "nand-ecc-maximize"))
5844 		chip->ecc.options |= NAND_ECC_MAXIMIZE;
5845 
5846 	return 0;
5847 }
5848 
5849 /**
5850  * nand_scan_ident - [NAND Interface] Scan for the NAND device
5851  * @mtd: MTD device structure
5852  * @maxchips: number of chips to scan for
5853  * @table: alternative NAND ID table
5854  *
5855  * This is the first phase of the normal nand_scan() function. It reads the
5856  * flash ID and sets up MTD fields accordingly.
5857  *
5858  */
int nand_scan_ident(struct mtd_info *mtd, int maxchips,
		    struct nand_flash_dev *table)
{
	int i, nand_maf_id, nand_dev_id;
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret;

	/* Enforce the right timings for reset/detection */
	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);

	/* Parse DT properties (bus width, BBT, ECC settings) first */
	ret = nand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/*
	 * ->cmdfunc() is legacy and will only be used if ->exec_op() is not
	 * populated.
	 */
	if (!chip->exec_op) {
		/*
		 * Default functions assigned for ->cmdfunc() and
		 * ->select_chip() both expect ->cmd_ctrl() to be populated.
		 */
		if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
			pr_err("->cmd_ctrl() should be provided\n");
			return -EINVAL;
		}
	}

	/* Set the default functions */
	nand_set_defaults(chip);

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		chip->select_chip(mtd, -1);
		return ret;
	}

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	chip->select_chip(mtd, -1);

	/*
	 * Check for a chip array: count how many additional dies answer with
	 * the same manufacturer/device ID as the first one.
	 */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See comment in nand_get_flash_type for reset */
		nand_reset(chip, i);

		chip->select_chip(mtd, i);
		/* Send the command for reading device ID */
		nand_readid_op(chip, 0, id, sizeof(id));
		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			chip->select_chip(mtd, -1);
			break;
		}
		chip->select_chip(mtd, -1);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	chip->numchips = i;
	mtd->size = i * chip->chipsize;

	return 0;
}
EXPORT_SYMBOL(nand_scan_ident);
5935 
/*
 * Wire up the software ECC helpers (Hamming or BCH) on chip->ecc and pick
 * sane defaults for step size and strength when the board did not set them.
 */
static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	/* Only software ECC is handled here */
	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_HAMMING:
		ecc->calculate = nand_calculate_ecc;
		ecc->correct = nand_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		if (!ecc->size)
			ecc->size = 256;
		/* Hamming: 3 ECC bytes per step, 1 correctable bit */
		ecc->bytes = 3;
		ecc->strength = 1;
		return 0;
	case NAND_ECC_BCH:
		if (!mtd_nand_has_bch()) {
			WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = nand_bch_calculate_ecc;
		ecc->correct = nand_bch_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * Board driver should supply ecc.size and ecc.strength
		 * values to select how many bits are correctable.
		 * Otherwise, default to 4 bits for large page devices.
		 */
		if (!ecc->size && (mtd->oobsize >= 64)) {
			ecc->size = 512;
			ecc->strength = 4;
		}

		/*
		 * if no ecc placement scheme was provided pickup the default
		 * large page one.
		 */
		if (!mtd->ooblayout) {
			/* handle large page devices only */
			if (mtd->oobsize < 64) {
				WARN(1, "OOB layout is required when using software BCH on small pages\n");
				return -EINVAL;
			}

			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);

		}

		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
		    ecc->options & NAND_ECC_MAXIMIZE) {
			int steps, bytes;

			/* Always prefer 1k blocks over 512bytes ones */
			ecc->size = 1024;
			steps = mtd->writesize / ecc->size;

			/* Reserve 2 bytes for the BBM */
			bytes = (mtd->oobsize - 2) / steps;
			ecc->strength = bytes * 8 / fls(8 * ecc->size);
		}

		/* See nand_bch_init() for details. */
		ecc->bytes = 0;
		ecc->priv = nand_bch_init(mtd);
		if (!ecc->priv) {
			WARN(1, "BCH ECC initialization failed!\n");
			return -EINVAL;
		}
		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}
6031 
6032 /**
6033  * nand_check_ecc_caps - check the sanity of preset ECC settings
6034  * @chip: nand chip info structure
6035  * @caps: ECC caps info structure
6036  * @oobavail: OOB size that the ECC engine can use
6037  *
6038  * When ECC step size and strength are already set, check if they are supported
6039  * by the controller and the calculated ECC bytes fit within the chip's OOB.
6040  * On success, the calculated ECC bytes is set.
6041  */
6042 int nand_check_ecc_caps(struct nand_chip *chip,
6043 			const struct nand_ecc_caps *caps, int oobavail)
6044 {
6045 	struct mtd_info *mtd = nand_to_mtd(chip);
6046 	const struct nand_ecc_step_info *stepinfo;
6047 	int preset_step = chip->ecc.size;
6048 	int preset_strength = chip->ecc.strength;
6049 	int nsteps, ecc_bytes;
6050 	int i, j;
6051 
6052 	if (WARN_ON(oobavail < 0))
6053 		return -EINVAL;
6054 
6055 	if (!preset_step || !preset_strength)
6056 		return -ENODATA;
6057 
6058 	nsteps = mtd->writesize / preset_step;
6059 
6060 	for (i = 0; i < caps->nstepinfos; i++) {
6061 		stepinfo = &caps->stepinfos[i];
6062 
6063 		if (stepinfo->stepsize != preset_step)
6064 			continue;
6065 
6066 		for (j = 0; j < stepinfo->nstrengths; j++) {
6067 			if (stepinfo->strengths[j] != preset_strength)
6068 				continue;
6069 
6070 			ecc_bytes = caps->calc_ecc_bytes(preset_step,
6071 							 preset_strength);
6072 			if (WARN_ON_ONCE(ecc_bytes < 0))
6073 				return ecc_bytes;
6074 
6075 			if (ecc_bytes * nsteps > oobavail) {
6076 				pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
6077 				       preset_step, preset_strength);
6078 				return -ENOSPC;
6079 			}
6080 
6081 			chip->ecc.bytes = ecc_bytes;
6082 
6083 			return 0;
6084 		}
6085 	}
6086 
6087 	pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
6088 	       preset_step, preset_strength);
6089 
6090 	return -ENOTSUPP;
6091 }
6092 EXPORT_SYMBOL_GPL(nand_check_ecc_caps);
6093 
6094 /**
6095  * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
6096  * @chip: nand chip info structure
6097  * @caps: ECC engine caps info structure
6098  * @oobavail: OOB size that the ECC engine can use
6099  *
6100  * If a chip's ECC requirement is provided, try to meet it with the least
6101  * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
6102  * On success, the chosen ECC settings are set.
6103  */
int nand_match_ecc_req(struct nand_chip *chip,
		       const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int req_step = chip->ecc_step_ds;
	int req_strength = chip->ecc_strength_ds;
	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
	int best_step, best_strength, best_ecc_bytes;
	int best_ecc_bytes_total = INT_MAX;
	int i, j;

	if (WARN_ON(oobavail < 0))
		return -EINVAL;

	/* No information provided by the NAND chip */
	if (!req_step || !req_strength)
		return -ENOTSUPP;

	/* number of correctable bits the chip requires in a page */
	req_corr = mtd->writesize / req_step * req_strength;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/*
			 * If both step size and strength are smaller than the
			 * chip's requirement, it is not easy to compare the
			 * resulted reliability.
			 */
			if (step_size < req_step && strength < req_strength)
				continue;

			/* The page must split into a whole number of steps */
			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;
			ecc_bytes_total = ecc_bytes * nsteps;

			/* Skip settings that do not fit or under-correct */
			if (ecc_bytes_total > oobavail ||
			    strength * nsteps < req_corr)
				continue;

			/*
			 * We assume the best is to meet the chip's requirement
			 * with the least number of ECC bytes.
			 * Note: best_step/best_strength/best_ecc_bytes are
			 * only read below when this branch was taken at least
			 * once (best_ecc_bytes_total != INT_MAX).
			 */
			if (ecc_bytes_total < best_ecc_bytes_total) {
				best_ecc_bytes_total = ecc_bytes_total;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (best_ecc_bytes_total == INT_MAX)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_match_ecc_req);
6178 
6179 /**
6180  * nand_maximize_ecc - choose the max ECC strength available
6181  * @chip: nand chip info structure
6182  * @caps: ECC engine caps info structure
6183  * @oobavail: OOB size that the ECC engine can use
6184  *
6185  * Choose the max ECC strength that is supported on the controller, and can fit
6186  * within the chip's OOB.  On success, the chosen ECC settings are set.
6187  */
int nand_maximize_ecc(struct nand_chip *chip,
		      const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int step_size, strength, nsteps, ecc_bytes, corr;
	int best_corr = 0;
	int best_step = 0;
	int best_strength, best_ecc_bytes;
	int i, j;

	if (WARN_ON(oobavail < 0))
		return -EINVAL;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		/* If chip->ecc.size is already set, respect it */
		if (chip->ecc.size && step_size != chip->ecc.size)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/* The page must split into a whole number of steps */
			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;

			/* Skip settings whose ECC bytes do not fit the OOB */
			if (ecc_bytes * nsteps > oobavail)
				continue;

			/* Total correctable bits over the whole page */
			corr = strength * nsteps;

			/*
			 * If the number of correctable bits is the same,
			 * bigger step_size has more reliability.
			 */
			if (corr > best_corr ||
			    (corr == best_corr && step_size > best_step)) {
				best_corr = corr;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	/* best_corr == 0 means no supported setting fits the OOB */
	if (!best_corr)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_maximize_ecc);
6251 
6252 /*
6253  * Check if the chip configuration meet the datasheet requirements.
6254 
6255  * If our configuration corrects A bits per B bytes and the minimum
6256  * required correction level is X bits per Y bytes, then we must ensure
6257  * both of the following are true:
6258  *
6259  * (1) A / B >= X / Y
6260  * (2) A >= X
6261  *
6262  * Requirement (1) ensures we can correct for the required bitflip density.
6263  * Requirement (2) ensures we can correct even when all bitflips are clumped
6264  * in the same sector.
6265  */
6266 static bool nand_ecc_strength_good(struct mtd_info *mtd)
6267 {
6268 	struct nand_chip *chip = mtd_to_nand(mtd);
6269 	struct nand_ecc_ctrl *ecc = &chip->ecc;
6270 	int corr, ds_corr;
6271 
6272 	if (ecc->size == 0 || chip->ecc_step_ds == 0)
6273 		/* Not enough information */
6274 		return true;
6275 
6276 	/*
6277 	 * We get the number of corrected bits per page to compare
6278 	 * the correction density.
6279 	 */
6280 	corr = (mtd->writesize * ecc->strength) / ecc->size;
6281 	ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
6282 
6283 	return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
6284 }
6285 
/**
 * nand_scan_tail - [NAND Interface] Scan for the NAND device
 * @mtd: MTD device structure
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
 *
 * Returns 0 on success or a negative error code.
 */
int nand_scan_tail(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
		   !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
		return -EINVAL;
	}

	/* One bounce buffer covering a full page of data plus its OOB area. */
	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!chip->data_buf)
		return -ENOMEM;

	/*
	 * FIXME: some NAND manufacturer drivers expect the first die to be
	 * selected when manufacturer->init() is called. They should be fixed
	 * to explicitly select the relevant die when interacting with the NAND
	 * chip.
	 */
	chip->select_chip(mtd, 0);
	ret = nand_manufacturer_init(chip);
	chip->select_chip(mtd, -1);
	if (ret)
		goto err_free_buf;

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->data_buf + mtd->writesize;

	/*
	 * If no default placement scheme is given, select an appropriate one.
	 * (Soft BCH is excluded here: its layout depends on the computed ECC
	 * geometry and is installed later from nand_set_ecc_soft_ops().)
	 */
	if (!mtd->ooblayout &&
	    !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
		switch (mtd->oobsize) {
		case 8:
		case 16:
			mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
			break;
		case 64:
		case 128:
			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
			break;
		default:
			/*
			 * Expose the whole OOB area to users if ECC_NONE
			 * is passed. We could do that for all kind of
			 * ->oobsize, but we must keep the old large/small
			 * page with ECC layout when ->oobsize <= 128 for
			 * compatibility reasons.
			 */
			if (ecc->mode == NAND_ECC_NONE) {
				mtd_set_ooblayout(mtd,
						&nand_ooblayout_lp_ops);
				break;
			}

			WARN(1, "No oob scheme defined for oobsize %d\n",
				mtd->oobsize);
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
	}

	/*
	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
	 * selected and we have 256 byte pagesize fallback to software ECC
	 */

	switch (ecc->mode) {
	case NAND_ECC_HW_OOB_FIRST:
		/* Similar to NAND_ECC_HW, but a separate read_page handle */
		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc_oob_first;
		/* fall through - shares the remaining NAND_ECC_HW defaults */

	case NAND_ECC_HW:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;
		/* fall through - syndrome case performs the hook validation */

	case NAND_ECC_HW_SYNDROME:
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				ret = -EINVAL;
				goto err_nand_manuf_cleanup;
			}
			break;
		}
		/* HW ECC step larger than the page: degrade to software ECC */
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->mode = NAND_ECC_SOFT;
		ecc->algo = NAND_ECC_HAMMING;
		/* fall through - let the soft-ECC case install the handlers */

	case NAND_ECC_SOFT:
		ret = nand_set_ecc_soft_ops(mtd);
		if (ret) {
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		break;

	case NAND_ECC_ON_DIE:
		if (!ecc->read_page || !ecc->write_page) {
			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		break;

	case NAND_ECC_NONE:
		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/* Scratch buffers used by the generic ECC read/write helpers. */
	if (ecc->correct || ecc->calculate) {
		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!ecc->calc_buf || !ecc->code_buf) {
			ret = -ENOMEM;
			goto err_nand_manuf_cleanup;
		}
	}

	/* For many systems, the standard OOB write also works for raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* propagate ecc info to mtd_info */
	mtd->ecc_strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode.
	 */
	ecc->steps = mtd->writesize / ecc->size;
	if (ecc->steps * ecc->size != mtd->writesize) {
		WARN(1, "Invalid ECC parameters\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}
	ecc->total = ecc->steps * ecc->bytes;
	if (ecc->total > mtd->oobsize) {
		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area.
	 */
	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		ret = 0;

	mtd->oobavail = ret;

	/* ECC sanity check: warn if it's too weak */
	if (!nand_ecc_strength_good(mtd))
		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
			mtd->name);

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Initialize state */
	chip->state = FL_READY;

	/* Invalidate the pagebuffer reference */
	chip->pagebuf = -1;

	/* Large page NAND with SOFT_ECC should support subpage reads */
	switch (ecc->mode) {
	case NAND_ECC_SOFT:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	/* Fill in remaining MTD driver data */
	mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
						MTD_CAP_NANDFLASH;
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = NULL;
	mtd->_unlock = NULL;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_reboot = nand_shutdown;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->_max_bad_blocks = nand_max_bad_blocks;
	mtd->writebufsize = mtd->writesize;

	/*
	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
	 * properly set.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	/* Initialize the ->data_interface field. */
	ret = nand_init_data_interface(chip);
	if (ret)
		goto err_nand_manuf_cleanup;

	/* Enter fastest possible mode on all dies. */
	for (i = 0; i < chip->numchips; i++) {
		ret = nand_setup_data_interface(chip, i);
		if (ret)
			goto err_nand_manuf_cleanup;
	}

	/* Check, if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build bad block table */
	ret = chip->scan_bbt(mtd);
	if (ret)
		goto err_nand_manuf_cleanup;

	return 0;


err_nand_manuf_cleanup:
	nand_manufacturer_cleanup(chip);

err_free_buf:
	kfree(chip->data_buf);
	/*
	 * NOTE(review): relies on the ECC buffers being NULL when not yet
	 * allocated (kfree(NULL) is a no-op) - i.e. on the chip structure
	 * having been zero-initialized by the caller; confirm for new users.
	 */
	kfree(ecc->code_buf);
	kfree(ecc->calc_buf);

	return ret;
}
EXPORT_SYMBOL(nand_scan_tail);
6619 
/*
 * is_module_text_address() isn't exported, and it's mostly a pointless
 * test if this is a module _anyway_ -- they'd have to try _really_ hard
 * to call us from in-kernel code if the core NAND support is modular.
 */
#ifdef MODULE
#define caller_is_module() (1)
#else
/* Built-in case: check whether our return address lies in module text. */
#define caller_is_module() \
	is_module_text_address((unsigned long)__builtin_return_address(0))
#endif
6631 
6632 /**
6633  * nand_scan - [NAND Interface] Scan for the NAND device
6634  * @mtd: MTD device structure
6635  * @maxchips: number of chips to scan for
6636  *
6637  * This fills out all the uninitialized function pointers with the defaults.
6638  * The flash ID is read and the mtd/chip structures are filled with the
6639  * appropriate values.
6640  */
6641 int nand_scan(struct mtd_info *mtd, int maxchips)
6642 {
6643 	int ret;
6644 
6645 	ret = nand_scan_ident(mtd, maxchips, NULL);
6646 	if (!ret)
6647 		ret = nand_scan_tail(mtd);
6648 	return ret;
6649 }
6650 EXPORT_SYMBOL(nand_scan);
6651 
6652 /**
6653  * nand_cleanup - [NAND Interface] Free resources held by the NAND device
6654  * @chip: NAND chip object
6655  */
6656 void nand_cleanup(struct nand_chip *chip)
6657 {
6658 	if (chip->ecc.mode == NAND_ECC_SOFT &&
6659 	    chip->ecc.algo == NAND_ECC_BCH)
6660 		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
6661 
6662 	/* Free bad block table memory */
6663 	kfree(chip->bbt);
6664 	kfree(chip->data_buf);
6665 	kfree(chip->ecc.code_buf);
6666 	kfree(chip->ecc.calc_buf);
6667 
6668 	/* Free bad block descriptor memory */
6669 	if (chip->badblock_pattern && chip->badblock_pattern->options
6670 			& NAND_BBT_DYNAMICSTRUCT)
6671 		kfree(chip->badblock_pattern);
6672 
6673 	/* Free manufacturer priv data. */
6674 	nand_manufacturer_cleanup(chip);
6675 }
6676 EXPORT_SYMBOL_GPL(nand_cleanup);
6677 
/**
 * nand_release - [NAND Interface] Unregister the MTD device and free resources
 *		  held by the NAND device
 * @mtd: MTD device structure
 */
void nand_release(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Tear down the MTD registration first, then release chip memory. */
	mtd_device_unregister(mtd);
	nand_cleanup(chip);
}
EXPORT_SYMBOL_GPL(nand_release);
6689 
6690 MODULE_LICENSE("GPL");
6691 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
6692 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
6693 MODULE_DESCRIPTION("Generic NAND flash driver code");
6694