/*
 * davinci_nand.c - NAND Flash Driver for DaVinci family chips
 *
 * Copyright © 2006 Texas Instruments.
 *
 * Port to 2.6.23 Copyright © 2008 by:
 *   Sander Huijsen <Shuijsen@optelecom-nkf.com>
 *   Troy Kisky <troy.kisky@boundarydevices.com>
 *   Dirk Behme <Dirk.Behme@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of.h>

#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>

/*
 * This is a device driver for the NAND flash controller found on the
 * various DaVinci family chips.  It handles up to four SoC chipselects,
 * and some flavors of secondary chipselect (e.g. based on A12) as used
 * with multichip packages.
 *
 * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
 * available on chips like the DM355 and OMAP-L137 and needed with the
 * more error-prone MLC NAND chips.
 *
 * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
 * outputs in a "wire-AND" configuration, with no per-chip signals.
 */
struct davinci_nand_info {
	struct nand_chip	chip;

	struct device		*dev;

	bool			is_readmode;

	void __iomem		*base;
	void __iomem		*vaddr;

	uint32_t		ioaddr;
	uint32_t		current_cs;

	uint32_t		mask_chipsel;
	uint32_t		mask_ale;
	uint32_t		mask_cle;

	uint32_t		core_chipsel;

	struct davinci_aemif_timing	*timing;
};

static DEFINE_SPINLOCK(davinci_nand_lock);
static bool ecc4_busy;

static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
}

static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
		int offset)
{
	return __raw_readl(info->base + offset);
}

static inline void davinci_nand_writel(struct davinci_nand_info *info,
		int offset, unsigned long value)
{
	__raw_writel(value, info->base + offset);
}

/*----------------------------------------------------------------------*/

/*
 * Access to hardware control lines:  ALE, CLE, secondary chipselect.
 */

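/*
 * There are no dedicated ALE/CLE controls here: the latch enables are
 * wired to AEMIF address lines (see the note above nand_davinci_read_buf()
 * about the usual EM_A[1]/EM_A[2] hookup).  Asserting a control line is
 * therefore just a matter of OR-ing mask_ale or mask_cle into the address
 * that the command or address byte gets written to.
 */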
static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
				   unsigned int ctrl)
{
	struct davinci_nand_info	*info = to_davinci_nand(mtd);
	uint32_t			addr = info->current_cs;
	struct nand_chip		*nand = mtd_to_nand(mtd);

	/* Did the control lines change? */
	if (ctrl & NAND_CTRL_CHANGE) {
		if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
			addr |= info->mask_cle;
		else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
			addr |= info->mask_ale;

		nand->IO_ADDR_W = (void __iomem __force *)addr;
	}

	if (cmd != NAND_CMD_NONE)
		iowrite8(cmd, nand->IO_ADDR_W);
}

static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
{
	struct davinci_nand_info	*info = to_davinci_nand(mtd);
	uint32_t			addr = info->ioaddr;

	/* maybe kick in a second chipselect */
	if (chip > 0)
		addr |= info->mask_chipsel;
	info->current_cs = addr;

	info->chip.IO_ADDR_W = (void __iomem __force *)addr;
	info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
}

/*----------------------------------------------------------------------*/

/*
 * 1-bit hardware ECC ... context maintained for each core chipselect
 */

static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);

	return davinci_nand_readl(info, NANDF1ECC_OFFSET
			+ 4 * info->core_chipsel);
}

static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
{
	struct davinci_nand_info *info;
	uint32_t nandcfr;
	unsigned long flags;

	info = to_davinci_nand(mtd);

	/* Reset ECC hardware */
	nand_davinci_readecc_1bit(mtd);

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Restart ECC hardware */
	nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
	nandcfr |= BIT(8 + info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/*
 * Read hardware ECC value and pack into three bytes
 */
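/*
 * The parity register holds two 12-bit halves, in bits 11:0 and 27:16;
 * the shift below concatenates them into a single 24-bit value.  That
 * value is then inverted so that the all-0xff OOB of an erased page
 * matches the ECC of all-0xff data (see the "invert" comment below).
 */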
static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
				      const u_char *dat, u_char *ecc_code)
{
	unsigned int ecc_val = nand_davinci_readecc_1bit(mtd);
	unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);

	/* invert so that erased block ecc is correct */
	ecc24 = ~ecc24;
	ecc_code[0] = (u_char)(ecc24);
	ecc_code[1] = (u_char)(ecc24 >> 8);
	ecc_code[2] = (u_char)(ecc24 >> 16);

	return 0;
}

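/*
 * Hamming-style correction: XOR the stored and freshly calculated codes.
 * If the two 12-bit halves of the difference are exact bit complements,
 * a single data bit flipped; bits 23:15 of the difference give the offset
 * of the failing byte and bits 14:12 the bit within it.  A difference with
 * exactly one bit set means the error was in the ECC bytes themselves;
 * anything else is uncorrectable.
 */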
static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
				     u_char *read_ecc, u_char *calc_ecc)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
					  (read_ecc[2] << 16);
	uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
					  (calc_ecc[2] << 16);
	uint32_t diff = eccCalc ^ eccNand;

	if (diff) {
		if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
			/* Correctable error */
			if ((diff >> (12 + 3)) < chip->ecc.size) {
				dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
				return 1;
			} else {
				return -EBADMSG;
			}
		} else if (!(diff & (diff - 1))) {
			/* Single bit ECC error in the ECC itself,
			 * nothing to fix */
			return 1;
		} else {
			/* Uncorrectable error */
			return -EBADMSG;
		}

	}
	return 0;
}

/*----------------------------------------------------------------------*/

/*
 * 4-bit hardware ECC ... context maintained over entire AEMIF
 *
 * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
 * since that forces use of a problematic "infix OOB" layout.
 * Among other things, it trashes manufacturer bad block markers.
 * Also, and specific to this hardware, it ECC-protects the "prepad"
 * in the OOB ... while having ECC protection for parts of OOB would
 * seem useful, the current MTD stack sometimes wants to update the
 * OOB without recomputing ECC.
 */

static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	unsigned long flags;
	u32 val;

	/* Reset ECC hardware */
	davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Start 4-bit ECC calculation for read/write */
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val &= ~(0x03 << 4);
	val |= (info->core_chipsel << 4) | BIT(12);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	info->is_readmode = (mode == NAND_ECC_READ);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/* Read raw ECC code after writing to NAND. */
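/* Each of the four registers carries two 10-bit values (hence the
 * 0x03ff03ff mask), eight values in all per 512-byte chunk.
 */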
static void
nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
{
	const u32 mask = 0x03ff03ff;

	code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
	code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
	code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
	code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
}

/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
		const u_char *dat, u_char *ecc_code)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	u32 raw_ecc[4], *p;
	unsigned i;

	/* After a read, terminate ECC calculation by a dummy read
	 * of some 4-bit ECC register.  ECC covers everything that
	 * was read; correct() just uses the hardware state, so
	 * ecc_code is not needed.
	 */
	if (info->is_readmode) {
		davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
		return 0;
	}

	/* Pack eight raw 10-bit ecc values into ten bytes, making
	 * two passes which each convert four values (in upper and
	 * lower halves of two 32-bit words) into five bytes.  The
	 * ROM boot loader uses this same packing scheme.
	 */
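	/* In each pass, p[0] carries values v0 (bits 9:0) and v1 (bits
	 * 25:16), and p[1] carries v2 and v3 the same way; the five bytes
	 * emitted below are simply the little-endian concatenation of the
	 * 40 bits v3:v2:v1:v0.
	 */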
	nand_davinci_readecc_4bit(info, raw_ecc);
	for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
		*ecc_code++ =   p[0]        & 0xff;
		*ecc_code++ = ((p[0] >>  8) & 0x03) | ((p[0] >> 14) & 0xfc);
		*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] <<  4) & 0xf0);
		*ecc_code++ = ((p[1] >>  4) & 0x3f) | ((p[1] >> 10) & 0xc0);
		*ecc_code++ =  (p[1] >> 18) & 0xff;
	}

	return 0;
}

/* Correct up to 4 bits in data we just read, using state left in the
 * hardware plus the ecc_code computed when it was first written.
 */
static int nand_davinci_correct_4bit(struct mtd_info *mtd,
		u_char *data, u_char *ecc_code, u_char *null)
{
	int i;
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	unsigned short ecc10[8];
	unsigned short *ecc16;
	u32 syndrome[4];
	u32 ecc_state;
	unsigned num_errors, corrected;
	unsigned long timeo;

	/* Unpack ten bytes into eight 10 bit values.  We know we're
	 * little-endian, and use type punning for less shifting/masking.
	 */
	if (WARN_ON(0x01 & (unsigned) ecc_code))
		return -EINVAL;
	ecc16 = (unsigned short *)ecc_code;

	ecc10[0] =  (ecc16[0] >>  0) & 0x3ff;
	ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
	ecc10[2] =  (ecc16[1] >>  4) & 0x3ff;
	ecc10[3] = ((ecc16[1] >> 14) & 0x3)  | ((ecc16[2] << 2) & 0x3fc);
	ecc10[4] =  (ecc16[2] >>  8)         | ((ecc16[3] << 8) & 0x300);
	ecc10[5] =  (ecc16[3] >>  2) & 0x3ff;
	ecc10[6] = ((ecc16[3] >> 12) & 0xf)  | ((ecc16[4] << 4) & 0x3f0);
	ecc10[7] =  (ecc16[4] >>  6) & 0x3ff;

	/* Tell ECC controller about the expected ECC codes. */
	for (i = 7; i >= 0; i--)
		davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);

	/* Allow time for syndrome calculation ... then read it.
	 * A syndrome of all zeroes means no detected errors.
	 */
	davinci_nand_readl(info, NANDFSR_OFFSET);
	nand_davinci_readecc_4bit(info, syndrome);
	if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
		return 0;

	/*
	 * Clear any previous address calculation by doing a dummy read of an
	 * error address register.
	 */
	davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);

	/* Start address calculation, and wait for it to complete.
	 * We _could_ start reading more data while this is working,
	 * to speed up the overall page read.
	 */
	davinci_nand_writel(info, NANDFCR_OFFSET,
			davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));

	/*
	 * ECC_STATE field reads 0x3 (Error correction complete) immediately
	 * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
	 * begin trying to poll for the state, you may fall right out of your
	 * loop without any of the correction calculations having taken place.
	 * The recommendation from the hardware team is to initially delay as
	 * long as ECC_STATE reads less than 4. After that, ECC HW has entered
	 * correction state.
	 */
	timeo = jiffies + usecs_to_jiffies(100);
	do {
		ecc_state = (davinci_nand_readl(info,
				NANDFSR_OFFSET) >> 8) & 0x0f;
		cpu_relax();
	} while ((ecc_state < 4) && time_before(jiffies, timeo));

	for (;;) {
		u32	fsr = davinci_nand_readl(info, NANDFSR_OFFSET);

		switch ((fsr >> 8) & 0x0f) {
		case 0:		/* no error, should not happen */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return 0;
		case 1:		/* five or more errors detected */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return -EBADMSG;
		case 2:		/* error addresses computed */
		case 3:
			num_errors = 1 + ((fsr >> 16) & 0x03);
			goto correct;
		default:	/* still working on it */
			cpu_relax();
			continue;
		}
	}

correct:
	/* correct each error */
	for (i = 0, corrected = 0; i < num_errors; i++) {
		int error_address, error_value;

		if (i > 1) {
			error_address = davinci_nand_readl(info,
						NAND_ERR_ADD2_OFFSET);
			error_value = davinci_nand_readl(info,
						NAND_ERR_ERRVAL2_OFFSET);
		} else {
			error_address = davinci_nand_readl(info,
						NAND_ERR_ADD1_OFFSET);
			error_value = davinci_nand_readl(info,
						NAND_ERR_ERRVAL1_OFFSET);
		}

		if (i & 1) {
			error_address >>= 16;
			error_value >>= 16;
		}
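		/* Offsets of 512 or more lie outside the 512-byte data
		 * area (e.g. in the ECC bytes), so only lower offsets
		 * result in a fix-up of the data buffer.
		 */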
		error_address &= 0x3ff;
		error_address = (512 + 7) - error_address;

		if (error_address < 512) {
			data[error_address] ^= error_value;
			corrected++;
		}
	}

	return corrected;
}

/*----------------------------------------------------------------------*/

/*
 * NOTE:  NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
 * how these chips are normally wired.  This translates to both 8 and 16
 * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
 *
 * For now we assume that configuration, or any other one which ignores
 * the two LSBs for NAND access ... so we can issue 32-bit reads/writes
 * and have that transparently morphed into multiple NAND operations.
 */
static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
		ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
	else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
		ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
	else
		ioread8_rep(chip->IO_ADDR_R, buf, len);
}

static void nand_davinci_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
		iowrite32_rep(chip->IO_ADDR_W, buf, len >> 2);
	else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
		iowrite16_rep(chip->IO_ADDR_W, buf, len >> 1);
	else
		iowrite8_rep(chip->IO_ADDR_W, buf, len);
}

/*
 * Check hardware register for wait status. Returns 1 if device is ready,
 * 0 if it is still busy.
 */
static int nand_davinci_dev_ready(struct mtd_info *mtd)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);

	return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
}

/*----------------------------------------------------------------------*/

/* An ECC layout for using 4-bit ECC with small-page flash, storing
 * ten ECC bytes plus the manufacturer's bad block marker byte, and
 * not overlapping the default BBT markers.
 */
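/*
 * Resulting map for a 16-byte OOB:  bytes 0-4, 6-7 and 13-15 hold the
 * ten ECC bytes, byte 5 stays reserved for the factory bad block marker,
 * and bytes 8-12 remain free (enough room for a flash BBT marker).
 */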
static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	if (section > 2)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		oobregion->length = 5;
	} else if (section == 1) {
		oobregion->offset = 6;
		oobregion->length = 2;
	} else {
		oobregion->offset = 13;
		oobregion->length = 3;
	}

	return 0;
}

static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 8;
		oobregion->length = 5;
	} else {
		oobregion->offset = 16;
		oobregion->length = mtd->oobsize - 16;
	}

	return 0;
}

static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
	.ecc = hwecc4_ooblayout_small_ecc,
	.free = hwecc4_ooblayout_small_free,
};

#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
	{.compatible = "ti,davinci-nand", },
	{.compatible = "ti,keystone-nand", },
	{},
};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);

static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
		struct davinci_nand_pdata *pdata;
		const char *mode;
		u32 prop;

		pdata = devm_kzalloc(&pdev->dev,
				sizeof(struct davinci_nand_pdata),
				GFP_KERNEL);
		pdev->dev.platform_data = pdata;
		if (!pdata)
			return ERR_PTR(-ENOMEM);
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-chipselect", &prop))
			pdata->core_chipsel = prop;
		else
			return ERR_PTR(-EINVAL);

		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-ale", &prop))
			pdata->mask_ale = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-cle", &prop))
			pdata->mask_cle = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-chipsel", &prop))
			pdata->mask_chipsel = prop;
		if (!of_property_read_string(pdev->dev.of_node,
			"ti,davinci-ecc-mode", &mode)) {
			if (!strncmp("none", mode, 4))
				pdata->ecc_mode = NAND_ECC_NONE;
			if (!strncmp("soft", mode, 4))
				pdata->ecc_mode = NAND_ECC_SOFT;
			if (!strncmp("hw", mode, 2))
				pdata->ecc_mode = NAND_ECC_HW;
		}
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-ecc-bits", &prop))
			pdata->ecc_bits = prop;

		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-nand-buswidth", &prop) && prop == 16)
			pdata->options |= NAND_BUSWIDTH_16;

		if (of_property_read_bool(pdev->dev.of_node,
			"ti,davinci-nand-use-bbt"))
			pdata->bbt_options = NAND_BBT_USE_FLASH;

		/*
		 * Since kernel v4.8, this driver has been fixed to enable
		 * use of 4-bit hardware ECC with subpages and verified on
		 * TI's keystone EVMs (K2L, K2HK and K2E).
		 * However, in the interest of not breaking systems using
		 * existing UBI partitions, sub-page writes are not being
		 * (re)enabled. If you want to use subpage writes on Keystone
		 * platforms (i.e. do not have any existing UBI partitions),
		 * then use "ti,davinci-nand" as the compatible in your
		 * device-tree file.
		 */
		if (of_device_is_compatible(pdev->dev.of_node,
					    "ti,keystone-nand")) {
			pdata->options |= NAND_NO_SUBPAGE_WRITE;
		}
	}

	return dev_get_platdata(&pdev->dev);
}
#else
static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	return dev_get_platdata(&pdev->dev);
}
#endif
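
/*
 * A minimal sketch of a device-tree node using the properties parsed
 * above (values are illustrative only; reg and parent-bus details are
 * omitted):
 *
 *	nand {
 *		compatible = "ti,davinci-nand";
 *		...
 *		ti,davinci-chipselect = <1>;
 *		ti,davinci-mask-ale = <0x08>;
 *		ti,davinci-mask-cle = <0x10>;
 *		ti,davinci-ecc-mode = "hw";
 *		ti,davinci-ecc-bits = <4>;
 *		ti,davinci-nand-use-bbt;
 *	};
 */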

static int nand_davinci_probe(struct platform_device *pdev)
{
	struct davinci_nand_pdata	*pdata;
	struct davinci_nand_info	*info;
	struct resource			*res1;
	struct resource			*res2;
	void __iomem			*vaddr;
	void __iomem			*base;
	int				ret;
	uint32_t			val;
	struct mtd_info			*mtd;

	pdata = nand_davinci_get_pdata(pdev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	/* insist on board-specific configuration */
	if (!pdata)
		return -ENODEV;

	/* which external chipselect will we be managing? */
	if (pdata->core_chipsel < 0 || pdata->core_chipsel > 3)
		return -ENODEV;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res1 || !res2) {
		dev_err(&pdev->dev, "resource missing\n");
		return -EINVAL;
	}

	vaddr = devm_ioremap_resource(&pdev->dev, res1);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/*
	 * This register range is used to set up the NAND controller. When
	 * the TI AEMIF driver is in use, the same memory address range has
	 * already been requested by AEMIF, so we cannot request it a second
	 * time; just ioremap it. The AEMIF and NAND drivers do not touch
	 * the same registers within this range.
	 */
	base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
	if (!base) {
		dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
		return -EADDRNOTAVAIL;
	}

	info->dev		= &pdev->dev;
	info->base		= base;
	info->vaddr		= vaddr;

	mtd			= nand_to_mtd(&info->chip);
	mtd->dev.parent		= &pdev->dev;
	nand_set_flash_node(&info->chip, pdev->dev.of_node);

	info->chip.IO_ADDR_R	= vaddr;
	info->chip.IO_ADDR_W	= vaddr;
	info->chip.chip_delay	= 0;
	info->chip.select_chip	= nand_davinci_select_chip;

	/* options such as NAND_BBT_USE_FLASH */
	info->chip.bbt_options	= pdata->bbt_options;
	/* options such as 16-bit widths */
	info->chip.options	= pdata->options;
	info->chip.bbt_td	= pdata->bbt_td;
	info->chip.bbt_md	= pdata->bbt_md;
	info->timing		= pdata->timing;

	info->ioaddr		= (uint32_t __force) vaddr;

	info->current_cs	= info->ioaddr;
	info->core_chipsel	= pdata->core_chipsel;
	info->mask_chipsel	= pdata->mask_chipsel;

	/* use nandboot-capable ALE/CLE masks by default */
	info->mask_ale		= pdata->mask_ale ? : MASK_ALE;
	info->mask_cle		= pdata->mask_cle ? : MASK_CLE;

	/* Set address of hardware control function */
	info->chip.cmd_ctrl	= nand_davinci_hwcontrol;
	info->chip.dev_ready	= nand_davinci_dev_ready;

	/* Speed up buffer I/O */
	info->chip.read_buf     = nand_davinci_read_buf;
	info->chip.write_buf    = nand_davinci_write_buf;

	/* Use board-specific ECC config */
	info->chip.ecc.mode	= pdata->ecc_mode;

	spin_lock_irq(&davinci_nand_lock);

	/* put CSxNAND into NAND mode */
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val |= BIT(info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	spin_unlock_irq(&davinci_nand_lock);

	/* Scan to find existence of the device(s) */
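	/*
	 * A secondary chipselect mask implies a multichip package behind
	 * this chipselect, so probe for up to two dies in that case.
	 */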
	ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
		return ret;
	}

	switch (info->chip.ecc.mode) {
	case NAND_ECC_NONE:
		pdata->ecc_bits = 0;
		break;
	case NAND_ECC_SOFT:
		pdata->ecc_bits = 0;
		/*
		 * This driver expects Hamming based ECC when ecc_mode is set
		 * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
		 * avoid adding an extra ->ecc_algo field to
		 * davinci_nand_pdata.
		 */
		info->chip.ecc.algo = NAND_ECC_HAMMING;
		break;
	case NAND_ECC_HW:
		if (pdata->ecc_bits == 4) {
			/* No sanity checks:  CPUs must support this,
			 * and the chips may not use NAND_BUSWIDTH_16.
			 */

			/* No sharing 4-bit hardware between chipselects yet */
			spin_lock_irq(&davinci_nand_lock);
			if (ecc4_busy)
				ret = -EBUSY;
			else
				ecc4_busy = true;
			spin_unlock_irq(&davinci_nand_lock);

			if (ret == -EBUSY)
				return ret;

			info->chip.ecc.calculate = nand_davinci_calculate_4bit;
			info->chip.ecc.correct = nand_davinci_correct_4bit;
			info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
			info->chip.ecc.bytes = 10;
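			/* Let the NAND core recognize erased (all-0xff)
			 * pages whose ECC check fails, instead of
			 * reporting them as uncorrectable errors.
			 */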
			info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
			info->chip.ecc.algo = NAND_ECC_BCH;
		} else {
			/* 1bit ecc hamming */
			info->chip.ecc.calculate = nand_davinci_calculate_1bit;
			info->chip.ecc.correct = nand_davinci_correct_1bit;
			info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
			info->chip.ecc.bytes = 3;
			info->chip.ecc.algo = NAND_ECC_HAMMING;
		}
		info->chip.ecc.size = 512;
		info->chip.ecc.strength = pdata->ecc_bits;
		break;
	default:
		return -EINVAL;
	}

	/* Update ECC layout if needed ... for 1-bit HW ECC, the default
	 * is OK, but it allocates 6 bytes when only 3 are needed (for
	 * each 512 bytes).  For the 4-bit HW ECC, that default is not
	 * usable:  10 bytes are needed, not 6.
	 */
	if (pdata->ecc_bits == 4) {
		int	chunks = mtd->writesize / 512;

		if (!chunks || mtd->oobsize < 16) {
			dev_dbg(&pdev->dev, "too small\n");
			ret = -EINVAL;
			goto err;
		}

		/* For small page chips, preserve the manufacturer's
		 * badblock marking data ... and make sure a flash BBT
		 * table marker fits in the free bytes.
		 */
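		/* Large page chips (4 or 8 512-byte chunks) use the
		 * standard large-page layout instead, and read the ECC
		 * bytes from OOB before the data ("OOB first") so each
		 * chunk can be checked as it is read.
		 */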
		if (chunks == 1) {
			mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
		} else if (chunks == 4 || chunks == 8) {
			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
			info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
		} else {
			ret = -EIO;
			goto err;
		}
	}

	ret = nand_scan_tail(mtd);
	if (ret < 0)
		goto err;

	if (pdata->parts)
		ret = mtd_device_parse_register(mtd, NULL, NULL,
					pdata->parts, pdata->nr_parts);
	else
		ret = mtd_device_register(mtd, NULL, 0);
	if (ret < 0)
		goto err_cleanup_nand;

	val = davinci_nand_readl(info, NRCSR_OFFSET);
	dev_info(&pdev->dev, "controller rev. %d.%d\n",
	       (val >> 8) & 0xff, val & 0xff);

	return 0;

err_cleanup_nand:
	nand_cleanup(&info->chip);

err:
	spin_lock_irq(&davinci_nand_lock);
	if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
		ecc4_busy = false;
	spin_unlock_irq(&davinci_nand_lock);
	return ret;
}

static int nand_davinci_remove(struct platform_device *pdev)
{
	struct davinci_nand_info *info = platform_get_drvdata(pdev);

	spin_lock_irq(&davinci_nand_lock);
	if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
		ecc4_busy = false;
	spin_unlock_irq(&davinci_nand_lock);

	nand_release(nand_to_mtd(&info->chip));

	return 0;
}

static struct platform_driver nand_davinci_driver = {
	.probe		= nand_davinci_probe,
	.remove		= nand_davinci_remove,
	.driver		= {
		.name	= "davinci_nand",
		.of_match_table = of_match_ptr(davinci_nand_of_match),
	},
};
MODULE_ALIAS("platform:davinci_nand");

module_platform_driver(nand_davinci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("Davinci NAND flash driver");