xref: /openbmc/linux/drivers/mtd/nand/raw/omap2.c (revision c900529f3d9161bfde5cca0754f83b4d3c3e0220)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
4   * Copyright © 2004 Micron Technology Inc.
5   * Copyright © 2004 David Brownell
6   */
7  
8  #include <linux/platform_device.h>
9  #include <linux/dmaengine.h>
10  #include <linux/dma-mapping.h>
11  #include <linux/delay.h>
12  #include <linux/gpio/consumer.h>
13  #include <linux/module.h>
14  #include <linux/interrupt.h>
15  #include <linux/jiffies.h>
16  #include <linux/sched.h>
17  #include <linux/mtd/mtd.h>
18  #include <linux/mtd/nand-ecc-sw-bch.h>
19  #include <linux/mtd/rawnand.h>
20  #include <linux/mtd/partitions.h>
21  #include <linux/omap-dma.h>
22  #include <linux/iopoll.h>
23  #include <linux/slab.h>
24  #include <linux/of.h>
25  #include <linux/of_platform.h>
26  
27  #include <linux/platform_data/elm.h>
28  
29  #include <linux/omap-gpmc.h>
30  #include <linux/platform_data/mtd-nand-omap2.h>
31  
32  #define	DRIVER_NAME	"omap2-nand"
33  #define	OMAP_NAND_TIMEOUT_MS	5000
34  
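/*
 * 1-bit Hamming ECC parity bit masks. gen_true_ecc() uses these (via the
 * P*e()/P*o() helpers below) to rebuild the true ECC value from the 3-byte
 * ECC code produced by omap_calculate_ecc().
 */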
35  #define NAND_Ecc_P1e		(1 << 0)
36  #define NAND_Ecc_P2e		(1 << 1)
37  #define NAND_Ecc_P4e		(1 << 2)
38  #define NAND_Ecc_P8e		(1 << 3)
39  #define NAND_Ecc_P16e		(1 << 4)
40  #define NAND_Ecc_P32e		(1 << 5)
41  #define NAND_Ecc_P64e		(1 << 6)
42  #define NAND_Ecc_P128e		(1 << 7)
43  #define NAND_Ecc_P256e		(1 << 8)
44  #define NAND_Ecc_P512e		(1 << 9)
45  #define NAND_Ecc_P1024e		(1 << 10)
46  #define NAND_Ecc_P2048e		(1 << 11)
47  
48  #define NAND_Ecc_P1o		(1 << 16)
49  #define NAND_Ecc_P2o		(1 << 17)
50  #define NAND_Ecc_P4o		(1 << 18)
51  #define NAND_Ecc_P8o		(1 << 19)
52  #define NAND_Ecc_P16o		(1 << 20)
53  #define NAND_Ecc_P32o		(1 << 21)
54  #define NAND_Ecc_P64o		(1 << 22)
55  #define NAND_Ecc_P128o		(1 << 23)
56  #define NAND_Ecc_P256o		(1 << 24)
57  #define NAND_Ecc_P512o		(1 << 25)
58  #define NAND_Ecc_P1024o		(1 << 26)
59  #define NAND_Ecc_P2048o		(1 << 27)
60  
61  #define TF(value)	(value ? 1 : 0)
62  
63  #define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0)
64  #define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1)
65  #define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2)
66  #define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3)
67  #define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4)
68  #define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5)
69  #define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6)
70  #define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7)
71  
72  #define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0)
73  #define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1)
74  #define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2)
75  #define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3)
76  #define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4)
77  #define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5)
78  #define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6)
79  #define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7)
80  
81  #define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0)
82  #define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1)
83  #define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2)
84  #define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3)
85  #define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4)
86  #define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5)
87  #define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6)
88  #define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7)
89  
90  #define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0)
91  #define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1)
92  #define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2)
93  #define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3)
94  #define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4)
95  #define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5)
96  #define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6)
97  #define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7)
98  
99  #define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
100  #define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)
101  
102  #define	PREFETCH_CONFIG1_CS_SHIFT	24
103  #define	ECC_CONFIG_CS_SHIFT		1
104  #define	CS_MASK				0x7
105  #define	ENABLE_PREFETCH			(0x1 << 7)
106  #define	DMA_MPU_MODE_SHIFT		2
107  #define	ECCSIZE0_SHIFT			12
108  #define	ECCSIZE1_SHIFT			22
109  #define	ECC1RESULTSIZE			0x1
110  #define	ECCCLEAR			0x100
111  #define	ECC1				0x1
112  #define	PREFETCH_FIFOTHRESHOLD_MAX	0x40
113  #define	PREFETCH_FIFOTHRESHOLD(val)	((val) << 8)
114  #define	PREFETCH_STATUS_COUNT(val)	(val & 0x00003fff)
115  #define	PREFETCH_STATUS_FIFO_CNT(val)	((val >> 24) & 0x7F)
116  #define	STATUS_BUFF_EMPTY		0x00000001
117  
118  #define SECTOR_BYTES		512
119  /* 4 bit padding to make byte aligned, 56 = 52 + 4 */
120  #define BCH4_BIT_PAD		4
121  
122  /* GPMC ecc engine settings for read */
123  #define BCH_WRAPMODE_1		1	/* BCH wrap mode 1 */
124  #define BCH8R_ECC_SIZE0		0x1a	/* ecc_size0 = 26 */
125  #define BCH8R_ECC_SIZE1		0x2	/* ecc_size1 = 2 */
126  #define BCH4R_ECC_SIZE0		0xd	/* ecc_size0 = 13 */
127  #define BCH4R_ECC_SIZE1		0x3	/* ecc_size1 = 3 */
128  
129  /* GPMC ecc engine settings for write */
130  #define BCH_WRAPMODE_6		6	/* BCH wrap mode 6 */
131  #define BCH_ECC_SIZE0		0x0	/* ecc_size0 = 0, no oob protection */
132  #define BCH_ECC_SIZE1		0x20	/* ecc_size1 = 32 */
133  
134  #define BBM_LEN			2
135  
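/*
 * ECC bytes expected for an erased (all-0xff) sector, used by
 * omap_elm_correct_data() to detect erased pages.
 */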
136  static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55,
137  				0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78,
138  				0x48, 0x76, 0xa9, 0x3b, 0x97, 0xd1, 0x7a, 0x93,
139  				0x07, 0x0e};
140  static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
141  	0xac, 0x6b, 0xff, 0x99, 0x7b};
142  static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
143  
144  struct omap_nand_info {
145  	struct nand_chip		nand;
146  	struct platform_device		*pdev;
147  
148  	int				gpmc_cs;
149  	bool				dev_ready;
150  	enum nand_io			xfer_type;
151  	enum omap_ecc			ecc_opt;
152  	struct device_node		*elm_of_node;
153  
154  	unsigned long			phys_base;
155  	struct completion		comp;
156  	struct dma_chan			*dma;
157  	int				gpmc_irq_fifo;
158  	int				gpmc_irq_count;
159  	enum {
160  		OMAP_NAND_IO_READ = 0,	/* read */
161  		OMAP_NAND_IO_WRITE,	/* write */
162  	} iomode;
163  	u_char				*buf;
164  	int					buf_len;
165  	/* Interface to GPMC */
166  	void __iomem			*fifo;
167  	struct gpmc_nand_regs		reg;
168  	struct gpmc_nand_ops		*ops;
169  	bool				flash_bbt;
170  	/* fields specific for BCHx_HW ECC scheme */
171  	struct device			*elm_dev;
172  	/* NAND ready gpio */
173  	struct gpio_desc		*ready_gpiod;
174  	unsigned int			neccpg;
175  	unsigned int			nsteps_per_eccpg;
176  	unsigned int			eccpg_size;
177  	unsigned int			eccpg_bytes;
178  	void (*data_in)(struct nand_chip *chip, void *buf,
179  			unsigned int len, bool force_8bit);
180  	void (*data_out)(struct nand_chip *chip,
181  			 const void *buf, unsigned int len,
182  			 bool force_8bit);
183  };
184  
185  static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
186  {
187  	return container_of(mtd_to_nand(mtd), struct omap_nand_info, nand);
188  }
189  
190  static void omap_nand_data_in(struct nand_chip *chip, void *buf,
191  			      unsigned int len, bool force_8bit);
192  
193  static void omap_nand_data_out(struct nand_chip *chip,
194  			       const void *buf, unsigned int len,
195  			       bool force_8bit);
196  
197  /**
198   * omap_prefetch_enable - configures and starts prefetch transfer
199   * @cs: cs (chip select) number
200   * @fifo_th: fifo threshold to be used for read/write
201   * @dma_mode: dma mode enable (1) or disable (0)
202   * @u32_count: number of bytes to be transferred
203   * @is_write: prefetch read (0) or post write (1) mode
204   * @info: NAND device structure containing platform data
205   */
206  static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
207  	unsigned int u32_count, int is_write, struct omap_nand_info *info)
208  {
209  	u32 val;
210  
211  	if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
212  		return -1;
213  
214  	if (readl(info->reg.gpmc_prefetch_control))
215  		return -EBUSY;
216  
217  	/* Set the amount of bytes to be prefetched */
218  	writel(u32_count, info->reg.gpmc_prefetch_config2);
219  
220  	/* Set dma/mpu mode, the prefetch read / post write and
221  	 * enable the engine. Set the cs that has requested it.
222  	 */
223  	val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
224  		PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
225  		(dma_mode << DMA_MPU_MODE_SHIFT) | (is_write & 0x1));
226  	writel(val, info->reg.gpmc_prefetch_config1);
227  
228  	/*  Start the prefetch engine */
229  	writel(0x1, info->reg.gpmc_prefetch_control);
230  
231  	return 0;
232  }
233  
234  /*
235   * omap_prefetch_reset - disables and stops the prefetch engine
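 * @cs: cs (chip select) number
 * @info: NAND device structure containing platform data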
236   */
237  static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
238  {
239  	u32 config1;
240  
241  	/* check if the same module/cs is trying to reset */
242  	config1 = readl(info->reg.gpmc_prefetch_config1);
243  	if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
244  		return -EINVAL;
245  
246  	/* Stop the PFPW engine */
247  	writel(0x0, info->reg.gpmc_prefetch_control);
248  
249  	/* Reset/disable the PFPW engine */
250  	writel(0x0, info->reg.gpmc_prefetch_config1);
251  
252  	return 0;
253  }
254  
255  /**
256   * omap_nand_data_in_pref - NAND data in using prefetch engine
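 * @chip: NAND chip object
 * @buf: destination buffer
 * @len: number of bytes to read
 * @force_8bit: force 8-bit transfers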
257   */
258  static void omap_nand_data_in_pref(struct nand_chip *chip, void *buf,
259  				   unsigned int len, bool force_8bit)
260  {
261  	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
262  	uint32_t r_count = 0;
263  	int ret = 0;
264  	u32 *p = (u32 *)buf;
265  	unsigned int pref_len;
266  
267  	if (force_8bit) {
268  		omap_nand_data_in(chip, buf, len, force_8bit);
269  		return;
270  	}
271  
272  	/* read 32-bit words using prefetch and remaining bytes normally */
273  
274  	/* configure and start prefetch transfer */
275  	pref_len = len - (len & 3);
276  	ret = omap_prefetch_enable(info->gpmc_cs,
277  			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, pref_len, 0x0, info);
278  	if (ret) {
279  		/* prefetch engine is busy, use CPU copy method */
280  		omap_nand_data_in(chip, buf, len, false);
281  	} else {
282  		do {
283  			r_count = readl(info->reg.gpmc_prefetch_status);
284  			r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
285  			r_count = r_count >> 2;
286  			ioread32_rep(info->fifo, p, r_count);
287  			p += r_count;
288  			pref_len -= r_count << 2;
289  		} while (pref_len);
290  		/* disable and stop the Prefetch engine */
291  		omap_prefetch_reset(info->gpmc_cs, info);
292  		/* fetch any remaining bytes */
293  		if (len & 3)
294  			omap_nand_data_in(chip, p, len & 3, false);
295  	}
296  }
297  
298  /**
299   * omap_nand_data_out_pref - NAND data out using Write Posting engine
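 * @chip: NAND chip object
 * @buf: source buffer
 * @len: number of bytes to write
 * @force_8bit: force 8-bit transfers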
300   */
301  static void omap_nand_data_out_pref(struct nand_chip *chip,
302  				    const void *buf, unsigned int len,
303  				    bool force_8bit)
304  {
305  	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
306  	uint32_t w_count = 0;
307  	int i = 0, ret = 0;
308  	u16 *p = (u16 *)buf;
309  	unsigned long tim, limit;
310  	u32 val;
311  
312  	if (force_8bit) {
313  		omap_nand_data_out(chip, buf, len, force_8bit);
314  		return;
315  	}
316  
317  	/* take care of subpage writes */
318  	if (len % 2 != 0) {
319  		writeb(*(u8 *)buf, info->fifo);
320  		p = (u16 *)(buf + 1);
321  		len--;
322  	}
323  
324  	/*  configure and start prefetch transfer */
325  	ret = omap_prefetch_enable(info->gpmc_cs,
326  			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
327  	if (ret) {
328  		/* write posting engine is busy, use CPU copy method */
329  		omap_nand_data_out(chip, buf, len, false);
330  	} else {
331  		while (len) {
332  			w_count = readl(info->reg.gpmc_prefetch_status);
333  			w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
334  			w_count = w_count >> 1;
335  			for (i = 0; (i < w_count) && len; i++, len -= 2)
336  				iowrite16(*p++, info->fifo);
337  		}
338  		/* wait for data to be flushed out before resetting the prefetch */
339  		tim = 0;
340  		limit = (loops_per_jiffy *
341  					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
342  		do {
343  			cpu_relax();
344  			val = readl(info->reg.gpmc_prefetch_status);
345  			val = PREFETCH_STATUS_COUNT(val);
346  		} while (val && (tim++ < limit));
347  
348  		/* disable and stop the PFPW engine */
349  		omap_prefetch_reset(info->gpmc_cs, info);
350  	}
351  }
352  
353  /*
354   * omap_nand_dma_callback: callback on the completion of dma transfer
355   * @data: pointer to completion data structure
356   */
357  static void omap_nand_dma_callback(void *data)
358  {
359  	complete((struct completion *) data);
360  }
361  
362  /*
363   * omap_nand_dma_transfer: configure and start dma transfer
364   * @chip: nand chip structure
365   * @addr: virtual address in RAM of source/destination
366   * @len: number of data bytes to be transferred
367   * @is_write: flag for read/write operation
368   */
369  static inline int omap_nand_dma_transfer(struct nand_chip *chip,
370  					 const void *addr, unsigned int len,
371  					 int is_write)
372  {
373  	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
374  	struct dma_async_tx_descriptor *tx;
375  	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
376  							DMA_FROM_DEVICE;
377  	struct scatterlist sg;
378  	unsigned long tim, limit;
379  	unsigned n;
380  	int ret;
381  	u32 val;
382  
383  	if (!virt_addr_valid(addr))
384  		goto out_copy;
385  
386  	sg_init_one(&sg, addr, len);
387  	n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
388  	if (n == 0) {
389  		dev_err(&info->pdev->dev,
390  			"Couldn't DMA map a %d byte buffer\n", len);
391  		goto out_copy;
392  	}
393  
394  	tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
395  		is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
396  		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
397  	if (!tx)
398  		goto out_copy_unmap;
399  
400  	tx->callback = omap_nand_dma_callback;
401  	tx->callback_param = &info->comp;
402  	dmaengine_submit(tx);
403  
404  	init_completion(&info->comp);
405  
406  	/* setup and start DMA using dma_addr */
407  	dma_async_issue_pending(info->dma);
408  
409  	/*  configure and start prefetch transfer */
410  	ret = omap_prefetch_enable(info->gpmc_cs,
411  		PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
412  	if (ret)
413  		/* PFPW engine is busy, use cpu copy method */
414  		goto out_copy_unmap;
415  
416  	wait_for_completion(&info->comp);
417  	tim = 0;
418  	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
419  
420  	do {
421  		cpu_relax();
422  		val = readl(info->reg.gpmc_prefetch_status);
423  		val = PREFETCH_STATUS_COUNT(val);
424  	} while (val && (tim++ < limit));
425  
426  	/* disable and stop the PFPW engine */
427  	omap_prefetch_reset(info->gpmc_cs, info);
428  
429  	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
430  	return 0;
431  
432  out_copy_unmap:
433  	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
434  out_copy:
435  	is_write == 0 ? omap_nand_data_in(chip, (void *)addr, len, false)
436  		      : omap_nand_data_out(chip, addr, len, false);
437  
438  	return 0;
439  }
440  
441  /**
442   * omap_nand_data_in_dma_pref - NAND data in using DMA and Prefetch
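 * @chip: NAND chip object
 * @buf: destination buffer
 * @len: number of bytes to read
 * @force_8bit: force 8-bit transfers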
443   */
444  static void omap_nand_data_in_dma_pref(struct nand_chip *chip, void *buf,
445  				       unsigned int len, bool force_8bit)
446  {
447  	struct mtd_info *mtd = nand_to_mtd(chip);
448  
449  	if (force_8bit) {
450  		omap_nand_data_in(chip, buf, len, force_8bit);
451  		return;
452  	}
453  
454  	if (len <= mtd->oobsize)
455  		omap_nand_data_in_pref(chip, buf, len, false);
456  	else
457  		/* start transfer in DMA mode */
458  		omap_nand_dma_transfer(chip, buf, len, 0x0);
459  }
460  
461  /**
462   * omap_nand_data_out_dma_pref - NAND data out using DMA and write posting
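 * @chip: NAND chip object
 * @buf: source buffer
 * @len: number of bytes to write
 * @force_8bit: force 8-bit transfers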
463   */
464  static void omap_nand_data_out_dma_pref(struct nand_chip *chip,
465  					const void *buf, unsigned int len,
466  					bool force_8bit)
467  {
468  	struct mtd_info *mtd = nand_to_mtd(chip);
469  
470  	if (force_8bit) {
471  		omap_nand_data_out(chip, buf, len, force_8bit);
472  		return;
473  	}
474  
475  	if (len <= mtd->oobsize)
476  		omap_nand_data_out_pref(chip, buf, len, false);
477  	else
478  		/* start transfer in DMA mode */
479  		omap_nand_dma_transfer(chip, buf, len, 0x1);
480  }
481  
482  /*
483   * omap_nand_irq - GPMC irq handler
484   * @this_irq: gpmc irq number
485   * @dev: omap_nand_info structure pointer is passed here
486   */
487  static irqreturn_t omap_nand_irq(int this_irq, void *dev)
488  {
489  	struct omap_nand_info *info = (struct omap_nand_info *) dev;
490  	u32 bytes;
491  
492  	bytes = readl(info->reg.gpmc_prefetch_status);
493  	bytes = PREFETCH_STATUS_FIFO_CNT(bytes);
494  	bytes = bytes  & 0xFFFC; /* io in multiple of 4 bytes */
495  	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
496  		if (this_irq == info->gpmc_irq_count)
497  			goto done;
498  
499  		if (info->buf_len && (info->buf_len < bytes))
500  			bytes = info->buf_len;
501  		else if (!info->buf_len)
502  			bytes = 0;
503  		iowrite32_rep(info->fifo, (u32 *)info->buf,
504  			      bytes >> 2);
505  		info->buf = info->buf + bytes;
506  		info->buf_len -= bytes;
507  
508  	} else {
509  		ioread32_rep(info->fifo, (u32 *)info->buf,
510  			     bytes >> 2);
511  		info->buf = info->buf + bytes;
512  
513  		if (this_irq == info->gpmc_irq_count)
514  			goto done;
515  	}
516  
517  	return IRQ_HANDLED;
518  
519  done:
520  	complete(&info->comp);
521  
522  	disable_irq_nosync(info->gpmc_irq_fifo);
523  	disable_irq_nosync(info->gpmc_irq_count);
524  
525  	return IRQ_HANDLED;
526  }
527  
528  /*
529   * omap_nand_data_in_irq_pref - NAND data in using Prefetch and IRQ
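 * @chip: NAND chip object
 * @buf: destination buffer
 * @len: number of bytes to read
 * @force_8bit: force 8-bit transfers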
530   */
531  static void omap_nand_data_in_irq_pref(struct nand_chip *chip, void *buf,
532  				       unsigned int len, bool force_8bit)
533  {
534  	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
535  	struct mtd_info *mtd = nand_to_mtd(&info->nand);
536  	int ret = 0;
537  
538  	if (len <= mtd->oobsize || force_8bit) {
539  		omap_nand_data_in(chip, buf, len, force_8bit);
540  		return;
541  	}
542  
543  	info->iomode = OMAP_NAND_IO_READ;
544  	info->buf = buf;
545  	init_completion(&info->comp);
546  
547  	/*  configure and start prefetch transfer */
548  	ret = omap_prefetch_enable(info->gpmc_cs,
549  			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
550  	if (ret) {
551  		/* PFPW engine is busy, use cpu copy method */
552  		omap_nand_data_in(chip, buf, len, false);
553  		return;
554  	}
555  
556  	info->buf_len = len;
557  
558  	enable_irq(info->gpmc_irq_count);
559  	enable_irq(info->gpmc_irq_fifo);
560  
561  	/* waiting for read to complete */
562  	wait_for_completion(&info->comp);
563  
564  	/* disable and stop the PFPW engine */
565  	omap_prefetch_reset(info->gpmc_cs, info);
566  	return;
567  }
568  
569  /*
570   * omap_nand_data_out_irq_pref - NAND out using write posting and IRQ
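 * @chip: NAND chip object
 * @buf: source buffer
 * @len: number of bytes to write
 * @force_8bit: force 8-bit transfers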
571   */
572  static void omap_nand_data_out_irq_pref(struct nand_chip *chip,
573  					const void *buf, unsigned int len,
574  					bool force_8bit)
575  {
576  	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
577  	struct mtd_info *mtd = nand_to_mtd(&info->nand);
578  	int ret = 0;
579  	unsigned long tim, limit;
580  	u32 val;
581  
582  	if (len <= mtd->oobsize || force_8bit) {
583  		omap_nand_data_out(chip, buf, len, force_8bit);
584  		return;
585  	}
586  
587  	info->iomode = OMAP_NAND_IO_WRITE;
588  	info->buf = (u_char *) buf;
589  	init_completion(&info->comp);
590  
591  	/* configure and start prefetch transfer : size=24 */
592  	ret = omap_prefetch_enable(info->gpmc_cs,
593  		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
594  	if (ret) {
595  		/* PFPW engine is busy, use cpu copy method */
596  		omap_nand_data_out(chip, buf, len, false);
597  		return;
598  	}
599  
600  	info->buf_len = len;
601  
602  	enable_irq(info->gpmc_irq_count);
603  	enable_irq(info->gpmc_irq_fifo);
604  
605  	/* waiting for write to complete */
606  	wait_for_completion(&info->comp);
607  
608  	/* wait for data to be flushed out before resetting the prefetch */
609  	tim = 0;
610  	limit = (loops_per_jiffy *  msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
611  	do {
612  		val = readl(info->reg.gpmc_prefetch_status);
613  		val = PREFETCH_STATUS_COUNT(val);
614  		cpu_relax();
615  	} while (val && (tim++ < limit));
616  
617  	/* disable and stop the PFPW engine */
618  	omap_prefetch_reset(info->gpmc_cs, info);
619  	return;
620  }
621  
622  /**
623   * gen_true_ecc - This function will generate true ECC value
624   * @ecc_buf: buffer to store ecc code
625   *
626   * This generated true ECC value can be used when correcting
627   * data read from NAND flash memory core
628   */
629  static void gen_true_ecc(u8 *ecc_buf)
630  {
631  	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
632  		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
633  
634  	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
635  			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
636  	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
637  			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
638  	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
639  			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
640  }
641  
642  /**
643   * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
644   * @ecc_data1:  ecc code from nand spare area
645   * @ecc_data2:  ecc code from hardware register obtained from hardware ecc
646   * @page_data:  page data
647   *
648   * This function compares two ECCs and indicates if there is an error.
649   * If the error can be corrected, it is corrected in the page buffer.
650   * If there is no error, %0 is returned. If there is an error but it
651   * was corrected, %1 is returned. Otherwise, %-EBADMSG is returned.
652   */
653  static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
654  			    u8 *ecc_data2,	/* read from register */
655  			    u8 *page_data)
656  {
657  	uint	i;
658  	u8	tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
659  	u8	comp0_bit[8], comp1_bit[8], comp2_bit[8];
660  	u8	ecc_bit[24];
661  	u8	ecc_sum = 0;
662  	u8	find_bit = 0;
663  	uint	find_byte = 0;
664  	int	isEccFF;
665  
666  	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
667  
668  	gen_true_ecc(ecc_data1);
669  	gen_true_ecc(ecc_data2);
670  
671  	for (i = 0; i <= 2; i++) {
672  		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
673  		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
674  	}
675  
676  	for (i = 0; i < 8; i++) {
677  		tmp0_bit[i]     = *ecc_data1 % 2;
678  		*ecc_data1	= *ecc_data1 / 2;
679  	}
680  
681  	for (i = 0; i < 8; i++) {
682  		tmp1_bit[i]	 = *(ecc_data1 + 1) % 2;
683  		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
684  	}
685  
686  	for (i = 0; i < 8; i++) {
687  		tmp2_bit[i]	 = *(ecc_data1 + 2) % 2;
688  		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
689  	}
690  
691  	for (i = 0; i < 8; i++) {
692  		comp0_bit[i]     = *ecc_data2 % 2;
693  		*ecc_data2       = *ecc_data2 / 2;
694  	}
695  
696  	for (i = 0; i < 8; i++) {
697  		comp1_bit[i]     = *(ecc_data2 + 1) % 2;
698  		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
699  	}
700  
701  	for (i = 0; i < 8; i++) {
702  		comp2_bit[i]     = *(ecc_data2 + 2) % 2;
703  		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
704  	}
705  
706  	for (i = 0; i < 6; i++)
707  		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
708  
709  	for (i = 0; i < 8; i++)
710  		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
711  
712  	for (i = 0; i < 8; i++)
713  		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
714  
715  	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
716  	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
717  
718  	for (i = 0; i < 24; i++)
719  		ecc_sum += ecc_bit[i];
720  
721  	switch (ecc_sum) {
722  	case 0:
723  		/* Not reached because this function is not called if
724  		 *  ECC values are equal
725  		 */
726  		return 0;
727  
728  	case 1:
729  		/* Uncorrectable error */
730  		pr_debug("ECC UNCORRECTED_ERROR 1\n");
731  		return -EBADMSG;
732  
733  	case 11:
734  		/* Uncorrectable error */
735  		pr_debug("ECC UNCORRECTED_ERROR B\n");
736  		return -EBADMSG;
737  
738  	case 12:
739  		/* Correctable error */
740  		find_byte = (ecc_bit[23] << 8) +
741  			    (ecc_bit[21] << 7) +
742  			    (ecc_bit[19] << 6) +
743  			    (ecc_bit[17] << 5) +
744  			    (ecc_bit[15] << 4) +
745  			    (ecc_bit[13] << 3) +
746  			    (ecc_bit[11] << 2) +
747  			    (ecc_bit[9]  << 1) +
748  			    ecc_bit[7];
749  
750  		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
751  
752  		pr_debug("Correcting single bit ECC error at offset: "
753  				"%d, bit: %d\n", find_byte, find_bit);
754  
755  		page_data[find_byte] ^= (1 << find_bit);
756  
757  		return 1;
758  	default:
759  		if (isEccFF) {
760  			if (ecc_data2[0] == 0 &&
761  			    ecc_data2[1] == 0 &&
762  			    ecc_data2[2] == 0)
763  				return 0;
764  		}
765  		pr_debug("UNCORRECTED_ERROR default\n");
766  		return -EBADMSG;
767  	}
768  }
769  
770  /**
771   * omap_correct_data - Compares the ECC read with HW generated ECC
772   * @chip: NAND chip object
773   * @dat: page data
774   * @read_ecc: ecc read from nand flash
775   * @calc_ecc: ecc read from HW ECC registers
776   *
777   * Compares the ECC read from the NAND spare area with the ECC register
778   * values and, if they mismatch, calls 'omap_compare_ecc' for error
779   * detection and correction. If there are no errors, %0 is returned. If
780   * there were errors and all of them were corrected, the number of
781   * corrected errors is returned. If uncorrectable errors exist, a negative
782   * error code (%-EBADMSG) is returned.
783   */
784  static int omap_correct_data(struct nand_chip *chip, u_char *dat,
785  			     u_char *read_ecc, u_char *calc_ecc)
786  {
787  	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
788  	int blockCnt = 0, i = 0, ret = 0;
789  	int stat = 0;
790  
791  	/* Ex NAND_ECC_HW12_2048 */
792  	if (info->nand.ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST &&
793  	    info->nand.ecc.size == 2048)
794  		blockCnt = 4;
795  	else
796  		blockCnt = 1;
797  
798  	for (i = 0; i < blockCnt; i++) {
799  		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
800  			ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
801  			if (ret < 0)
802  				return ret;
803  			/* keep track of the number of corrected errors */
804  			stat += ret;
805  		}
806  		read_ecc += 3;
807  		calc_ecc += 3;
808  		dat      += 512;
809  	}
810  	return stat;
811  }
812  
813  /**
814   * omap_calculate_ecc - Generate non-inverted ECC bytes.
815   * @chip: NAND chip object
816   * @dat: The pointer to data on which ecc is computed
817   * @ecc_code: The ecc_code buffer
818   *
819   * Using non-inverted ECC can be considered ugly since writing a blank
820   * page, i.e. padding, will clear the ECC bytes. This is no problem as long
821   * as nobody is trying to write data on the seemingly unused page. Reading
822   * an erased page will produce an ECC mismatch between generated and read
823   * ECC bytes that has to be dealt with separately.
824   */
825  static int omap_calculate_ecc(struct nand_chip *chip, const u_char *dat,
826  			      u_char *ecc_code)
827  {
828  	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
829  	u32 val;
830  
831  	val = readl(info->reg.gpmc_ecc_config);
832  	if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
833  		return -EINVAL;
834  
835  	/* read ecc result */
836  	val = readl(info->reg.gpmc_ecc1_result);
837  	*ecc_code++ = val;          /* P128e, ..., P1e */
838  	*ecc_code++ = val >> 16;    /* P128o, ..., P1o */
839  	/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
840  	*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
841  
842  	return 0;
843  }
844  
845  /**
846   * omap_enable_hwecc - This function enables the hardware ecc functionality
847   * @chip: NAND chip object
848   * @mode: Read/Write mode
849   */
850  static void omap_enable_hwecc(struct nand_chip *chip, int mode)
851  {
852  	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
853  	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
854  	u32 val;
855  
856  	/* clear ecc and enable bits */
857  	val = ECCCLEAR | ECC1;
858  	writel(val, info->reg.gpmc_ecc_control);
859  
860  	/* program ecc and result sizes */
861  	val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
862  			 ECC1RESULTSIZE);
863  	writel(val, info->reg.gpmc_ecc_size_config);
864  
865  	switch (mode) {
866  	case NAND_ECC_READ:
867  	case NAND_ECC_WRITE:
868  		writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
869  		break;
870  	case NAND_ECC_READSYN:
871  		writel(ECCCLEAR, info->reg.gpmc_ecc_control);
872  		break;
873  	default:
874  		dev_info(&info->pdev->dev,
875  			"error: unrecognized Mode[%d]!\n", mode);
876  		break;
877  	}
878  
879  	/* (ECC 16 or 8 bit col) | ( CS  )  | ECC Enable */
880  	val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
881  	writel(val, info->reg.gpmc_ecc_config);
882  }
883  
884  /**
885   * omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
886   * @chip: NAND chip object
887   * @mode: Read/Write mode
888   *
889   * When using BCH with SW correction (i.e. no ELM), sector size is set
890   * to 512 bytes and we use BCH_WRAPMODE_6 wrapping mode
891   * for both reading and writing with:
892   * eccsize0 = 0  (no additional protected byte in spare area)
893   * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
894   */
895  static void __maybe_unused omap_enable_hwecc_bch(struct nand_chip *chip,
896  						 int mode)
897  {
898  	unsigned int bch_type;
899  	unsigned int dev_width, nsectors;
900  	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
901  	enum omap_ecc ecc_opt = info->ecc_opt;
902  	u32 val, wr_mode;
903  	unsigned int ecc_size1, ecc_size0;
904  
905  	/* GPMC configurations for calculating ECC */
906  	switch (ecc_opt) {
907  	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
908  		bch_type = 0;
909  		nsectors = 1;
910  		wr_mode	  = BCH_WRAPMODE_6;
911  		ecc_size0 = BCH_ECC_SIZE0;
912  		ecc_size1 = BCH_ECC_SIZE1;
913  		break;
914  	case OMAP_ECC_BCH4_CODE_HW:
915  		bch_type = 0;
916  		nsectors = chip->ecc.steps;
917  		if (mode == NAND_ECC_READ) {
918  			wr_mode	  = BCH_WRAPMODE_1;
919  			ecc_size0 = BCH4R_ECC_SIZE0;
920  			ecc_size1 = BCH4R_ECC_SIZE1;
921  		} else {
922  			wr_mode   = BCH_WRAPMODE_6;
923  			ecc_size0 = BCH_ECC_SIZE0;
924  			ecc_size1 = BCH_ECC_SIZE1;
925  		}
926  		break;
927  	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
928  		bch_type = 1;
929  		nsectors = 1;
930  		wr_mode	  = BCH_WRAPMODE_6;
931  		ecc_size0 = BCH_ECC_SIZE0;
932  		ecc_size1 = BCH_ECC_SIZE1;
933  		break;
934  	case OMAP_ECC_BCH8_CODE_HW:
935  		bch_type = 1;
936  		nsectors = chip->ecc.steps;
937  		if (mode == NAND_ECC_READ) {
938  			wr_mode	  = BCH_WRAPMODE_1;
939  			ecc_size0 = BCH8R_ECC_SIZE0;
940  			ecc_size1 = BCH8R_ECC_SIZE1;
941  		} else {
942  			wr_mode   = BCH_WRAPMODE_6;
943  			ecc_size0 = BCH_ECC_SIZE0;
944  			ecc_size1 = BCH_ECC_SIZE1;
945  		}
946  		break;
947  	case OMAP_ECC_BCH16_CODE_HW:
948  		bch_type = 0x2;
949  		nsectors = chip->ecc.steps;
950  		if (mode == NAND_ECC_READ) {
951  			wr_mode	  = 0x01;
952  			ecc_size0 = 52; /* ECC bits in nibbles per sector */
953  			ecc_size1 = 0;  /* non-ECC bits in nibbles per sector */
954  		} else {
955  			wr_mode	  = 0x01;
956  			ecc_size0 = 0;  /* extra bits in nibbles per sector */
957  			ecc_size1 = 52; /* OOB bits in nibbles per sector */
958  		}
959  		break;
960  	default:
961  		return;
962  	}
963  
964  	writel(ECC1, info->reg.gpmc_ecc_control);
965  
966  	/* Configure ecc size for BCH */
967  	val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
968  	writel(val, info->reg.gpmc_ecc_size_config);
969  
970  	dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
971  
972  	/* BCH configuration */
973  	val = ((1                        << 16) | /* enable BCH */
974  	       (bch_type		 << 12) | /* BCH4/BCH8/BCH16 */
975  	       (wr_mode                  <<  8) | /* wrap mode */
976  	       (dev_width                <<  7) | /* bus width */
977  	       (((nsectors-1) & 0x7)     <<  4) | /* number of sectors */
978  	       (info->gpmc_cs            <<  1) | /* ECC CS */
979  	       (0x1));                            /* enable ECC */
980  
981  	writel(val, info->reg.gpmc_ecc_config);
982  
983  	/* Clear ecc and enable bits */
984  	writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
985  }
986  
987  static u8  bch4_polynomial[] = {0x28, 0x13, 0xcc, 0x39, 0x96, 0xac, 0x7f};
988  static u8  bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
989  				0x97, 0x79, 0xe5, 0x24, 0xb5};
990  
991  /**
992   * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
993   * @mtd:	MTD device structure
994   * @dat:	The pointer to data on which ecc is computed
995   * @ecc_calc:	The ecc_code buffer
996   * @i:		The sector number (for a multi sector page)
997   *
998   * Support calculating of BCH4/8/16 ECC vectors for one sector
999   * within a page. Sector number is in @i.
1000   */
1001  static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
1002  				   const u_char *dat, u_char *ecc_calc, int i)
1003  {
1004  	struct omap_nand_info *info = mtd_to_omap(mtd);
1005  	int eccbytes	= info->nand.ecc.bytes;
1006  	struct gpmc_nand_regs	*gpmc_regs = &info->reg;
1007  	u8 *ecc_code;
1008  	unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
1009  	u32 val;
1010  	int j;
1011  
1012  	ecc_code = ecc_calc;
1013  	switch (info->ecc_opt) {
1014  	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1015  	case OMAP_ECC_BCH8_CODE_HW:
1016  		bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
1017  		bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
1018  		bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
1019  		bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
1020  		*ecc_code++ = (bch_val4 & 0xFF);
1021  		*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
1022  		*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
1023  		*ecc_code++ = ((bch_val3 >> 8) & 0xFF);
1024  		*ecc_code++ = (bch_val3 & 0xFF);
1025  		*ecc_code++ = ((bch_val2 >> 24) & 0xFF);
1026  		*ecc_code++ = ((bch_val2 >> 16) & 0xFF);
1027  		*ecc_code++ = ((bch_val2 >> 8) & 0xFF);
1028  		*ecc_code++ = (bch_val2 & 0xFF);
1029  		*ecc_code++ = ((bch_val1 >> 24) & 0xFF);
1030  		*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
1031  		*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
1032  		*ecc_code++ = (bch_val1 & 0xFF);
1033  		break;
1034  	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1035  	case OMAP_ECC_BCH4_CODE_HW:
1036  		bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
1037  		bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
1038  		*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
1039  		*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
1040  		*ecc_code++ = ((bch_val2 & 0xF) << 4) |
1041  			((bch_val1 >> 28) & 0xF);
1042  		*ecc_code++ = ((bch_val1 >> 20) & 0xFF);
1043  		*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
1044  		*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
1045  		*ecc_code++ = ((bch_val1 & 0xF) << 4);
1046  		break;
1047  	case OMAP_ECC_BCH16_CODE_HW:
1048  		val = readl(gpmc_regs->gpmc_bch_result6[i]);
1049  		ecc_code[0]  = ((val >>  8) & 0xFF);
1050  		ecc_code[1]  = ((val >>  0) & 0xFF);
1051  		val = readl(gpmc_regs->gpmc_bch_result5[i]);
1052  		ecc_code[2]  = ((val >> 24) & 0xFF);
1053  		ecc_code[3]  = ((val >> 16) & 0xFF);
1054  		ecc_code[4]  = ((val >>  8) & 0xFF);
1055  		ecc_code[5]  = ((val >>  0) & 0xFF);
1056  		val = readl(gpmc_regs->gpmc_bch_result4[i]);
1057  		ecc_code[6]  = ((val >> 24) & 0xFF);
1058  		ecc_code[7]  = ((val >> 16) & 0xFF);
1059  		ecc_code[8]  = ((val >>  8) & 0xFF);
1060  		ecc_code[9]  = ((val >>  0) & 0xFF);
1061  		val = readl(gpmc_regs->gpmc_bch_result3[i]);
1062  		ecc_code[10] = ((val >> 24) & 0xFF);
1063  		ecc_code[11] = ((val >> 16) & 0xFF);
1064  		ecc_code[12] = ((val >>  8) & 0xFF);
1065  		ecc_code[13] = ((val >>  0) & 0xFF);
1066  		val = readl(gpmc_regs->gpmc_bch_result2[i]);
1067  		ecc_code[14] = ((val >> 24) & 0xFF);
1068  		ecc_code[15] = ((val >> 16) & 0xFF);
1069  		ecc_code[16] = ((val >>  8) & 0xFF);
1070  		ecc_code[17] = ((val >>  0) & 0xFF);
1071  		val = readl(gpmc_regs->gpmc_bch_result1[i]);
1072  		ecc_code[18] = ((val >> 24) & 0xFF);
1073  		ecc_code[19] = ((val >> 16) & 0xFF);
1074  		ecc_code[20] = ((val >>  8) & 0xFF);
1075  		ecc_code[21] = ((val >>  0) & 0xFF);
1076  		val = readl(gpmc_regs->gpmc_bch_result0[i]);
1077  		ecc_code[22] = ((val >> 24) & 0xFF);
1078  		ecc_code[23] = ((val >> 16) & 0xFF);
1079  		ecc_code[24] = ((val >>  8) & 0xFF);
1080  		ecc_code[25] = ((val >>  0) & 0xFF);
1081  		break;
1082  	default:
1083  		return -EINVAL;
1084  	}
1085  
1086  	/* ECC scheme specific syndrome customizations */
1087  	switch (info->ecc_opt) {
1088  	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1089  		/* Add constant polynomial to remainder, so that
1090  		 * ECC of blank pages results in 0x0 on reading back
1091  		 */
1092  		for (j = 0; j < eccbytes; j++)
1093  			ecc_calc[j] ^= bch4_polynomial[j];
1094  		break;
1095  	case OMAP_ECC_BCH4_CODE_HW:
1096  		/* Set  8th ECC byte as 0x0 for ROM compatibility */
1097  		ecc_calc[eccbytes - 1] = 0x0;
1098  		break;
1099  	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1100  		/* Add constant polynomial to remainder, so that
1101  		 * ECC of blank pages results in 0x0 on reading back
1102  		 */
1103  		for (j = 0; j < eccbytes; j++)
1104  			ecc_calc[j] ^= bch8_polynomial[j];
1105  		break;
1106  	case OMAP_ECC_BCH8_CODE_HW:
1107  		/* Set 14th ECC byte as 0x0 for ROM compatibility */
1108  		ecc_calc[eccbytes - 1] = 0x0;
1109  		break;
1110  	case OMAP_ECC_BCH16_CODE_HW:
1111  		break;
1112  	default:
1113  		return -EINVAL;
1114  	}
1115  
1116  	return 0;
1117  }
1118  
1119  /**
1120   * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
1121   * @chip:	NAND chip object
1122   * @dat:	The pointer to data on which ecc is computed
1123   * @ecc_calc:	Buffer storing the calculated ECC bytes
1124   *
1125   * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used
1126   * when SW based correction is required as ECC is required for one sector
1127   * at a time.
1128   */
1129  static int omap_calculate_ecc_bch_sw(struct nand_chip *chip,
1130  				     const u_char *dat, u_char *ecc_calc)
1131  {
1132  	return _omap_calculate_ecc_bch(nand_to_mtd(chip), dat, ecc_calc, 0);
1133  }
1134  
1135  /**
1136   * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
1137   * @mtd:	MTD device structure
1138   * @dat:	The pointer to data on which ecc is computed
1139   * @ecc_calc:	Buffer storing the calculated ECC bytes
1140   *
1141   * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go.
1142   */
1143  static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
1144  					const u_char *dat, u_char *ecc_calc)
1145  {
1146  	struct omap_nand_info *info = mtd_to_omap(mtd);
1147  	int eccbytes = info->nand.ecc.bytes;
1148  	unsigned long nsectors;
1149  	int i, ret;
1150  
1151  	nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
1152  	for (i = 0; i < nsectors; i++) {
1153  		ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
1154  		if (ret)
1155  			return ret;
1156  
1157  		ecc_calc += eccbytes;
1158  	}
1159  
1160  	return 0;
1161  }
1162  
1163  /**
1164   * erased_sector_bitflips - count bit flips
1165   * @data:	data sector buffer
1166   * @oob:	oob buffer
1167   * @info:	omap_nand_info
1168   *
1169   * Check whether the bit flips in an erased page fall below the correctable
1170   * level. If so, report the page as erased with correctable bit flips;
1171   * otherwise report it as an uncorrectable page.
1172   */
1173  static int erased_sector_bitflips(u_char *data, u_char *oob,
1174  		struct omap_nand_info *info)
1175  {
1176  	int flip_bits = 0, i;
1177  
1178  	for (i = 0; i < info->nand.ecc.size; i++) {
1179  		flip_bits += hweight8(~data[i]);
1180  		if (flip_bits > info->nand.ecc.strength)
1181  			return 0;
1182  	}
1183  
1184  	for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
1185  		flip_bits += hweight8(~oob[i]);
1186  		if (flip_bits > info->nand.ecc.strength)
1187  			return 0;
1188  	}
1189  
1190  	/*
1191  	 * Bit flips fall within the correctable level.
1192  	 * Fill the data area with 0xFF.
1193  	 */
1194  	if (flip_bits) {
1195  		memset(data, 0xFF, info->nand.ecc.size);
1196  		memset(oob, 0xFF, info->nand.ecc.bytes);
1197  	}
1198  
1199  	return flip_bits;
1200  }
1201  
1202  /**
1203   * omap_elm_correct_data - corrects page data area in case error reported
1204   * @chip:	NAND chip object
1205   * @data:	page data
1206   * @read_ecc:	ecc read from nand flash
1207   * @calc_ecc:	ecc read from HW ECC registers
1208   *
1209   * The calculated ECC vector is reported as zero for error-free pages.
1210   * For a non-zero ECC vector, erased pages are filtered out first, and
1211   * the data is then processed via the ELM to detect bit-flips.
1212   */
1213  static int omap_elm_correct_data(struct nand_chip *chip, u_char *data,
1214  				 u_char *read_ecc, u_char *calc_ecc)
1215  {
1216  	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
1217  	struct nand_ecc_ctrl *ecc = &info->nand.ecc;
1218  	int eccsteps = info->nsteps_per_eccpg;
1219  	int i , j, stat = 0;
1220  	int eccflag, actual_eccbytes;
1221  	struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
1222  	u_char *ecc_vec = calc_ecc;
1223  	u_char *spare_ecc = read_ecc;
1224  	u_char *erased_ecc_vec;
1225  	u_char *buf;
1226  	int bitflip_count;
1227  	bool is_error_reported = false;
1228  	u32 bit_pos, byte_pos, error_max, pos;
1229  	int err;
1230  
1231  	switch (info->ecc_opt) {
1232  	case OMAP_ECC_BCH4_CODE_HW:
1233  		/* omit  7th ECC byte reserved for ROM code compatibility */
1234  		actual_eccbytes = ecc->bytes - 1;
1235  		erased_ecc_vec = bch4_vector;
1236  		break;
1237  	case OMAP_ECC_BCH8_CODE_HW:
1238  		/* omit 14th ECC byte reserved for ROM code compatibility */
1239  		actual_eccbytes = ecc->bytes - 1;
1240  		erased_ecc_vec = bch8_vector;
1241  		break;
1242  	case OMAP_ECC_BCH16_CODE_HW:
1243  		actual_eccbytes = ecc->bytes;
1244  		erased_ecc_vec = bch16_vector;
1245  		break;
1246  	default:
1247  		dev_err(&info->pdev->dev, "invalid driver configuration\n");
1248  		return -EINVAL;
1249  	}
1250  
1251  	/* Initialize elm error vector to zero */
1252  	memset(err_vec, 0, sizeof(err_vec));
1253  
1254  	for (i = 0; i < eccsteps ; i++) {
1255  		eccflag = 0;	/* initialize eccflag */
1256  
1257  		/*
1258  		 * Check whether any error was reported;
1259  		 * a non-zero ECC indicates an error.
1260  		 */
1261  		for (j = 0; j < actual_eccbytes; j++) {
1262  			if (calc_ecc[j] != 0) {
1263  				eccflag = 1; /* non zero ecc, error present */
1264  				break;
1265  			}
1266  		}
1267  
1268  		if (eccflag == 1) {
1269  			if (memcmp(calc_ecc, erased_ecc_vec,
1270  						actual_eccbytes) == 0) {
1271  				/*
1272  				 * calc_ecc[] matches the erased-page ECC
1273  				 * pattern (all 0xff): definitely an erased page
1274  				 */
1275  			} else {
1276  				buf = &data[info->nand.ecc.size * i];
1277  				/*
1278  				 * count number of 0-bits in read_buf.
1279  				 * This check can be removed once a similar
1280  				 * check is introduced in generic NAND driver
1281  				 */
1282  				bitflip_count = erased_sector_bitflips(
1283  						buf, read_ecc, info);
1284  				if (bitflip_count) {
1285  					/*
1286  					 * number of 0-bits within ECC limits
1287  					 * So this may be an erased-page
1288  					 */
1289  					stat += bitflip_count;
1290  				} else {
1291  					/*
1292  					 * Too many 0-bits. It may be a
1293  					 * - programmed-page, OR
1294  					 * - erased-page with many bit-flips
1295  					 * So this page requires check by ELM
1296  					 */
1297  					err_vec[i].error_reported = true;
1298  					is_error_reported = true;
1299  				}
1300  			}
1301  		}
1302  
1303  		/* Update the ecc vector */
1304  		calc_ecc += ecc->bytes;
1305  		read_ecc += ecc->bytes;
1306  	}
1307  
1308  	/* Check if any error reported */
1309  	if (!is_error_reported)
1310  		return stat;
1311  
1312  	/* Decode BCH error using ELM module */
1313  	elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
1314  
1315  	err = 0;
1316  	for (i = 0; i < eccsteps; i++) {
1317  		if (err_vec[i].error_uncorrectable) {
1318  			dev_err(&info->pdev->dev,
1319  				"uncorrectable bit-flips found\n");
1320  			err = -EBADMSG;
1321  		} else if (err_vec[i].error_reported) {
1322  			for (j = 0; j < err_vec[i].error_count; j++) {
1323  				switch (info->ecc_opt) {
1324  				case OMAP_ECC_BCH4_CODE_HW:
1325  					/* Add 4 bits to take care of padding */
1326  					pos = err_vec[i].error_loc[j] +
1327  						BCH4_BIT_PAD;
1328  					break;
1329  				case OMAP_ECC_BCH8_CODE_HW:
1330  				case OMAP_ECC_BCH16_CODE_HW:
1331  					pos = err_vec[i].error_loc[j];
1332  					break;
1333  				default:
1334  					return -EINVAL;
1335  				}
1336  				error_max = (ecc->size + actual_eccbytes) * 8;
1337  				/* Calculate bit position of error */
1338  				bit_pos = pos % 8;
1339  
1340  				/* Calculate byte position of error */
1341  				byte_pos = (error_max - pos - 1) / 8;
1342  
1343  				if (pos < error_max) {
1344  					if (byte_pos < 512) {
1345  						pr_debug("bitflip@dat[%d]=%x\n",
1346  						     byte_pos, data[byte_pos]);
1347  						data[byte_pos] ^= 1 << bit_pos;
1348  					} else {
1349  						pr_debug("bitflip@oob[%d]=%x\n",
1350  							(byte_pos - 512),
1351  						     spare_ecc[byte_pos - 512]);
1352  						spare_ecc[byte_pos - 512] ^=
1353  							1 << bit_pos;
1354  					}
1355  				} else {
1356  					dev_err(&info->pdev->dev,
1357  						"invalid bit-flip @ %d:%d\n",
1358  						byte_pos, bit_pos);
1359  					err = -EBADMSG;
1360  				}
1361  			}
1362  		}
1363  
1364  		/* Update number of correctable errors */
1365  		stat = max_t(unsigned int, stat, err_vec[i].error_count);
1366  
1367  		/* Update page data with sector size */
1368  		data += ecc->size;
1369  		spare_ecc += ecc->bytes;
1370  	}
1371  
1372  	return (err) ? err : stat;
1373  }
1374  
1375  /**
1376   * omap_write_page_bch - BCH ecc based write page function for entire page
1377   * @chip:		nand chip info structure
1378   * @buf:		data buffer
1379   * @oob_required:	must write chip->oob_poi to OOB
1380   * @page:		page
1381   *
1382   * Custom write page method evolved to support multi-sector writing in one shot
1383   */
1384  static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
1385  			       int oob_required, int page)
1386  {
1387  	struct mtd_info *mtd = nand_to_mtd(chip);
1388  	struct omap_nand_info *info = mtd_to_omap(mtd);
1389  	uint8_t *ecc_calc = chip->ecc.calc_buf;
1390  	unsigned int eccpg;
1391  	int ret;
1392  
1393  	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1394  	if (ret)
1395  		return ret;
1396  
1397  	for (eccpg = 0; eccpg < info->neccpg; eccpg++) {
1398  		/* Enable GPMC ecc engine */
1399  		chip->ecc.hwctl(chip, NAND_ECC_WRITE);
1400  
1401  		/* Write data */
1402  		info->data_out(chip, buf + (eccpg * info->eccpg_size),
1403  			       info->eccpg_size, false);
1404  
1405  		/* Update ecc vector from GPMC result registers */
1406  		ret = omap_calculate_ecc_bch_multi(mtd,
1407  						   buf + (eccpg * info->eccpg_size),
1408  						   ecc_calc);
1409  		if (ret)
1410  			return ret;
1411  
1412  		ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc,
1413  						 chip->oob_poi,
1414  						 eccpg * info->eccpg_bytes,
1415  						 info->eccpg_bytes);
1416  		if (ret)
1417  			return ret;
1418  	}
1419  
1420  	/* Write ecc vector to OOB area */
1421  	info->data_out(chip, chip->oob_poi, mtd->oobsize, false);
1422  
1423  	return nand_prog_page_end_op(chip);
1424  }
1425  
1426  /**
1427   * omap_write_subpage_bch - BCH hardware ECC based subpage write
1428   * @chip:	nand chip info structure
1429   * @offset:	column address of subpage within the page
1430   * @data_len:	data length
1431   * @buf:	data buffer
1432   * @oob_required: must write chip->oob_poi to OOB
1433   * @page: page number to write
1434   *
1435   * OMAP optimized subpage write method.
1436   */
1437  static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
1438  				  u32 data_len, const u8 *buf,
1439  				  int oob_required, int page)
1440  {
1441  	struct mtd_info *mtd = nand_to_mtd(chip);
1442  	struct omap_nand_info *info = mtd_to_omap(mtd);
1443  	u8 *ecc_calc = chip->ecc.calc_buf;
1444  	int ecc_size      = chip->ecc.size;
1445  	int ecc_bytes     = chip->ecc.bytes;
1446  	u32 start_step = offset / ecc_size;
1447  	u32 end_step   = (offset + data_len - 1) / ecc_size;
1448  	unsigned int eccpg;
1449  	int step, ret = 0;
1450  
1451  	/*
1452  	 * Write the entire page in one go, which is optimal
1453  	 * since ECC is calculated by hardware.
1454  	 * ECC is calculated for all subpages but we keep
1455  	 * only what we want.
1456  	 */
1457  	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1458  	if (ret)
1459  		return ret;
1460  
1461  	for (eccpg = 0; eccpg < info->neccpg; eccpg++) {
1462  		/* Enable GPMC ECC engine */
1463  		chip->ecc.hwctl(chip, NAND_ECC_WRITE);
1464  
1465  		/* Write data */
1466  		info->data_out(chip, buf + (eccpg * info->eccpg_size),
1467  			       info->eccpg_size, false);
1468  
1469  		for (step = 0; step < info->nsteps_per_eccpg; step++) {
1470  			unsigned int base_step = eccpg * info->nsteps_per_eccpg;
1471  			const u8 *bufoffs = buf + (eccpg * info->eccpg_size);
1472  
1473  			/* Mask ECC of un-touched subpages with 0xFFs */
1474  			if ((step + base_step) < start_step ||
1475  			    (step + base_step) > end_step)
1476  				memset(ecc_calc + (step * ecc_bytes), 0xff,
1477  				       ecc_bytes);
1478  			else
1479  				ret = _omap_calculate_ecc_bch(mtd,
1480  							      bufoffs + (step * ecc_size),
1481  							      ecc_calc + (step * ecc_bytes),
1482  							      step);
1483  
1484  			if (ret)
1485  				return ret;
1486  		}
1487  
1488  		/*
1489  		 * Copy the calculated ECC for the whole page including the
1490  		 * masked values (0xFF) corresponding to unwritten subpages.
1491  		 */
1492  		ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi,
1493  						 eccpg * info->eccpg_bytes,
1494  						 info->eccpg_bytes);
1495  		if (ret)
1496  			return ret;
1497  	}
1498  
1499  	/* write OOB buffer to NAND device */
1500  	info->data_out(chip, chip->oob_poi, mtd->oobsize, false);
1501  
1502  	return nand_prog_page_end_op(chip);
1503  }
1504  
1505  /**
1506   * omap_read_page_bch - BCH ecc based page read function for entire page
1507   * @chip:		nand chip info structure
1508   * @buf:		buffer to store read data
1509   * @oob_required:	caller requires OOB data read to chip->oob_poi
1510   * @page:		page number to read
1511   *
1512   * For the BCH ECC scheme, the GPMC is used for syndrome calculation and
1513   * the ELM module for error correction.
1514   * This custom method evolved to support ELM error correction and
1515   * multi-sector reading. On read, the page data area is read along with the
1516   * OOB data with the ECC engine enabled; the ECC vector is updated after the
1517   * OOB data has been read. For error-free pages the ECC vector is reported as zero.
1518   */
1519  static int omap_read_page_bch(struct nand_chip *chip, uint8_t *buf,
1520  			      int oob_required, int page)
1521  {
1522  	struct mtd_info *mtd = nand_to_mtd(chip);
1523  	struct omap_nand_info *info = mtd_to_omap(mtd);
1524  	uint8_t *ecc_calc = chip->ecc.calc_buf;
1525  	uint8_t *ecc_code = chip->ecc.code_buf;
1526  	unsigned int max_bitflips = 0, eccpg;
1527  	int stat, ret;
1528  
1529  	ret = nand_read_page_op(chip, page, 0, NULL, 0);
1530  	if (ret)
1531  		return ret;
1532  
1533  	for (eccpg = 0; eccpg < info->neccpg; eccpg++) {
1534  		/* Enable GPMC ecc engine */
1535  		chip->ecc.hwctl(chip, NAND_ECC_READ);
1536  
1537  		/* Read data */
1538  		ret = nand_change_read_column_op(chip, eccpg * info->eccpg_size,
1539  						 buf + (eccpg * info->eccpg_size),
1540  						 info->eccpg_size, false);
1541  		if (ret)
1542  			return ret;
1543  
1544  		/* Read oob bytes */
1545  		ret = nand_change_read_column_op(chip,
1546  						 mtd->writesize + BBM_LEN +
1547  						 (eccpg * info->eccpg_bytes),
1548  						 chip->oob_poi + BBM_LEN +
1549  						 (eccpg * info->eccpg_bytes),
1550  						 info->eccpg_bytes, false);
1551  		if (ret)
1552  			return ret;
1553  
1554  		/* Calculate ecc bytes */
1555  		ret = omap_calculate_ecc_bch_multi(mtd,
1556  						   buf + (eccpg * info->eccpg_size),
1557  						   ecc_calc);
1558  		if (ret)
1559  			return ret;
1560  
1561  		ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code,
1562  						 chip->oob_poi,
1563  						 eccpg * info->eccpg_bytes,
1564  						 info->eccpg_bytes);
1565  		if (ret)
1566  			return ret;
1567  
1568  		stat = chip->ecc.correct(chip,
1569  					 buf + (eccpg * info->eccpg_size),
1570  					 ecc_code, ecc_calc);
1571  		if (stat < 0) {
1572  			mtd->ecc_stats.failed++;
1573  		} else {
1574  			mtd->ecc_stats.corrected += stat;
1575  			max_bitflips = max_t(unsigned int, max_bitflips, stat);
1576  		}
1577  	}
1578  
1579  	return max_bitflips;
1580  }
1581  
1582  /**
1583   * is_elm_present - checks for presence of ELM module by scanning DT nodes
1584   * @info: NAND device structure containing platform data
1585   * @elm_node: ELM's DT node
1586   */
1587  static bool is_elm_present(struct omap_nand_info *info,
1588  			   struct device_node *elm_node)
1589  {
1590  	struct platform_device *pdev;
1591  
1592  	/* check whether elm-id is passed via DT */
1593  	if (!elm_node) {
1594  		dev_err(&info->pdev->dev, "ELM devicetree node not found\n");
1595  		return false;
1596  	}
1597  	pdev = of_find_device_by_node(elm_node);
1598  	/* check whether ELM device is registered */
1599  	if (!pdev) {
1600  		dev_err(&info->pdev->dev, "ELM device not found\n");
1601  		return false;
1602  	}
1603  	/* ELM module available, now configure it */
1604  	info->elm_dev = &pdev->dev;
1605  	return true;
1606  }
1607  
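/*
 * omap2_nand_ecc_check - verify that the dependencies of the selected ECC
 * scheme (SW BCH library, OMAP BCH support, ELM module) are available.
 * @info: NAND device structure containing platform data
 */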
1608  static bool omap2_nand_ecc_check(struct omap_nand_info *info)
1609  {
1610  	bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
1611  
1612  	switch (info->ecc_opt) {
1613  	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1614  	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1615  		ecc_needs_omap_bch = false;
1616  		ecc_needs_bch = true;
1617  		ecc_needs_elm = false;
1618  		break;
1619  	case OMAP_ECC_BCH4_CODE_HW:
1620  	case OMAP_ECC_BCH8_CODE_HW:
1621  	case OMAP_ECC_BCH16_CODE_HW:
1622  		ecc_needs_omap_bch = true;
1623  		ecc_needs_bch = false;
1624  		ecc_needs_elm = true;
1625  		break;
1626  	default:
1627  		ecc_needs_omap_bch = false;
1628  		ecc_needs_bch = false;
1629  		ecc_needs_elm = false;
1630  		break;
1631  	}
1632  
1633  	if (ecc_needs_bch && !IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
1634  		dev_err(&info->pdev->dev,
1635  			"CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
1636  		return false;
1637  	}
1638  	if (ecc_needs_omap_bch && !IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)) {
1639  		dev_err(&info->pdev->dev,
1640  			"CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
1641  		return false;
1642  	}
1643  	if (ecc_needs_elm && !is_elm_present(info, info->elm_of_node)) {
1644  		dev_err(&info->pdev->dev, "ELM not available\n");
1645  		return false;
1646  	}
1647  
1648  	return true;
1649  }
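/*
 * Dependency summary for the checks above: the *_DETECTION_SW schemes only
 * need the software BCH library (CONFIG_MTD_NAND_ECC_SW_BCH); the fully
 * hardware BCH4/8/16 schemes need CONFIG_MTD_NAND_OMAP_BCH plus a reachable
 * ELM instance for error location; the Hamming schemes need neither.
 */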
1650  
1651  static const char * const nand_xfer_types[] = {
1652  	[NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
1653  	[NAND_OMAP_POLLED] = "polled",
1654  	[NAND_OMAP_PREFETCH_DMA] = "prefetch-dma",
1655  	[NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq",
1656  };
1657  
1658  static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info)
1659  {
1660  	struct device_node *child = dev->of_node;
1661  	int i;
1662  	const char *s;
1663  	u32 cs;
1664  
1665  	if (of_property_read_u32(child, "reg", &cs) < 0) {
1666  		dev_err(dev, "reg not found in DT\n");
1667  		return -EINVAL;
1668  	}
1669  
1670  	info->gpmc_cs = cs;
1671  
1672  	/* Detect availability of the ELM module; not present on pre-OMAP4 SoCs */
1673  	info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
1674  	if (!info->elm_of_node) {
1675  		info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
1676  		if (!info->elm_of_node)
1677  			dev_dbg(dev, "ti,elm-id not in DT\n");
1678  	}
1679  
1680  	/* select ecc-scheme for NAND */
1681  	if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
1682  		dev_err(dev, "ti,nand-ecc-opt not found\n");
1683  		return -EINVAL;
1684  	}
1685  
1686  	if (!strcmp(s, "sw")) {
1687  		info->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
1688  	} else if (!strcmp(s, "ham1") ||
1689  		   !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) {
1690  		info->ecc_opt =	OMAP_ECC_HAM1_CODE_HW;
1691  	} else if (!strcmp(s, "bch4")) {
1692  		if (info->elm_of_node)
1693  			info->ecc_opt = OMAP_ECC_BCH4_CODE_HW;
1694  		else
1695  			info->ecc_opt = OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
1696  	} else if (!strcmp(s, "bch8")) {
1697  		if (info->elm_of_node)
1698  			info->ecc_opt = OMAP_ECC_BCH8_CODE_HW;
1699  		else
1700  			info->ecc_opt = OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
1701  	} else if (!strcmp(s, "bch16")) {
1702  		info->ecc_opt =	OMAP_ECC_BCH16_CODE_HW;
1703  	} else {
1704  		dev_err(dev, "unrecognized value for ti,nand-ecc-opt\n");
1705  		return -EINVAL;
1706  	}
1707  
1708  	/* select data transfer mode */
1709  	if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) {
1710  		for (i = 0; i < ARRAY_SIZE(nand_xfer_types); i++) {
1711  			if (!strcasecmp(s, nand_xfer_types[i])) {
1712  				info->xfer_type = i;
1713  				return 0;
1714  			}
1715  		}
1716  
1717  		dev_err(dev, "unrecognized value for ti,nand-xfer-type\n");
1718  		return -EINVAL;
1719  	}
1720  
1721  	return 0;
1722  }
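/*
 * Illustrative GPMC child node for the properties parsed above. The
 * chip-select number, phandle label and chosen values are made-up,
 * board-specific examples; only the first "reg" cell (the CS number) is
 * actually read here.
 *
 *	nand@0,0 {
 *		reg = <0 0 4>;				// GPMC CS 0
 *		ti,elm-id = <&elm>;			// omitted on pre-OMAP4
 *		ti,nand-ecc-opt = "bch8";		// with ELM -> OMAP_ECC_BCH8_CODE_HW,
 *							// without -> ..._DETECTION_SW
 *		ti,nand-xfer-type = "prefetch-dma";	// one of nand_xfer_types[]
 *	};
 *
 * Accepted ti,nand-ecc-opt values: "sw", "ham1"/"hw"/"hw-romcode",
 * "bch4", "bch8", "bch16".
 */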
1723  
1724  static int omap_ooblayout_ecc(struct mtd_info *mtd, int section,
1725  			      struct mtd_oob_region *oobregion)
1726  {
1727  	struct omap_nand_info *info = mtd_to_omap(mtd);
1728  	struct nand_chip *chip = &info->nand;
1729  	int off = BBM_LEN;
1730  
1731  	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
1732  	    !(chip->options & NAND_BUSWIDTH_16))
1733  		off = 1;
1734  
1735  	if (section)
1736  		return -ERANGE;
1737  
1738  	oobregion->offset = off;
1739  	oobregion->length = chip->ecc.total;
1740  
1741  	return 0;
1742  }
1743  
1744  static int omap_ooblayout_free(struct mtd_info *mtd, int section,
1745  			       struct mtd_oob_region *oobregion)
1746  {
1747  	struct omap_nand_info *info = mtd_to_omap(mtd);
1748  	struct nand_chip *chip = &info->nand;
1749  	int off = BBM_LEN;
1750  
1751  	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
1752  	    !(chip->options & NAND_BUSWIDTH_16))
1753  		off = 1;
1754  
1755  	if (section)
1756  		return -ERANGE;
1757  
1758  	off += chip->ecc.total;
1759  	if (off >= mtd->oobsize)
1760  		return -ERANGE;
1761  
1762  	oobregion->offset = off;
1763  	oobregion->length = mtd->oobsize - off;
1764  
1765  	return 0;
1766  }
1767  
1768  static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
1769  	.ecc = omap_ooblayout_ecc,
1770  	.free = omap_ooblayout_free,
1771  };
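/*
 * Worked example of the layout above, assuming BBM_LEN is 2: a 2KiB page
 * with 64 OOB bytes using OMAP_ECC_BCH8_CODE_HW (4 steps of 14 bytes, so
 * chip->ecc.total = 56) gets a single ECC region covering bytes 2..57 and
 * a free region covering bytes 58..63. For HAM1 on an 8-bit device the
 * ECC region starts at byte 1 instead.
 */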
1772  
1773  static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
1774  				 struct mtd_oob_region *oobregion)
1775  {
1776  	struct nand_device *nand = mtd_to_nanddev(mtd);
1777  	unsigned int nsteps = nanddev_get_ecc_nsteps(nand);
1778  	unsigned int ecc_bytes = nanddev_get_ecc_bytes_per_step(nand);
1779  	int off = BBM_LEN;
1780  
1781  	if (section >= nsteps)
1782  		return -ERANGE;
1783  
1784  	/*
1785  	 * When SW correction is employed, one OMAP-specific marker byte is
1786  	 * reserved after each ECC step.
1787  	 */
1788  	oobregion->offset = off + (section * (ecc_bytes + 1));
1789  	oobregion->length = ecc_bytes;
1790  
1791  	return 0;
1792  }
1793  
1794  static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
1795  				  struct mtd_oob_region *oobregion)
1796  {
1797  	struct nand_device *nand = mtd_to_nanddev(mtd);
1798  	unsigned int nsteps = nanddev_get_ecc_nsteps(nand);
1799  	unsigned int ecc_bytes = nanddev_get_ecc_bytes_per_step(nand);
1800  	int off = BBM_LEN;
1801  
1802  	if (section)
1803  		return -ERANGE;
1804  
1805  	/*
1806  	 * When SW correction is employed, one OMAP-specific marker byte is
1807  	 * reserved after each ECC step.
1808  	 */
1809  	off += ((ecc_bytes + 1) * nsteps);
1810  	if (off >= mtd->oobsize)
1811  		return -ERANGE;
1812  
1813  	oobregion->offset = off;
1814  	oobregion->length = mtd->oobsize - off;
1815  
1816  	return 0;
1817  }
1818  
1819  static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
1820  	.ecc = omap_sw_ooblayout_ecc,
1821  	.free = omap_sw_ooblayout_free,
1822  };
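/*
 * Worked example for the software-detection layout, again assuming BBM_LEN
 * is 2: a 2KiB page / 64-byte OOB device with BCH8 (13 ECC bytes plus 1
 * OMAP marker byte per step) places ECC section n (n = 0..3) at offset
 * 2 + n * 14 with length 13, and the free region starts at 2 + 4 * 14 = 58
 * and spans the remaining 6 bytes.
 */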
1823  
1824  static int omap_nand_attach_chip(struct nand_chip *chip)
1825  {
1826  	struct mtd_info *mtd = nand_to_mtd(chip);
1827  	struct omap_nand_info *info = mtd_to_omap(mtd);
1828  	struct device *dev = &info->pdev->dev;
1829  	int min_oobbytes = BBM_LEN;
1830  	int elm_bch_strength = -1;
1831  	int oobbytes_per_step;
1832  	dma_cap_mask_t mask;
1833  	int err;
1834  
1835  	if (chip->bbt_options & NAND_BBT_USE_FLASH)
1836  		chip->bbt_options |= NAND_BBT_NO_OOB;
1837  	else
1838  		chip->options |= NAND_SKIP_BBTSCAN;
1839  
1840  	/* Re-populate low-level callbacks based on xfer modes */
1841  	switch (info->xfer_type) {
1842  	case NAND_OMAP_PREFETCH_POLLED:
1843  		info->data_in = omap_nand_data_in_pref;
1844  		info->data_out = omap_nand_data_out_pref;
1845  		break;
1846  
1847  	case NAND_OMAP_POLLED:
1848  		/* Use nand_base defaults for {read,write}_buf */
1849  		break;
1850  
1851  	case NAND_OMAP_PREFETCH_DMA:
1852  		dma_cap_zero(mask);
1853  		dma_cap_set(DMA_SLAVE, mask);
1854  		info->dma = dma_request_chan(dev->parent, "rxtx");
1855  
1856  		if (IS_ERR(info->dma)) {
1857  			dev_err(dev, "DMA engine request failed\n");
1858  			return PTR_ERR(info->dma);
1859  		} else {
1860  			struct dma_slave_config cfg;
1861  
1862  			memset(&cfg, 0, sizeof(cfg));
1863  			cfg.src_addr = info->phys_base;
1864  			cfg.dst_addr = info->phys_base;
1865  			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1866  			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1867  			cfg.src_maxburst = 16;
1868  			cfg.dst_maxburst = 16;
1869  			err = dmaengine_slave_config(info->dma, &cfg);
1870  			if (err) {
1871  				dev_err(dev,
1872  					"DMA engine slave config failed: %d\n",
1873  					err);
1874  				return err;
1875  			}
1876  
1877  			info->data_in = omap_nand_data_in_dma_pref;
1878  			info->data_out = omap_nand_data_out_dma_pref;
1879  		}
1880  		break;
1881  
1882  	case NAND_OMAP_PREFETCH_IRQ:
1883  		info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
1884  		if (info->gpmc_irq_fifo <= 0)
1885  			return -ENODEV;
1886  		err = devm_request_irq(dev, info->gpmc_irq_fifo,
1887  				       omap_nand_irq, IRQF_SHARED,
1888  				       "gpmc-nand-fifo", info);
1889  		if (err) {
1890  			dev_err(dev, "Requesting IRQ %d, error %d\n",
1891  				info->gpmc_irq_fifo, err);
1892  			info->gpmc_irq_fifo = 0;
1893  			return err;
1894  		}
1895  
1896  		info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
1897  		if (info->gpmc_irq_count <= 0)
1898  			return -ENODEV;
1899  		err = devm_request_irq(dev, info->gpmc_irq_count,
1900  				       omap_nand_irq, IRQF_SHARED,
1901  				       "gpmc-nand-count", info);
1902  		if (err) {
1903  			dev_err(dev, "Requesting IRQ %d, error %d\n",
1904  				info->gpmc_irq_count, err);
1905  			info->gpmc_irq_count = 0;
1906  			return err;
1907  		}
1908  
1909  		info->data_in = omap_nand_data_in_irq_pref;
1910  		info->data_out = omap_nand_data_out_irq_pref;
1911  		break;
1912  
1913  	default:
1914  		dev_err(dev, "xfer_type %d not supported!\n", info->xfer_type);
1915  		return -EINVAL;
1916  	}
1917  
1918  	if (!omap2_nand_ecc_check(info))
1919  		return -EINVAL;
1920  
1921  	/*
1922  	 * Bail out earlier to let NAND_ECC_ENGINE_TYPE_SOFT code create its own
1923  	 * ooblayout instead of using ours.
1924  	 */
1925  	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
1926  		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
1927  		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
1928  		return 0;
1929  	}
1930  
1931  	/* Populate MTD interface based on ECC scheme */
1932  	switch (info->ecc_opt) {
1933  	case OMAP_ECC_HAM1_CODE_HW:
1934  		dev_info(dev, "nand: using OMAP_ECC_HAM1_CODE_HW\n");
1935  		chip->ecc.engine_type	= NAND_ECC_ENGINE_TYPE_ON_HOST;
1936  		chip->ecc.bytes		= 3;
1937  		chip->ecc.size		= 512;
1938  		chip->ecc.strength	= 1;
1939  		chip->ecc.calculate	= omap_calculate_ecc;
1940  		chip->ecc.hwctl		= omap_enable_hwecc;
1941  		chip->ecc.correct	= omap_correct_data;
1942  		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
1943  		oobbytes_per_step	= chip->ecc.bytes;
1944  
1945  		if (!(chip->options & NAND_BUSWIDTH_16))
1946  			min_oobbytes	= 1;
1947  
1948  		break;
1949  
1950  	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1951  		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
1952  		chip->ecc.engine_type	= NAND_ECC_ENGINE_TYPE_ON_HOST;
1953  		chip->ecc.size		= 512;
1954  		chip->ecc.bytes		= 7;
1955  		chip->ecc.strength	= 4;
1956  		chip->ecc.hwctl		= omap_enable_hwecc_bch;
1957  		chip->ecc.correct	= rawnand_sw_bch_correct;
1958  		chip->ecc.calculate	= omap_calculate_ecc_bch_sw;
1959  		mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
1960  		/* Reserve one byte for the OMAP marker */
1961  		oobbytes_per_step	= chip->ecc.bytes + 1;
1962  		/* Software BCH library is used for locating errors */
1963  		err = rawnand_sw_bch_init(chip);
1964  		if (err) {
1965  			dev_err(dev, "Unable to use BCH library\n");
1966  			return err;
1967  		}
1968  		break;
1969  
1970  	case OMAP_ECC_BCH4_CODE_HW:
1971  		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
1972  		chip->ecc.engine_type	= NAND_ECC_ENGINE_TYPE_ON_HOST;
1973  		chip->ecc.size		= 512;
1974  		/* One extra byte is reserved for ROM-code compatibility */
1975  		chip->ecc.bytes		= 7 + 1;
1976  		chip->ecc.strength	= 4;
1977  		chip->ecc.hwctl		= omap_enable_hwecc_bch;
1978  		chip->ecc.correct	= omap_elm_correct_data;
1979  		chip->ecc.read_page	= omap_read_page_bch;
1980  		chip->ecc.write_page	= omap_write_page_bch;
1981  		chip->ecc.write_subpage	= omap_write_subpage_bch;
1982  		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
1983  		oobbytes_per_step	= chip->ecc.bytes;
1984  		elm_bch_strength = BCH4_ECC;
1985  		break;
1986  
1987  	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1988  		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
1989  		chip->ecc.engine_type	= NAND_ECC_ENGINE_TYPE_ON_HOST;
1990  		chip->ecc.size		= 512;
1991  		chip->ecc.bytes		= 13;
1992  		chip->ecc.strength	= 8;
1993  		chip->ecc.hwctl		= omap_enable_hwecc_bch;
1994  		chip->ecc.correct	= rawnand_sw_bch_correct;
1995  		chip->ecc.calculate	= omap_calculate_ecc_bch_sw;
1996  		mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
1997  		/* Reserve one byte for the OMAP marker */
1998  		oobbytes_per_step	= chip->ecc.bytes + 1;
1999  		/* Software BCH library is used for locating errors */
2000  		err = rawnand_sw_bch_init(chip);
2001  		if (err) {
2002  			dev_err(dev, "unable to use BCH library\n");
2003  			return err;
2004  		}
2005  		break;
2006  
2007  	case OMAP_ECC_BCH8_CODE_HW:
2008  		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
2009  		chip->ecc.engine_type	= NAND_ECC_ENGINE_TYPE_ON_HOST;
2010  		chip->ecc.size		= 512;
2011  		/* One extra byte is reserved for ROM-code compatibility */
2012  		chip->ecc.bytes		= 13 + 1;
2013  		chip->ecc.strength	= 8;
2014  		chip->ecc.hwctl		= omap_enable_hwecc_bch;
2015  		chip->ecc.correct	= omap_elm_correct_data;
2016  		chip->ecc.read_page	= omap_read_page_bch;
2017  		chip->ecc.write_page	= omap_write_page_bch;
2018  		chip->ecc.write_subpage	= omap_write_subpage_bch;
2019  		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
2020  		oobbytes_per_step	= chip->ecc.bytes;
2021  		elm_bch_strength = BCH8_ECC;
2022  		break;
2023  
2024  	case OMAP_ECC_BCH16_CODE_HW:
2025  		pr_info("Using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
2026  		chip->ecc.engine_type	= NAND_ECC_ENGINE_TYPE_ON_HOST;
2027  		chip->ecc.size		= 512;
2028  		chip->ecc.bytes		= 26;
2029  		chip->ecc.strength	= 16;
2030  		chip->ecc.hwctl		= omap_enable_hwecc_bch;
2031  		chip->ecc.correct	= omap_elm_correct_data;
2032  		chip->ecc.read_page	= omap_read_page_bch;
2033  		chip->ecc.write_page	= omap_write_page_bch;
2034  		chip->ecc.write_subpage	= omap_write_subpage_bch;
2035  		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
2036  		oobbytes_per_step	= chip->ecc.bytes;
2037  		elm_bch_strength = BCH16_ECC;
2038  		break;
2039  	default:
2040  		dev_err(dev, "Invalid or unsupported ECC scheme\n");
2041  		return -EINVAL;
2042  	}
2043  
2044  	if (elm_bch_strength >= 0) {
2045  		chip->ecc.steps = mtd->writesize / chip->ecc.size;
2046  		info->neccpg = chip->ecc.steps / ERROR_VECTOR_MAX;
2047  		if (info->neccpg) {
2048  			info->nsteps_per_eccpg = ERROR_VECTOR_MAX;
2049  		} else {
2050  			info->neccpg = 1;
2051  			info->nsteps_per_eccpg = chip->ecc.steps;
2052  		}
2053  		info->eccpg_size = info->nsteps_per_eccpg * chip->ecc.size;
2054  		info->eccpg_bytes = info->nsteps_per_eccpg * chip->ecc.bytes;
2055  
2056  		err = elm_config(info->elm_dev, elm_bch_strength,
2057  				 info->nsteps_per_eccpg, chip->ecc.size,
2058  				 chip->ecc.bytes);
2059  		if (err < 0)
2060  			return err;
2061  	}
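	/*
	 * Worked example of the grouping above, assuming ERROR_VECTOR_MAX
	 * is 8 (one error vector per ELM syndrome polynomial): a 4KiB page
	 * with 512-byte steps gives chip->ecc.steps = 8, so neccpg = 1,
	 * nsteps_per_eccpg = 8 and the whole page forms one ECC page group
	 * of eccpg_size = 4096 bytes. An 8KiB page gives steps = 16, hence
	 * neccpg = 2 groups of 4096 data bytes each, while a 2KiB page
	 * (steps = 4) falls back to a single group covering the page.
	 */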
2062  
2063  	/* Check that the NAND device's OOB area can hold the ECC signatures */
2064  	min_oobbytes += (oobbytes_per_step *
2065  			 (mtd->writesize / chip->ecc.size));
2066  	if (mtd->oobsize < min_oobbytes) {
2067  		dev_err(dev,
2068  			"Not enough OOB bytes: required = %d, available=%d\n",
2069  			min_oobbytes, mtd->oobsize);
2070  		return -EINVAL;
2071  	}
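	/*
	 * Example of the check above, assuming BBM_LEN is 2: BCH8 on a 2KiB
	 * page with 64 OOB bytes needs 2 + 14 * (2048 / 512) = 58 bytes,
	 * which fits; BCH16 (26 bytes per step) on the same page would need
	 * 2 + 104 = 106 bytes and is rejected here.
	 */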
2072  
2073  	return 0;
2074  }
2075  
2076  static void omap_nand_data_in(struct nand_chip *chip, void *buf,
2077  			      unsigned int len, bool force_8bit)
2078  {
2079  	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
2080  	u32 alignment = ((uintptr_t)buf | len) & 3;
2081  
2082  	if (force_8bit || (alignment & 1))
2083  		ioread8_rep(info->fifo, buf, len);
2084  	else if (alignment & 3)
2085  		ioread16_rep(info->fifo, buf, len >> 1);
2086  	else
2087  		ioread32_rep(info->fifo, buf, len >> 2);
2088  }
2089  
2090  static void omap_nand_data_out(struct nand_chip *chip,
2091  			       const void *buf, unsigned int len,
2092  			       bool force_8bit)
2093  {
2094  	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
2095  	u32 alignment = ((uintptr_t)buf | len) & 3;
2096  
2097  	if (force_8bit || (alignment & 1))
2098  		iowrite8_rep(info->fifo, buf, len);
2099  	else if (alignment & 3)
2100  		iowrite16_rep(info->fifo, buf, len >> 1);
2101  	else
2102  		iowrite32_rep(info->fifo, buf, len >> 2);
2103  }
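/*
 * The alignment test above ORs the buffer address with the length: a
 * 2048-byte transfer into a word-aligned buffer takes the 32-bit path
 * (2048 >> 2 = 512 FIFO accesses), a transfer with an odd length or odd
 * buffer address falls back to byte accesses, a halfword-aligned one uses
 * 16-bit accesses, and force_8bit always selects the byte path.
 */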
2104  
2105  static int omap_nand_exec_instr(struct nand_chip *chip,
2106  				const struct nand_op_instr *instr)
2107  {
2108  	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
2109  	unsigned int i;
2110  	int ret;
2111  
2112  	switch (instr->type) {
2113  	case NAND_OP_CMD_INSTR:
2114  		iowrite8(instr->ctx.cmd.opcode,
2115  			 info->reg.gpmc_nand_command);
2116  		break;
2117  
2118  	case NAND_OP_ADDR_INSTR:
2119  		for (i = 0; i < instr->ctx.addr.naddrs; i++) {
2120  			iowrite8(instr->ctx.addr.addrs[i],
2121  				 info->reg.gpmc_nand_address);
2122  		}
2123  		break;
2124  
2125  	case NAND_OP_DATA_IN_INSTR:
2126  		info->data_in(chip, instr->ctx.data.buf.in,
2127  			      instr->ctx.data.len,
2128  			      instr->ctx.data.force_8bit);
2129  		break;
2130  
2131  	case NAND_OP_DATA_OUT_INSTR:
2132  		info->data_out(chip, instr->ctx.data.buf.out,
2133  			       instr->ctx.data.len,
2134  			       instr->ctx.data.force_8bit);
2135  		break;
2136  
2137  	case NAND_OP_WAITRDY_INSTR:
2138  		ret = info->ready_gpiod ?
2139  			nand_gpio_waitrdy(chip, info->ready_gpiod, instr->ctx.waitrdy.timeout_ms) :
2140  			nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
2141  		if (ret)
2142  			return ret;
2143  		break;
2144  	}
2145  
2146  	if (instr->delay_ns)
2147  		ndelay(instr->delay_ns);
2148  
2149  	return 0;
2150  }
2151  
2152  static int omap_nand_exec_op(struct nand_chip *chip,
2153  			     const struct nand_operation *op,
2154  			     bool check_only)
2155  {
2156  	unsigned int i;
2157  
2158  	if (check_only)
2159  		return 0;
2160  
2161  	for (i = 0; i < op->ninstrs; i++) {
2162  		int ret;
2163  
2164  		ret = omap_nand_exec_instr(chip, &op->instrs[i]);
2165  		if (ret)
2166  			return ret;
2167  	}
2168  
2169  	return 0;
2170  }
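/*
 * Rough illustration of the flow (the exact instruction sequence is built
 * by the rawnand core): a page read typically arrives as CMD(0x00),
 * ADDR(column + row), CMD(0x30), WAITRDY, DATA_IN. omap_nand_exec_instr()
 * turns each instruction into GPMC command/address register writes, a
 * ready/busy wait (GPIO based when an "rb" gpio is provided, software
 * polling otherwise) and FIFO transfers through the configured
 * data_in/data_out helpers.
 */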
2171  
2172  static const struct nand_controller_ops omap_nand_controller_ops = {
2173  	.attach_chip = omap_nand_attach_chip,
2174  	.exec_op = omap_nand_exec_op,
2175  };
2176  
2177  /* Shared among all NAND instances to synchronize access to the ECC Engine */
2178  static struct nand_controller omap_gpmc_controller;
2179  static bool omap_gpmc_controller_initialized;
2180  
2181  static int omap_nand_probe(struct platform_device *pdev)
2182  {
2183  	struct omap_nand_info		*info;
2184  	struct mtd_info			*mtd;
2185  	struct nand_chip		*nand_chip;
2186  	int				err;
2187  	struct resource			*res;
2188  	struct device			*dev = &pdev->dev;
2189  	void __iomem *vaddr;
2190  
2191  	info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
2192  				GFP_KERNEL);
2193  	if (!info)
2194  		return -ENOMEM;
2195  
2196  	info->pdev = pdev;
2197  
2198  	err = omap_get_dt_info(dev, info);
2199  	if (err)
2200  		return err;
2201  
2202  	info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
2203  	if (!info->ops) {
2204  		dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
2205  		return -ENODEV;
2206  	}
2207  
2208  	nand_chip		= &info->nand;
2209  	mtd			= nand_to_mtd(nand_chip);
2210  	mtd->dev.parent		= &pdev->dev;
2211  	nand_set_flash_node(nand_chip, dev->of_node);
2212  
2213  	if (!mtd->name) {
2214  		mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
2215  					   "omap2-nand.%d", info->gpmc_cs);
2216  		if (!mtd->name) {
2217  			dev_err(&pdev->dev, "Failed to set MTD name\n");
2218  			return -ENOMEM;
2219  		}
2220  	}
2221  
2222  	vaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2223  	if (IS_ERR(vaddr))
2224  		return PTR_ERR(vaddr);
2225  
2226  	info->fifo = vaddr;
2227  	info->phys_base = res->start;
2228  
2229  	if (!omap_gpmc_controller_initialized) {
2230  		omap_gpmc_controller.ops = &omap_nand_controller_ops;
2231  		nand_controller_init(&omap_gpmc_controller);
2232  		omap_gpmc_controller_initialized = true;
2233  	}
2234  
2235  	nand_chip->controller = &omap_gpmc_controller;
2236  
2237  	info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
2238  						    GPIOD_IN);
2239  	if (IS_ERR(info->ready_gpiod)) {
2240  		dev_err(dev, "failed to get ready gpio\n");
2241  		return PTR_ERR(info->ready_gpiod);
2242  	}
2243  
2244  	if (info->flash_bbt)
2245  		nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
2246  
2247  	/* default operations */
2248  	info->data_in = omap_nand_data_in;
2249  	info->data_out = omap_nand_data_out;
2250  
2251  	err = nand_scan(nand_chip, 1);
2252  	if (err)
2253  		goto return_error;
2254  
2255  	err = mtd_device_register(mtd, NULL, 0);
2256  	if (err)
2257  		goto cleanup_nand;
2258  
2259  	platform_set_drvdata(pdev, mtd);
2260  
2261  	return 0;
2262  
2263  cleanup_nand:
2264  	nand_cleanup(nand_chip);
2265  
2266  return_error:
2267  	if (!IS_ERR_OR_NULL(info->dma))
2268  		dma_release_channel(info->dma);
2269  
2270  	rawnand_sw_bch_cleanup(nand_chip);
2271  
2272  	return err;
2273  }
2274  
2275  static void omap_nand_remove(struct platform_device *pdev)
2276  {
2277  	struct mtd_info *mtd = platform_get_drvdata(pdev);
2278  	struct nand_chip *nand_chip = mtd_to_nand(mtd);
2279  	struct omap_nand_info *info = mtd_to_omap(mtd);
2280  
2281  	rawnand_sw_bch_cleanup(nand_chip);
2282  
2283  	if (info->dma)
2284  		dma_release_channel(info->dma);
2285  	WARN_ON(mtd_device_unregister(mtd));
2286  	nand_cleanup(nand_chip);
2287  }
2288  
2289  /* omap_nand_ids defined in linux/platform_data/mtd-nand-omap2.h */
2290  MODULE_DEVICE_TABLE(of, omap_nand_ids);
2291  
2292  static struct platform_driver omap_nand_driver = {
2293  	.probe		= omap_nand_probe,
2294  	.remove_new	= omap_nand_remove,
2295  	.driver		= {
2296  		.name	= DRIVER_NAME,
2297  		.of_match_table = omap_nand_ids,
2298  	},
2299  };
2300  
2301  module_platform_driver(omap_nand_driver);
2302  
2303  MODULE_ALIAS("platform:" DRIVER_NAME);
2304  MODULE_LICENSE("GPL");
2305  MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
2306