// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2020 Intel Corporation. */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand.h>

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/units.h>
#include <asm/unaligned.h>

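/*
 * EBU (External Bus Unit) register offsets and bit-field helpers. These
 * control chip-select decoding (ADDR_SEL), bus timing (BUSCON), the NAND
 * control signals (CON) and the ready/write-complete status (WAIT) used
 * by the programmed-I/O path below.
 */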
#define EBU_CLC			0x000
#define EBU_CLC_RST		0x00000000u

#define EBU_ADDR_SEL(n)		(0x020 + (n) * 4)
/* 5 bits 26:22 included for comparison in the ADDR_SELx */
#define EBU_ADDR_MASK(x)	((x) << 4)
#define EBU_ADDR_SEL_REGEN	0x1

#define EBU_BUSCON(n)		(0x060 + (n) * 4)
#define EBU_BUSCON_CMULT_V4	0x1
#define EBU_BUSCON_RECOVC(n)	((n) << 2)
#define EBU_BUSCON_HOLDC(n)	((n) << 4)
#define EBU_BUSCON_WAITRDC(n)	((n) << 6)
#define EBU_BUSCON_WAITWRC(n)	((n) << 8)
#define EBU_BUSCON_BCGEN_CS	0x0
#define EBU_BUSCON_SETUP_EN	BIT(22)
#define EBU_BUSCON_ALEC		0xC000

#define EBU_CON			0x0B0
#define EBU_CON_NANDM_EN	BIT(0)
#define EBU_CON_NANDM_DIS	0x0
#define EBU_CON_CSMUX_E_EN	BIT(1)
#define EBU_CON_ALE_P_LOW	BIT(2)
#define EBU_CON_CLE_P_LOW	BIT(3)
#define EBU_CON_CS_P_LOW	BIT(4)
#define EBU_CON_SE_P_LOW	BIT(5)
#define EBU_CON_WP_P_LOW	BIT(6)
#define EBU_CON_PRE_P_LOW	BIT(7)
#define EBU_CON_IN_CS_S(n)	((n) << 8)
#define EBU_CON_OUT_CS_S(n)	((n) << 10)
#define EBU_CON_LAT_EN_CS_P	((0x3D) << 18)

#define EBU_WAIT		0x0B4
#define EBU_WAIT_RDBY		BIT(0)
#define EBU_WAIT_WR_C		BIT(3)

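/*
 * HSNAND register offsets and bit fields. The HSNAND block provides the
 * DMA-based page read/write path with hardware BCH ECC, used by the
 * ->read_page()/->write_page() hooks further down.
 */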
#define HSNAND_CTL1		0x110
#define HSNAND_CTL1_ADDR_SHIFT	24

#define HSNAND_CTL2		0x114
#define HSNAND_CTL2_ADDR_SHIFT	8
#define HSNAND_CTL2_CYC_N_V5	(0x2 << 16)

#define HSNAND_INT_MSK_CTL	0x124
#define HSNAND_INT_MSK_CTL_WR_C	BIT(4)

#define HSNAND_INT_STA		0x128
#define HSNAND_INT_STA_WR_C	BIT(4)

#define HSNAND_CTL		0x130
#define HSNAND_CTL_ENABLE_ECC	BIT(0)
#define HSNAND_CTL_GO		BIT(2)
#define HSNAND_CTL_CE_SEL_CS(n)	BIT(3 + (n))
#define HSNAND_CTL_RW_READ	0x0
#define HSNAND_CTL_RW_WRITE	BIT(10)
#define HSNAND_CTL_ECC_OFF_V8TH	BIT(11)
#define HSNAND_CTL_CKFF_EN	0x0
#define HSNAND_CTL_MSG_EN	BIT(17)

#define HSNAND_PARA0		0x13c
#define HSNAND_PARA0_PAGE_V8192	0x3
#define HSNAND_PARA0_PIB_V256	(0x3 << 4)
#define HSNAND_PARA0_BYP_EN_NP	0x0
#define HSNAND_PARA0_BYP_DEC_NP	0x0
#define HSNAND_PARA0_TYPE_ONFI	BIT(18)
#define HSNAND_PARA0_ADEP_EN	BIT(21)

#define HSNAND_CMSG_0		0x150
#define HSNAND_CMSG_1		0x154

#define HSNAND_ALE_OFFS		BIT(2)
#define HSNAND_CLE_OFFS		BIT(3)
#define HSNAND_CS_OFFS		BIT(4)

#define HSNAND_ECC_OFFSET	0x008

#define MAX_CS	2

#define USEC_PER_SEC	1000000L

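/*
 * Per chip-select state: the ioremapped data window used for byte-wide
 * command/address/data accesses and the physical base programmed into
 * EBU_ADDR_SEL for that chip select.
 */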
struct ebu_nand_cs {
	void __iomem *chipaddr;
	u32 addr_sel;
};

struct ebu_nand_controller {
	struct nand_controller controller;
	struct nand_chip chip;
	struct device *dev;
	void __iomem *ebu;
	void __iomem *hsnand;
	struct dma_chan *dma_tx;
	struct dma_chan *dma_rx;
	struct completion dma_access_complete;
	struct clk *clk;
	u32 nd_para0;
	u8 cs_num;
	struct ebu_nand_cs cs[MAX_CS];
};

static inline struct ebu_nand_controller *nand_to_ebu(struct nand_chip *chip)
{
	return container_of(chip, struct ebu_nand_controller, chip);
}

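/*
 * Poll EBU_WAIT until the chip reports ready (RDBY) or the current write
 * cycle has completed (WR_C). Note that the last argument is handed to
 * readl_poll_timeout() as its timeout in microseconds.
 */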
static int ebu_nand_waitrdy(struct nand_chip *chip, int timeout_ms)
{
	struct ebu_nand_controller *ctrl = nand_to_ebu(chip);
	u32 status;

	return readl_poll_timeout(ctrl->ebu + EBU_WAIT, status,
				  (status & EBU_WAIT_RDBY) ||
				  (status & EBU_WAIT_WR_C), 20, timeout_ms);
}

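/*
 * Programmed-I/O accessors: commands, addresses and data bytes go through
 * the chip-select memory window, with the ALE/CLE/CS lines encoded as
 * address offsets (HSNAND_ALE_OFFS/HSNAND_CLE_OFFS/HSNAND_CS_OFFS).
 */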
static u8 ebu_nand_readb(struct nand_chip *chip)
{
	struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
	u8 cs_num = ebu_host->cs_num;
	u8 val;

	val = readb(ebu_host->cs[cs_num].chipaddr + HSNAND_CS_OFFS);
	ebu_nand_waitrdy(chip, 1000);
	return val;
}

static void ebu_nand_writeb(struct nand_chip *chip, u32 offset, u8 value)
{
	struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
	u8 cs_num = ebu_host->cs_num;

	writeb(value, ebu_host->cs[cs_num].chipaddr + offset);
	ebu_nand_waitrdy(chip, 1000);
}

static void ebu_read_buf(struct nand_chip *chip, u_char *buf, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		buf[i] = ebu_nand_readb(chip);
}

static void ebu_write_buf(struct nand_chip *chip, const u_char *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		ebu_nand_writeb(chip, HSNAND_CS_OFFS, buf[i]);
}

static void ebu_nand_disable(struct nand_chip *chip)
{
	struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);

	writel(0, ebu_host->ebu + EBU_CON);
}

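/*
 * Enable NAND mode on the EBU and route the control signals (CS, SE, WP
 * and PRE active low, latch enable) to the chip select this controller
 * instance is wired to.
 */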
static void ebu_select_chip(struct nand_chip *chip)
{
	struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
	void __iomem *nand_con = ebu_host->ebu + EBU_CON;
	u32 cs = ebu_host->cs_num;

	writel(EBU_CON_NANDM_EN | EBU_CON_CSMUX_E_EN | EBU_CON_CS_P_LOW |
	       EBU_CON_SE_P_LOW | EBU_CON_WP_P_LOW | EBU_CON_PRE_P_LOW |
	       EBU_CON_IN_CS_S(cs) | EBU_CON_OUT_CS_S(cs) |
	       EBU_CON_LAT_EN_CS_P, nand_con);
}

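/*
 * Translate the SDR timings requested by the core into EBU_BUSCON cycle
 * counts. The EBU clock period is computed in picoseconds so it can be
 * divided directly into the picosecond values of nand_sdr_timings.
 */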
static int ebu_nand_set_timings(struct nand_chip *chip, int csline,
				const struct nand_interface_config *conf)
{
	struct ebu_nand_controller *ctrl = nand_to_ebu(chip);
	unsigned int rate = clk_get_rate(ctrl->clk) / HZ_PER_MHZ;
	unsigned int period = DIV_ROUND_UP(USEC_PER_SEC, rate);
	const struct nand_sdr_timings *timings;
	u32 trecov, thold, twrwait, trdwait;
	u32 reg = 0;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	trecov = DIV_ROUND_UP(max(timings->tREA_max, timings->tREH_min),
			      period);
	reg |= EBU_BUSCON_RECOVC(trecov);

	thold = DIV_ROUND_UP(max(timings->tDH_min, timings->tDS_min), period);
	reg |= EBU_BUSCON_HOLDC(thold);

	trdwait = DIV_ROUND_UP(max(timings->tRC_min, timings->tREH_min),
			       period);
	reg |= EBU_BUSCON_WAITRDC(trdwait);

	twrwait = DIV_ROUND_UP(max(timings->tWC_min, timings->tWH_min), period);
	reg |= EBU_BUSCON_WAITWRC(twrwait);

	reg |= EBU_BUSCON_CMULT_V4 | EBU_BUSCON_BCGEN_CS | EBU_BUSCON_ALEC |
		EBU_BUSCON_SETUP_EN;

	writel(reg, ctrl->ebu + EBU_BUSCON(ctrl->cs_num));

	return 0;
}

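/*
 * OOB layout used with the hardware ECC engine: the ECC bytes start at a
 * fixed offset of 8 bytes into the OOB area, and everything after them is
 * reported as free.
 */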
static int ebu_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = HSNAND_ECC_OFFSET;
	oobregion->length = chip->ecc.total;

	return 0;
}

static int ebu_nand_ooblayout_free(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = chip->ecc.total + HSNAND_ECC_OFFSET;
	oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops ebu_nand_ooblayout_ops = {
	.ecc = ebu_nand_ooblayout_ecc,
	.free = ebu_nand_ooblayout_free,
};

static void ebu_dma_rx_callback(void *cookie)
{
	struct ebu_nand_controller *ebu_host = cookie;

	dmaengine_terminate_async(ebu_host->dma_rx);

	complete(&ebu_host->dma_access_complete);
}

static void ebu_dma_tx_callback(void *cookie)
{
	struct ebu_nand_controller *ebu_host = cookie;

	dmaengine_terminate_async(ebu_host->dma_tx);

	complete(&ebu_host->dma_access_complete);
}

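/*
 * Map the page buffer, run a single slave DMA transfer in the requested
 * direction and wait (with a 1 s timeout) for the completion raised from
 * the DMA callback before releasing the mapping again.
 */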
static int ebu_dma_start(struct ebu_nand_controller *ebu_host, u32 dir,
			 const u8 *buf, u32 len)
{
	struct dma_async_tx_descriptor *tx;
	struct completion *dma_completion;
	dma_async_tx_callback callback;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	dma_addr_t buf_dma;
	int ret;
	u32 timeout;

	if (dir == DMA_DEV_TO_MEM) {
		chan = ebu_host->dma_rx;
		dma_completion = &ebu_host->dma_access_complete;
		callback = ebu_dma_rx_callback;
	} else {
		chan = ebu_host->dma_tx;
		dma_completion = &ebu_host->dma_access_complete;
		callback = ebu_dma_tx_callback;
	}

	buf_dma = dma_map_single(chan->device->dev, (void *)buf, len, dir);
	if (dma_mapping_error(chan->device->dev, buf_dma)) {
		dev_err(ebu_host->dev, "Failed to map DMA buffer\n");
		/* Nothing was mapped, so do not take the unmap error path */
		return -EIO;
	}

	tx = dmaengine_prep_slave_single(chan, buf_dma, len, dir, flags);
	if (!tx) {
		ret = -ENXIO;
		goto err_unmap;
	}

	tx->callback = callback;
	tx->callback_param = ebu_host;
	cookie = tx->tx_submit(tx);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(ebu_host->dev, "dma_submit_error %d\n", cookie);
		ret = -EIO;
		goto err_unmap;
	}

	init_completion(dma_completion);
	dma_async_issue_pending(chan);

	/* Wait for the DMA to finish the data transfer. */
	timeout = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
	if (!timeout) {
		dev_err(ebu_host->dev, "I/O error in DMA transfer (status %d)\n",
			dmaengine_tx_status(chan, cookie, NULL));
		dmaengine_terminate_sync(chan);
		ret = -ETIMEDOUT;
		goto err_unmap;
	}

	/* Transfer done, release the streaming mapping */
	dma_unmap_single(chan->device->dev, buf_dma, len, dir);

	return 0;

err_unmap:
	dma_unmap_single(chan->device->dev, buf_dma, len, dir);

	return ret;
}

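/*
 * Program the HSNAND engine for a page access: the page address is split
 * across CTL1/CTL2, geometry/ECC parameters come from nd_para0, the CMSG
 * scratch registers are cleared, the WR_C bit is written to the interrupt
 * mask register, and finally GO is set together with the read or write
 * direction.
 */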
static void ebu_nand_trigger(struct ebu_nand_controller *ebu_host,
			     int page, u32 cmd)
{
	unsigned int val;

	val = cmd | (page & 0xFF) << HSNAND_CTL1_ADDR_SHIFT;
	writel(val, ebu_host->hsnand + HSNAND_CTL1);
	val = (page & 0xFFFF00) >> 8 | HSNAND_CTL2_CYC_N_V5;
	writel(val, ebu_host->hsnand + HSNAND_CTL2);

	writel(ebu_host->nd_para0, ebu_host->hsnand + HSNAND_PARA0);

	/* clear first, will update later */
	writel(0xFFFFFFFF, ebu_host->hsnand + HSNAND_CMSG_0);
	writel(0xFFFFFFFF, ebu_host->hsnand + HSNAND_CMSG_1);

	writel(HSNAND_INT_MSK_CTL_WR_C,
	       ebu_host->hsnand + HSNAND_INT_MSK_CTL);

	if (!cmd)
		val = HSNAND_CTL_RW_READ;
	else
		val = HSNAND_CTL_RW_WRITE;

	writel(HSNAND_CTL_MSG_EN | HSNAND_CTL_CKFF_EN |
	       HSNAND_CTL_ECC_OFF_V8TH | HSNAND_CTL_CE_SEL_CS(ebu_host->cs_num) |
	       HSNAND_CTL_ENABLE_ECC | HSNAND_CTL_GO | val,
	       ebu_host->hsnand + HSNAND_CTL);
}

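/*
 * Hardware-ECC page read: trigger the HSNAND engine with NAND_CMD_READ0,
 * pull the main data in through the RX DMA channel, optionally read the
 * OOB area, then clear the GO bit again.
 */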
static int ebu_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
	int ret, reg_data;

	ebu_nand_trigger(ebu_host, page, NAND_CMD_READ0);

	ret = ebu_dma_start(ebu_host, DMA_DEV_TO_MEM, buf, mtd->writesize);
	if (ret)
		return ret;

	if (oob_required)
		chip->ecc.read_oob(chip, page);

	reg_data = readl(ebu_host->hsnand + HSNAND_CTL);
	reg_data &= ~HSNAND_CTL_GO;
	writel(reg_data, ebu_host->hsnand + HSNAND_CTL);

	return 0;
}

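/*
 * Hardware-ECC page write: trigger the engine with NAND_CMD_SEQIN, push
 * the main data out through the TX DMA channel, load the first eight OOB
 * bytes into CMSG_0/CMSG_1 if requested, and poll INT_STA until the
 * write-complete (WR_C) flag clears before dropping the GO bit.
 */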
static int ebu_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
				     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
	void __iomem *int_sta = ebu_host->hsnand + HSNAND_INT_STA;
	int reg_data, ret, val;
	u32 reg;

	ebu_nand_trigger(ebu_host, page, NAND_CMD_SEQIN);

	ret = ebu_dma_start(ebu_host, DMA_MEM_TO_DEV, buf, mtd->writesize);
	if (ret)
		return ret;

	if (oob_required) {
		reg = get_unaligned_le32(chip->oob_poi);
		writel(reg, ebu_host->hsnand + HSNAND_CMSG_0);

		reg = get_unaligned_le32(chip->oob_poi + 4);
		writel(reg, ebu_host->hsnand + HSNAND_CMSG_1);
	}

	ret = readl_poll_timeout_atomic(int_sta, val, !(val & HSNAND_INT_STA_WR_C),
					10, 1000);
	if (ret)
		return ret;

	reg_data = readl(ebu_host->hsnand + HSNAND_CTL);
	reg_data &= ~HSNAND_CTL_GO;
	writel(reg_data, ebu_host->hsnand + HSNAND_CTL);

	return 0;
}

static const u8 ecc_strength[] = { 1, 1, 4, 8, 24, 32, 40, 60, };

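/*
 * Derive the ECC and geometry configuration for the attached chip. The
 * index into ecc_strength[] matching the requested correction strength is
 * programmed into the top bits of PARA0 (val << 29), together with the
 * encoded page size and pages-per-block values.
 */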
static int ebu_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
	u32 ecc_steps, ecc_bytes, ecc_total, pagesize, pg_per_blk;
	u32 ecc_strength_ds = chip->ecc.strength;
	u32 ecc_size = chip->ecc.size;
	u32 writesize = mtd->writesize;
	u32 blocksize = mtd->erasesize;
	int bch_algo, start, val;

	/* Default to an ECC size of 512 */
	if (!chip->ecc.size)
		chip->ecc.size = 512;

	switch (ecc_size) {
	case 512:
		start = 1;
		if (!ecc_strength_ds)
			ecc_strength_ds = 4;
		break;
	case 1024:
		start = 4;
		if (!ecc_strength_ds)
			ecc_strength_ds = 32;
		break;
	default:
		return -EINVAL;
	}

	/* BCH ECC algorithm Settings for number of bits per 512B/1024B */
	bch_algo = round_up(start + 1, 4);
	for (val = start; val < bch_algo; val++) {
		if (ecc_strength_ds == ecc_strength[val])
			break;
	}
	if (val == bch_algo)
		return -EINVAL;

	if (ecc_strength_ds == 8)
		ecc_bytes = 14;
	else
		ecc_bytes = DIV_ROUND_UP(ecc_strength_ds * fls(8 * ecc_size), 8);

	ecc_steps = writesize / ecc_size;
	ecc_total = ecc_steps * ecc_bytes;
	if ((ecc_total + 8) > mtd->oobsize)
		return -ERANGE;

	chip->ecc.total = ecc_total;
	pagesize = fls(writesize >> 11);
	if (pagesize > HSNAND_PARA0_PAGE_V8192)
		return -ERANGE;

	pg_per_blk = fls((blocksize / writesize) >> 6) / 8;
	if (pg_per_blk > HSNAND_PARA0_PIB_V256)
		return -ERANGE;

	ebu_host->nd_para0 = pagesize | pg_per_blk | HSNAND_PARA0_BYP_EN_NP |
			     HSNAND_PARA0_BYP_DEC_NP | HSNAND_PARA0_ADEP_EN |
			     HSNAND_PARA0_TYPE_ONFI | (val << 29);

	mtd_set_ooblayout(mtd, &ebu_nand_ooblayout_ops);
	chip->ecc.read_page = ebu_nand_read_page_hwecc;
	chip->ecc.write_page = ebu_nand_write_page_hwecc;

	return 0;
}

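/*
 * Raw operation parser: commands and addresses are issued byte by byte
 * through the programmed-I/O window, data moves via ebu_read_buf() and
 * ebu_write_buf(), and WAITRDY instructions poll EBU_WAIT (the millisecond
 * timeout from the core is converted to microseconds here).
 */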
static int ebu_nand_exec_op(struct nand_chip *chip,
			    const struct nand_operation *op, bool check_only)
{
	const struct nand_op_instr *instr = NULL;
	unsigned int op_id;
	int i, timeout_ms, ret = 0;

	if (check_only)
		return 0;

	ebu_select_chip(chip);
	for (op_id = 0; op_id < op->ninstrs; op_id++) {
		instr = &op->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			ebu_nand_writeb(chip, HSNAND_CLE_OFFS | HSNAND_CS_OFFS,
					instr->ctx.cmd.opcode);
			break;

		case NAND_OP_ADDR_INSTR:
			for (i = 0; i < instr->ctx.addr.naddrs; i++)
				ebu_nand_writeb(chip,
						HSNAND_ALE_OFFS | HSNAND_CS_OFFS,
						instr->ctx.addr.addrs[i]);
			break;

		case NAND_OP_DATA_IN_INSTR:
			ebu_read_buf(chip, instr->ctx.data.buf.in,
				     instr->ctx.data.len);
			break;

		case NAND_OP_DATA_OUT_INSTR:
			ebu_write_buf(chip, instr->ctx.data.buf.out,
				      instr->ctx.data.len);
			break;

		case NAND_OP_WAITRDY_INSTR:
			timeout_ms = instr->ctx.waitrdy.timeout_ms * 1000;
			ret = ebu_nand_waitrdy(chip, timeout_ms);
			break;
		}
	}

	return ret;
}

static const struct nand_controller_ops ebu_nand_controller_ops = {
	.attach_chip = ebu_nand_attach_chip,
	.setup_interface = ebu_nand_set_timings,
	.exec_op = ebu_nand_exec_op,
};

static void ebu_dma_cleanup(struct ebu_nand_controller *ebu_host)
{
	if (ebu_host->dma_rx)
		dma_release_channel(ebu_host->dma_rx);

	if (ebu_host->dma_tx)
		dma_release_channel(ebu_host->dma_tx);
}

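/*
 * Probe: map the "ebunand" and "hsnand" register ranges, read the chip
 * select from the child node's "reg" property, map the matching
 * "nand_cs%d" data window and "addr_sel%d" resource, grab the clock and
 * the "tx"/"rx" DMA channels, then scan and register the single NAND chip
 * (whose MTD name must come from a "label" property in the device tree).
 */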
static int ebu_nand_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ebu_nand_controller *ebu_host;
	struct device_node *chip_np;
	struct nand_chip *nand;
	struct mtd_info *mtd;
	struct resource *res;
	char *resname;
	int ret;
	u32 cs;

	ebu_host = devm_kzalloc(dev, sizeof(*ebu_host), GFP_KERNEL);
	if (!ebu_host)
		return -ENOMEM;

	ebu_host->dev = dev;
	nand_controller_init(&ebu_host->controller);

	ebu_host->ebu = devm_platform_ioremap_resource_byname(pdev, "ebunand");
	if (IS_ERR(ebu_host->ebu))
		return PTR_ERR(ebu_host->ebu);

	ebu_host->hsnand = devm_platform_ioremap_resource_byname(pdev, "hsnand");
	if (IS_ERR(ebu_host->hsnand))
		return PTR_ERR(ebu_host->hsnand);

	chip_np = of_get_next_child(dev->of_node, NULL);
	if (!chip_np)
		return dev_err_probe(dev, -EINVAL,
				     "Could not find child node for the NAND chip\n");

	ret = of_property_read_u32(chip_np, "reg", &cs);
	if (ret) {
		dev_err(dev, "failed to get chip select: %d\n", ret);
		goto err_of_node_put;
	}
	if (cs >= MAX_CS) {
		dev_err(dev, "got invalid chip select: %d\n", cs);
		ret = -EINVAL;
		goto err_of_node_put;
	}

	ebu_host->cs_num = cs;

	resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
	if (!resname) {
		ret = -ENOMEM;
		goto err_of_node_put;
	}

	ebu_host->cs[cs].chipaddr = devm_platform_ioremap_resource_byname(pdev,
									  resname);
	if (IS_ERR(ebu_host->cs[cs].chipaddr)) {
		ret = PTR_ERR(ebu_host->cs[cs].chipaddr);
		goto err_of_node_put;
	}

	ebu_host->clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(ebu_host->clk)) {
		ret = dev_err_probe(dev, PTR_ERR(ebu_host->clk),
				    "failed to get and enable clock\n");
		goto err_of_node_put;
	}

	ebu_host->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(ebu_host->dma_tx)) {
		ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx),
				    "failed to request DMA tx chan\n");
		goto err_of_node_put;
	}

	ebu_host->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(ebu_host->dma_rx)) {
		ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_rx),
				    "failed to request DMA rx chan\n");
		ebu_host->dma_rx = NULL;
		goto err_cleanup_dma;
	}

	resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
	if (!resname) {
		ret = -ENOMEM;
		goto err_cleanup_dma;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
	if (!res) {
		ret = -EINVAL;
		goto err_cleanup_dma;
	}
	ebu_host->cs[cs].addr_sel = res->start;
	writel(ebu_host->cs[cs].addr_sel | EBU_ADDR_MASK(5) | EBU_ADDR_SEL_REGEN,
	       ebu_host->ebu + EBU_ADDR_SEL(cs));

	nand_set_flash_node(&ebu_host->chip, chip_np);

	mtd = nand_to_mtd(&ebu_host->chip);
	if (!mtd->name) {
		dev_err(ebu_host->dev, "NAND label property is mandatory\n");
		ret = -EINVAL;
		goto err_cleanup_dma;
	}

	mtd->dev.parent = dev;
	ebu_host->dev = dev;

	platform_set_drvdata(pdev, ebu_host);
	nand_set_controller_data(&ebu_host->chip, ebu_host);

	nand = &ebu_host->chip;
	nand->controller = &ebu_host->controller;
	nand->controller->ops = &ebu_nand_controller_ops;

	/* Scan to find existence of the device */
	ret = nand_scan(&ebu_host->chip, 1);
	if (ret)
		goto err_cleanup_dma;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_clean_nand;

	return 0;

err_clean_nand:
	nand_cleanup(&ebu_host->chip);
err_cleanup_dma:
	ebu_dma_cleanup(ebu_host);
err_of_node_put:
	of_node_put(chip_np);

	return ret;
}

static void ebu_nand_remove(struct platform_device *pdev)
{
	struct ebu_nand_controller *ebu_host = platform_get_drvdata(pdev);
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(&ebu_host->chip));
	WARN_ON(ret);
	nand_cleanup(&ebu_host->chip);
	ebu_nand_disable(&ebu_host->chip);
	ebu_dma_cleanup(ebu_host);
}

static const struct of_device_id ebu_nand_match[] = {
	{ .compatible = "intel,lgm-ebunand" },
	{}
};
MODULE_DEVICE_TABLE(of, ebu_nand_match);

static struct platform_driver ebu_nand_driver = {
	.probe = ebu_nand_probe,
	.remove_new = ebu_nand_remove,
	.driver = {
		.name = "intel-nand-controller",
		.of_match_table = ebu_nand_match,
	},
};
module_platform_driver(ebu_nand_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>");
MODULE_DESCRIPTION("Intel's LGM External Bus NAND Controller driver");