xref: /openbmc/linux/drivers/mtd/nand/raw/intel-nand-controller.c (revision 0b1039f016e8a37c779a4aee362cb2100ebb1cfd)
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2020 Intel Corporation. */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand.h>

#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/unaligned.h>

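/*
 * The register offsets and bit fields below cover the two blocks this
 * driver programs: the EBU (External Bus Unit) core registers and the
 * HSNAND (high-speed NAND/ECC engine) registers that follow them.
 */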
#define EBU_CLC			0x000
#define EBU_CLC_RST		0x00000000u

#define EBU_ADDR_SEL(n)		(0x020 + (n) * 4)
/* 5 bits 26:22 included for comparison in the ADDR_SELx */
#define EBU_ADDR_MASK(x)	((x) << 4)
#define EBU_ADDR_SEL_REGEN	0x1

#define EBU_BUSCON(n)		(0x060 + (n) * 4)
#define EBU_BUSCON_CMULT_V4	0x1
#define EBU_BUSCON_RECOVC(n)	((n) << 2)
#define EBU_BUSCON_HOLDC(n)	((n) << 4)
#define EBU_BUSCON_WAITRDC(n)	((n) << 6)
#define EBU_BUSCON_WAITWRC(n)	((n) << 8)
#define EBU_BUSCON_BCGEN_CS	0x0
#define EBU_BUSCON_SETUP_EN	BIT(22)
#define EBU_BUSCON_ALEC		0xC000

#define EBU_CON			0x0B0
#define EBU_CON_NANDM_EN	BIT(0)
#define EBU_CON_NANDM_DIS	0x0
#define EBU_CON_CSMUX_E_EN	BIT(1)
#define EBU_CON_ALE_P_LOW	BIT(2)
#define EBU_CON_CLE_P_LOW	BIT(3)
#define EBU_CON_CS_P_LOW	BIT(4)
#define EBU_CON_SE_P_LOW	BIT(5)
#define EBU_CON_WP_P_LOW	BIT(6)
#define EBU_CON_PRE_P_LOW	BIT(7)
#define EBU_CON_IN_CS_S(n)	((n) << 8)
#define EBU_CON_OUT_CS_S(n)	((n) << 10)
#define EBU_CON_LAT_EN_CS_P	((0x3D) << 18)

#define EBU_WAIT		0x0B4
#define EBU_WAIT_RDBY		BIT(0)
#define EBU_WAIT_WR_C		BIT(3)

#define HSNAND_CTL1		0x110
#define HSNAND_CTL1_ADDR_SHIFT	24

#define HSNAND_CTL2		0x114
#define HSNAND_CTL2_ADDR_SHIFT	8
#define HSNAND_CTL2_CYC_N_V5	(0x2 << 16)

#define HSNAND_INT_MSK_CTL	0x124
#define HSNAND_INT_MSK_CTL_WR_C	BIT(4)

#define HSNAND_INT_STA		0x128
#define HSNAND_INT_STA_WR_C	BIT(4)

#define HSNAND_CTL		0x130
#define HSNAND_CTL_ENABLE_ECC	BIT(0)
#define HSNAND_CTL_GO		BIT(2)
#define HSNAND_CTL_CE_SEL_CS(n)	BIT(3 + (n))
#define HSNAND_CTL_RW_READ	0x0
#define HSNAND_CTL_RW_WRITE	BIT(10)
#define HSNAND_CTL_ECC_OFF_V8TH	BIT(11)
#define HSNAND_CTL_CKFF_EN	0x0
#define HSNAND_CTL_MSG_EN	BIT(17)

#define HSNAND_PARA0		0x13c
#define HSNAND_PARA0_PAGE_V8192	0x3
#define HSNAND_PARA0_PIB_V256	(0x3 << 4)
#define HSNAND_PARA0_BYP_EN_NP	0x0
#define HSNAND_PARA0_BYP_DEC_NP	0x0
#define HSNAND_PARA0_TYPE_ONFI	BIT(18)
#define HSNAND_PARA0_ADEP_EN	BIT(21)

#define HSNAND_CMSG_0		0x150
#define HSNAND_CMSG_1		0x154

#define HSNAND_ALE_OFFS		BIT(2)
#define HSNAND_CLE_OFFS		BIT(3)
#define HSNAND_CS_OFFS		BIT(4)

#define HSNAND_ECC_OFFSET	0x008

#define NAND_DATA_IFACE_CHECK_ONLY	-1

#define MAX_CS	2

#define HZ_PER_MHZ	1000000L
#define USEC_PER_SEC	1000000L

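/*
 * Per chip-select state: the ioremapped chip window used for PIO
 * command/address/data cycles, its physical address, and the value
 * programmed into the EBU_ADDR_SEL(cs) region register.
 */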
struct ebu_nand_cs {
	void __iomem *chipaddr;
	dma_addr_t nand_pa;
	u32 addr_sel;
};

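/*
 * Controller state. A single nand_chip is embedded, and one completion is
 * shared by the RX and TX DMA paths (only one transfer runs at a time).
 */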
struct ebu_nand_controller {
	struct nand_controller controller;
	struct nand_chip chip;
	struct device *dev;
	void __iomem *ebu;
	void __iomem *hsnand;
	struct dma_chan *dma_tx;
	struct dma_chan *dma_rx;
	struct completion dma_access_complete;
	unsigned long clk_rate;
	struct clk *clk;
	u32 nd_para0;
	u8 cs_num;
	struct ebu_nand_cs cs[MAX_CS];
};

static inline struct ebu_nand_controller *nand_to_ebu(struct nand_chip *chip)
{
	return container_of(chip, struct ebu_nand_controller, chip);
}

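/*
 * Poll EBU_WAIT until the chip reports ready (RDBY) or the write cycle has
 * completed (WR_C). Note that the timeout is handed straight to
 * readl_poll_timeout(), i.e. it is in microseconds despite the parameter
 * name; callers convert accordingly.
 */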
static int ebu_nand_waitrdy(struct nand_chip *chip, int timeout_ms)
{
	struct ebu_nand_controller *ctrl = nand_to_ebu(chip);
	u32 status;

	return readl_poll_timeout(ctrl->ebu + EBU_WAIT, status,
				  (status & EBU_WAIT_RDBY) ||
				  (status & EBU_WAIT_WR_C), 20, timeout_ms);
}

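/*
 * PIO accessors: command, address and data cycles are generated by reading
 * or writing the memory-mapped chip window at an offset that encodes which
 * of the CLE/ALE/CS lines to assert (the HSNAND_*_OFFS values above).
 */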
static u8 ebu_nand_readb(struct nand_chip *chip)
{
	struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
	u8 cs_num = ebu_host->cs_num;
	u8 val;

	val = readb(ebu_host->cs[cs_num].chipaddr + HSNAND_CS_OFFS);
	ebu_nand_waitrdy(chip, 1000);
	return val;
}

static void ebu_nand_writeb(struct nand_chip *chip, u32 offset, u8 value)
{
	struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
	u8 cs_num = ebu_host->cs_num;

	writeb(value, ebu_host->cs[cs_num].chipaddr + offset);
	ebu_nand_waitrdy(chip, 1000);
}

static void ebu_read_buf(struct nand_chip *chip, u_char *buf, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		buf[i] = ebu_nand_readb(chip);
}

static void ebu_write_buf(struct nand_chip *chip, const u_char *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		ebu_nand_writeb(chip, HSNAND_CS_OFFS, buf[i]);
}

static void ebu_nand_disable(struct nand_chip *chip)
{
	struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);

	writel(0, ebu_host->ebu + EBU_CON);
}

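/*
 * Enable NAND mode in the EBU and route the chip-select signals to the
 * configured CS line, with the control-signal polarities this IP expects.
 */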
static void ebu_select_chip(struct nand_chip *chip)
{
	struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
	void __iomem *nand_con = ebu_host->ebu + EBU_CON;
	u32 cs = ebu_host->cs_num;

	writel(EBU_CON_NANDM_EN | EBU_CON_CSMUX_E_EN | EBU_CON_CS_P_LOW |
	       EBU_CON_SE_P_LOW | EBU_CON_WP_P_LOW | EBU_CON_PRE_P_LOW |
	       EBU_CON_IN_CS_S(cs) | EBU_CON_OUT_CS_S(cs) |
	       EBU_CON_LAT_EN_CS_P, nand_con);
}

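/*
 * Translate the SDR interface timings (given in picoseconds) into EBU bus
 * cycles: with the clock rate in MHz, USEC_PER_SEC / rate is the clock
 * period in picoseconds, so each DIV_ROUND_UP() below is a ps-to-cycles
 * conversion.
 */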
static int ebu_nand_set_timings(struct nand_chip *chip, int csline,
				const struct nand_interface_config *conf)
{
	struct ebu_nand_controller *ctrl = nand_to_ebu(chip);
	unsigned int rate = clk_get_rate(ctrl->clk) / HZ_PER_MHZ;
	unsigned int period = DIV_ROUND_UP(USEC_PER_SEC, rate);
	const struct nand_sdr_timings *timings;
	u32 trecov, thold, twrwait, trdwait;
	u32 reg = 0;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	trecov = DIV_ROUND_UP(max(timings->tREA_max, timings->tREH_min),
			      period);
	reg |= EBU_BUSCON_RECOVC(trecov);

	thold = DIV_ROUND_UP(max(timings->tDH_min, timings->tDS_min), period);
	reg |= EBU_BUSCON_HOLDC(thold);

	trdwait = DIV_ROUND_UP(max(timings->tRC_min, timings->tREH_min),
			       period);
	reg |= EBU_BUSCON_WAITRDC(trdwait);

	twrwait = DIV_ROUND_UP(max(timings->tWC_min, timings->tWH_min), period);
	reg |= EBU_BUSCON_WAITWRC(twrwait);

	reg |= EBU_BUSCON_CMULT_V4 | EBU_BUSCON_BCGEN_CS | EBU_BUSCON_ALEC |
		EBU_BUSCON_SETUP_EN;

	writel(reg, ctrl->ebu + EBU_BUSCON(ctrl->cs_num));

	return 0;
}

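/*
 * OOB layout: the ECC engine stores its bytes at a fixed offset
 * (HSNAND_ECC_OFFSET) in the OOB area; everything after the ECC bytes is
 * reported as free.
 */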
static int ebu_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = HSNAND_ECC_OFFSET;
	oobregion->length = chip->ecc.total;

	return 0;
}

static int ebu_nand_ooblayout_free(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = chip->ecc.total + HSNAND_ECC_OFFSET;
	oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops ebu_nand_ooblayout_ops = {
	.ecc = ebu_nand_ooblayout_ecc,
	.free = ebu_nand_ooblayout_free,
};

static void ebu_dma_rx_callback(void *cookie)
{
	struct ebu_nand_controller *ebu_host = cookie;

	dmaengine_terminate_async(ebu_host->dma_rx);

	complete(&ebu_host->dma_access_complete);
}

static void ebu_dma_tx_callback(void *cookie)
{
	struct ebu_nand_controller *ebu_host = cookie;

	dmaengine_terminate_async(ebu_host->dma_tx);

	complete(&ebu_host->dma_access_complete);
}

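/*
 * Run one DMA transfer between memory and the NAND controller and wait for
 * it to complete. The direction selects the channel and callback; the
 * callback tears the channel down and signals the shared completion.
 */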
static int ebu_dma_start(struct ebu_nand_controller *ebu_host, u32 dir,
			 const u8 *buf, u32 len)
{
	struct dma_async_tx_descriptor *tx;
	struct completion *dma_completion;
	dma_async_tx_callback callback;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	dma_addr_t buf_dma;
	int ret;
	u32 timeout;

	if (dir == DMA_DEV_TO_MEM) {
		chan = ebu_host->dma_rx;
		dma_completion = &ebu_host->dma_access_complete;
		callback = ebu_dma_rx_callback;
	} else {
		chan = ebu_host->dma_tx;
		dma_completion = &ebu_host->dma_access_complete;
		callback = ebu_dma_tx_callback;
	}

	buf_dma = dma_map_single(chan->device->dev, (void *)buf, len, dir);
	if (dma_mapping_error(chan->device->dev, buf_dma)) {
		dev_err(ebu_host->dev, "Failed to map DMA buffer\n");
		/* Nothing was mapped, so there is nothing to unmap. */
		return -EIO;
	}

	tx = dmaengine_prep_slave_single(chan, buf_dma, len, dir, flags);
	if (!tx) {
		ret = -ENXIO;
		goto err_unmap;
	}

	tx->callback = callback;
	tx->callback_param = ebu_host;
	cookie = tx->tx_submit(tx);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(ebu_host->dev, "dma_submit_error %d\n", cookie);
		ret = -EIO;
		goto err_unmap;
	}

	init_completion(dma_completion);
	dma_async_issue_pending(chan);

	/* Wait for the DMA transfer to finish. */
	timeout = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
	if (!timeout) {
		dev_err(ebu_host->dev, "I/O error in DMA transfer (status %d)\n",
			dmaengine_tx_status(chan, cookie, NULL));
		dmaengine_terminate_sync(chan);
		ret = -ETIMEDOUT;
		goto err_unmap;
	}

	dma_unmap_single(chan->device->dev, buf_dma, len, dir);

	return 0;

err_unmap:
	/* Unmap against the device that mapped the buffer. */
	dma_unmap_single(chan->device->dev, buf_dma, len, dir);

	return ret;
}

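/*
 * Program the HSNAND engine for a page access: the page address is split
 * across CTL1/CTL2, PARA0 is loaded with the geometry computed in
 * attach_chip, and setting HSNAND_CTL_GO kicks off the read (cmd == 0) or
 * write (cmd != 0) state machine.
 */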
static void ebu_nand_trigger(struct ebu_nand_controller *ebu_host,
			     int page, u32 cmd)
{
	unsigned int val;

	val = cmd | (page & 0xFF) << HSNAND_CTL1_ADDR_SHIFT;
	writel(val, ebu_host->hsnand + HSNAND_CTL1);
	val = (page & 0xFFFF00) >> 8 | HSNAND_CTL2_CYC_N_V5;
	writel(val, ebu_host->hsnand + HSNAND_CTL2);

	writel(ebu_host->nd_para0, ebu_host->hsnand + HSNAND_PARA0);

	/* clear first, will update later */
	writel(0xFFFFFFFF, ebu_host->hsnand + HSNAND_CMSG_0);
	writel(0xFFFFFFFF, ebu_host->hsnand + HSNAND_CMSG_1);

	writel(HSNAND_INT_MSK_CTL_WR_C,
	       ebu_host->hsnand + HSNAND_INT_MSK_CTL);

	if (!cmd)
		val = HSNAND_CTL_RW_READ;
	else
		val = HSNAND_CTL_RW_WRITE;

	writel(HSNAND_CTL_MSG_EN | HSNAND_CTL_CKFF_EN |
	       HSNAND_CTL_ECC_OFF_V8TH | HSNAND_CTL_CE_SEL_CS(ebu_host->cs_num) |
	       HSNAND_CTL_ENABLE_ECC | HSNAND_CTL_GO | val,
	       ebu_host->hsnand + HSNAND_CTL);
}

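/*
 * Page I/O with hardware ECC: trigger the engine, move the main data by
 * DMA, then clear HSNAND_CTL_GO once the transfer is done. The write path
 * additionally stuffs the first 8 OOB bytes into CMSG_0/1 and waits for
 * the engine's write-complete status bit.
 */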
static int ebu_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
	int ret, reg_data;

	ebu_nand_trigger(ebu_host, page, NAND_CMD_READ0);

	ret = ebu_dma_start(ebu_host, DMA_DEV_TO_MEM, buf, mtd->writesize);
	if (ret)
		return ret;

	if (oob_required)
		chip->ecc.read_oob(chip, page);

	reg_data = readl(ebu_host->hsnand + HSNAND_CTL);
	reg_data &= ~HSNAND_CTL_GO;
	writel(reg_data, ebu_host->hsnand + HSNAND_CTL);

	return 0;
}

static int ebu_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
				     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
	void __iomem *int_sta = ebu_host->hsnand + HSNAND_INT_STA;
	int reg_data, ret, val;
	u32 reg;

	ebu_nand_trigger(ebu_host, page, NAND_CMD_SEQIN);

	ret = ebu_dma_start(ebu_host, DMA_MEM_TO_DEV, buf, mtd->writesize);
	if (ret)
		return ret;

	if (oob_required) {
		reg = get_unaligned_le32(chip->oob_poi);
		writel(reg, ebu_host->hsnand + HSNAND_CMSG_0);

		reg = get_unaligned_le32(chip->oob_poi + 4);
		writel(reg, ebu_host->hsnand + HSNAND_CMSG_1);
	}

	ret = readl_poll_timeout_atomic(int_sta, val, !(val & HSNAND_INT_STA_WR_C),
					10, 1000);
	if (ret)
		return ret;

	reg_data = readl(ebu_host->hsnand + HSNAND_CTL);
	reg_data &= ~HSNAND_CTL_GO;
	writel(reg_data, ebu_host->hsnand + HSNAND_CTL);

	return 0;
}

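/*
 * ECC strengths selectable in PARA0, indexed by BCH mode: attach_chip
 * searches indices 1..3 for 512-byte steps and 4..7 for 1024-byte steps,
 * and programs the matching index into bits 31:29 of nd_para0.
 */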
static const u8 ecc_strength[] = { 1, 1, 4, 8, 24, 32, 40, 60, };

static int ebu_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
	u32 ecc_steps, ecc_bytes, ecc_total, pagesize, pg_per_blk;
	u32 ecc_strength_ds = chip->ecc.strength;
	u32 ecc_size = chip->ecc.size;
	u32 writesize = mtd->writesize;
	u32 blocksize = mtd->erasesize;
	int bch_algo, start, val;

	/* Default to an ECC size of 512 */
	if (!chip->ecc.size)
		chip->ecc.size = 512;
	/* Re-read the size so the default above actually takes effect. */
	ecc_size = chip->ecc.size;

	switch (ecc_size) {
	case 512:
		start = 1;
		if (!ecc_strength_ds)
			ecc_strength_ds = 4;
		break;
	case 1024:
		start = 4;
		if (!ecc_strength_ds)
			ecc_strength_ds = 32;
		break;
	default:
		return -EINVAL;
	}

	/* BCH ECC algorithm settings for number of bits per 512B/1024B */
	bch_algo = round_up(start + 1, 4);
	for (val = start; val < bch_algo; val++) {
		if (ecc_strength_ds == ecc_strength[val])
			break;
	}
	if (val == bch_algo)
		return -EINVAL;

	if (ecc_strength_ds == 8)
		ecc_bytes = 14;
	else
		ecc_bytes = DIV_ROUND_UP(ecc_strength_ds * fls(8 * ecc_size), 8);

	ecc_steps = writesize / ecc_size;
	ecc_total = ecc_steps * ecc_bytes;
	if ((ecc_total + 8) > mtd->oobsize)
		return -ERANGE;

	chip->ecc.total = ecc_total;
	pagesize = fls(writesize >> 11);
	if (pagesize > HSNAND_PARA0_PAGE_V8192)
		return -ERANGE;

	pg_per_blk = fls((blocksize / writesize) >> 6) / 8;
	if (pg_per_blk > HSNAND_PARA0_PIB_V256)
		return -ERANGE;

	ebu_host->nd_para0 = pagesize | pg_per_blk | HSNAND_PARA0_BYP_EN_NP |
			     HSNAND_PARA0_BYP_DEC_NP | HSNAND_PARA0_ADEP_EN |
			     HSNAND_PARA0_TYPE_ONFI | (val << 29);

	mtd_set_ooblayout(mtd, &ebu_nand_ooblayout_ops);
	chip->ecc.read_page = ebu_nand_read_page_hwecc;
	chip->ecc.write_page = ebu_nand_write_page_hwecc;

	return 0;
}

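/*
 * Generic op parser hook: replay each instruction of the operation as PIO
 * cycles through the chip window, and poll EBU_WAIT for WAITRDY steps.
 */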
static int ebu_nand_exec_op(struct nand_chip *chip,
			    const struct nand_operation *op, bool check_only)
{
	const struct nand_op_instr *instr = NULL;
	unsigned int op_id;
	int i, timeout_ms, ret = 0;

	if (check_only)
		return 0;

	ebu_select_chip(chip);
	for (op_id = 0; op_id < op->ninstrs; op_id++) {
		instr = &op->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			ebu_nand_writeb(chip, HSNAND_CLE_OFFS | HSNAND_CS_OFFS,
					instr->ctx.cmd.opcode);
			break;

		case NAND_OP_ADDR_INSTR:
			for (i = 0; i < instr->ctx.addr.naddrs; i++)
				ebu_nand_writeb(chip,
						HSNAND_ALE_OFFS | HSNAND_CS_OFFS,
						instr->ctx.addr.addrs[i]);
			break;

		case NAND_OP_DATA_IN_INSTR:
			ebu_read_buf(chip, instr->ctx.data.buf.in,
				     instr->ctx.data.len);
			break;

		case NAND_OP_DATA_OUT_INSTR:
			ebu_write_buf(chip, instr->ctx.data.buf.out,
				      instr->ctx.data.len);
			break;

		case NAND_OP_WAITRDY_INSTR:
			/*
			 * ebu_nand_waitrdy() feeds its argument to
			 * readl_poll_timeout() as microseconds, so convert
			 * the instruction's millisecond timeout here.
			 */
			timeout_ms = instr->ctx.waitrdy.timeout_ms * 1000;
			ret = ebu_nand_waitrdy(chip, timeout_ms);
			break;
		}
	}

	return ret;
}

static const struct nand_controller_ops ebu_nand_controller_ops = {
	.attach_chip = ebu_nand_attach_chip,
	.setup_interface = ebu_nand_set_timings,
	.exec_op = ebu_nand_exec_op,
};

static void ebu_dma_cleanup(struct ebu_nand_controller *ebu_host)
{
	if (ebu_host->dma_rx)
		dma_release_channel(ebu_host->dma_rx);

	if (ebu_host->dma_tx)
		dma_release_channel(ebu_host->dma_tx);
}

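/*
 * Probe: map the "ebunand" and "hsnand" register blocks plus the per-CS
 * chip window, set up the clock and DMA channels, program the
 * address-select region register, then scan and register the chip.
 */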
static int ebu_nand_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ebu_nand_controller *ebu_host;
	struct nand_chip *nand;
	struct mtd_info *mtd;
	struct resource *res;
	char *resname;
	int ret;
	u32 cs;

	ebu_host = devm_kzalloc(dev, sizeof(*ebu_host), GFP_KERNEL);
	if (!ebu_host)
		return -ENOMEM;

	ebu_host->dev = dev;
	nand_controller_init(&ebu_host->controller);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ebunand");
	ebu_host->ebu = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ebu_host->ebu))
		return PTR_ERR(ebu_host->ebu);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hsnand");
	ebu_host->hsnand = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ebu_host->hsnand))
		return PTR_ERR(ebu_host->hsnand);

	ret = device_property_read_u32(dev, "reg", &cs);
	if (ret) {
		dev_err(dev, "failed to get chip select: %d\n", ret);
		return ret;
	}
	/* cs indexes the fixed-size cs[] array, so reject out-of-range values. */
	if (cs >= MAX_CS) {
		dev_err(dev, "got invalid chip select: %d\n", cs);
		return -EINVAL;
	}
	ebu_host->cs_num = cs;

	resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
	if (!resname)
		return -ENOMEM;
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
	ebu_host->cs[cs].chipaddr = devm_ioremap_resource(dev, res);
	if (IS_ERR(ebu_host->cs[cs].chipaddr))
		return PTR_ERR(ebu_host->cs[cs].chipaddr);
	ebu_host->cs[cs].nand_pa = res->start;

	ebu_host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(ebu_host->clk))
		return dev_err_probe(dev, PTR_ERR(ebu_host->clk),
				     "failed to get clock\n");

	ret = clk_prepare_enable(ebu_host->clk);
	if (ret) {
		dev_err(dev, "failed to enable clock: %d\n", ret);
		return ret;
	}
	ebu_host->clk_rate = clk_get_rate(ebu_host->clk);

	ebu_host->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(ebu_host->dma_tx)) {
		ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx),
				    "failed to request DMA tx chan!\n");
		goto err_disable_unprepare;
	}

	ebu_host->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(ebu_host->dma_rx)) {
		ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_rx),
				    "failed to request DMA rx chan!\n");
		/* Clear the error pointer so ebu_dma_cleanup() skips it. */
		ebu_host->dma_rx = NULL;
		goto err_cleanup_dma;
	}

	resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
	if (!resname) {
		ret = -ENOMEM;
		goto err_cleanup_dma;
	}
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
	if (!res) {
		ret = -EINVAL;
		goto err_cleanup_dma;
	}
	ebu_host->cs[cs].addr_sel = res->start;
	writel(ebu_host->cs[cs].addr_sel | EBU_ADDR_MASK(5) | EBU_ADDR_SEL_REGEN,
	       ebu_host->ebu + EBU_ADDR_SEL(cs));

	nand_set_flash_node(&ebu_host->chip, dev->of_node);

	/* The MTD name comes from the flash node, so look it up afterwards. */
	mtd = nand_to_mtd(&ebu_host->chip);
	if (!mtd->name) {
		dev_err(ebu_host->dev, "NAND label property is mandatory\n");
		ret = -EINVAL;
		goto err_cleanup_dma;
	}

	mtd->dev.parent = dev;

	platform_set_drvdata(pdev, ebu_host);
	nand_set_controller_data(&ebu_host->chip, ebu_host);

	nand = &ebu_host->chip;
	nand->controller = &ebu_host->controller;
	nand->controller->ops = &ebu_nand_controller_ops;

	/* Scan to find existence of the device */
	ret = nand_scan(&ebu_host->chip, 1);
	if (ret)
		goto err_cleanup_dma;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_clean_nand;

	return 0;

err_clean_nand:
	nand_cleanup(&ebu_host->chip);
err_cleanup_dma:
	ebu_dma_cleanup(ebu_host);
err_disable_unprepare:
	clk_disable_unprepare(ebu_host->clk);

	return ret;
}


static int ebu_nand_remove(struct platform_device *pdev)
{
	struct ebu_nand_controller *ebu_host = platform_get_drvdata(pdev);
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(&ebu_host->chip));
	WARN_ON(ret);
	nand_cleanup(&ebu_host->chip);
	ebu_nand_disable(&ebu_host->chip);
	ebu_dma_cleanup(ebu_host);
	clk_disable_unprepare(ebu_host->clk);

	return 0;
}

static const struct of_device_id ebu_nand_match[] = {
	{ .compatible = "intel,nand-controller" },
	{ .compatible = "intel,lgm-ebunand" },
	{}
};
MODULE_DEVICE_TABLE(of, ebu_nand_match);
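
/*
 * Illustrative device-tree node, sketched from the resource and property
 * names probe() looks up. Addresses, sizes and DMA specifiers below are
 * placeholders, not taken from the dt-binding document:
 *
 *	nand-controller@e0f00000 {
 *		compatible = "intel,lgm-ebunand";
 *		reg = <0xe0f00000 0x100>,
 *		      <0xe1000000 0x300>,
 *		      <0xe1400000 0x8000>,
 *		      <0x17400000 0x4>;
 *		reg-names = "ebunand", "hsnand", "nand_cs0", "addr_sel0";
 *		dmas = <&dma0 8>, <&dma0 9>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * Note that this revision of the driver reads the chip-select number from
 * a "reg" property on the controller device itself via
 * device_property_read_u32(), rather than from a child NAND chip node.
 */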

static struct platform_driver ebu_nand_driver = {
	.probe = ebu_nand_probe,
	.remove = ebu_nand_remove,
	.driver = {
		.name = "intel-nand-controller",
		.of_match_table = ebu_nand_match,
	},
};
module_platform_driver(ebu_nand_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>");
MODULE_DESCRIPTION("Intel's LGM External Bus NAND Controller driver");