xref: /openbmc/linux/drivers/mtd/nand/raw/qcom_nandc.c (revision e7f127b2)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/dma/qcom_adm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>

/* NANDc reg offsets */
#define	NAND_FLASH_CMD			0x00
#define	NAND_ADDR0			0x04
#define	NAND_ADDR1			0x08
#define	NAND_FLASH_CHIP_SELECT		0x0c
#define	NAND_EXEC_CMD			0x10
#define	NAND_FLASH_STATUS		0x14
#define	NAND_BUFFER_STATUS		0x18
#define	NAND_DEV0_CFG0			0x20
#define	NAND_DEV0_CFG1			0x24
#define	NAND_DEV0_ECC_CFG		0x28
#define	NAND_AUTO_STATUS_EN		0x2c
#define	NAND_DEV1_CFG0			0x30
#define	NAND_DEV1_CFG1			0x34
#define	NAND_READ_ID			0x40
#define	NAND_READ_STATUS		0x44
#define	NAND_DEV_CMD0			0xa0
#define	NAND_DEV_CMD1			0xa4
#define	NAND_DEV_CMD2			0xa8
#define	NAND_DEV_CMD_VLD		0xac
#define	SFLASHC_BURST_CFG		0xe0
#define	NAND_ERASED_CW_DETECT_CFG	0xe8
#define	NAND_ERASED_CW_DETECT_STATUS	0xec
#define	NAND_EBI2_ECC_BUF_CFG		0xf0
#define	FLASH_BUF_ACC			0x100

#define	NAND_CTRL			0xf00
#define	NAND_VERSION			0xf08
#define	NAND_READ_LOCATION_0		0xf20
#define	NAND_READ_LOCATION_1		0xf24
#define	NAND_READ_LOCATION_2		0xf28
#define	NAND_READ_LOCATION_3		0xf2c
#define	NAND_READ_LOCATION_LAST_CW_0	0xf40
#define	NAND_READ_LOCATION_LAST_CW_1	0xf44
#define	NAND_READ_LOCATION_LAST_CW_2	0xf48
#define	NAND_READ_LOCATION_LAST_CW_3	0xf4c

/* dummy register offsets, used by write_reg_dma */
#define	NAND_DEV_CMD1_RESTORE		0xdead
#define	NAND_DEV_CMD_VLD_RESTORE	0xbeef

/* NAND_FLASH_CMD bits */
#define	PAGE_ACC			BIT(4)
#define	LAST_PAGE			BIT(5)

/* NAND_FLASH_CHIP_SELECT bits */
#define	NAND_DEV_SEL			0
#define	DM_EN				BIT(2)

/* NAND_FLASH_STATUS bits */
#define	FS_OP_ERR			BIT(4)
#define	FS_READY_BSY_N			BIT(5)
#define	FS_MPU_ERR			BIT(8)
#define	FS_DEVICE_STS_ERR		BIT(16)
#define	FS_DEVICE_WP			BIT(23)

/* NAND_BUFFER_STATUS bits */
#define	BS_UNCORRECTABLE_BIT		BIT(8)
#define	BS_CORRECTABLE_ERR_MSK		0x1f

/* NAND_DEVn_CFG0 bits */
#define	DISABLE_STATUS_AFTER_WRITE	4
#define	CW_PER_PAGE			6
#define	UD_SIZE_BYTES			9
#define	ECC_PARITY_SIZE_BYTES_RS	19
#define	SPARE_SIZE_BYTES		23
#define	NUM_ADDR_CYCLES			27
#define	STATUS_BFR_READ			30
#define	SET_RD_MODE_AFTER_STATUS	31

/* NAND_DEVn_CFG1 bits */
#define	DEV0_CFG1_ECC_DISABLE		0
#define	WIDE_FLASH			1
#define	NAND_RECOVERY_CYCLES		2
#define	CS_ACTIVE_BSY			5
#define	BAD_BLOCK_BYTE_NUM		6
#define	BAD_BLOCK_IN_SPARE_AREA		16
#define	WR_RD_BSY_GAP			17
#define	ENABLE_BCH_ECC			27

/* NAND_DEV0_ECC_CFG bits */
#define	ECC_CFG_ECC_DISABLE		0
#define	ECC_SW_RESET			1
#define	ECC_MODE			4
#define	ECC_PARITY_SIZE_BYTES_BCH	8
#define	ECC_NUM_DATA_BYTES		16
#define	ECC_FORCE_CLK_OPEN		30

/* NAND_DEV_CMD1 bits */
#define	READ_ADDR			0

/* NAND_DEV_CMD_VLD bits */
#define	READ_START_VLD			BIT(0)
#define	READ_STOP_VLD			BIT(1)
#define	WRITE_START_VLD			BIT(2)
#define	ERASE_START_VLD			BIT(3)
#define	SEQ_READ_START_VLD		BIT(4)

/* NAND_EBI2_ECC_BUF_CFG bits */
#define	NUM_STEPS			0

/* NAND_ERASED_CW_DETECT_CFG bits */
#define	ERASED_CW_ECC_MASK		1
#define	AUTO_DETECT_RES			0
#define	MASK_ECC			(1 << ERASED_CW_ECC_MASK)
#define	RESET_ERASED_DET		(1 << AUTO_DETECT_RES)
#define	ACTIVE_ERASED_DET		(0 << AUTO_DETECT_RES)
#define	CLR_ERASED_PAGE_DET		(RESET_ERASED_DET | MASK_ECC)
#define	SET_ERASED_PAGE_DET		(ACTIVE_ERASED_DET | MASK_ECC)

/* NAND_ERASED_CW_DETECT_STATUS bits */
#define	PAGE_ALL_ERASED			BIT(7)
#define	CODEWORD_ALL_ERASED		BIT(6)
#define	PAGE_ERASED			BIT(5)
#define	CODEWORD_ERASED			BIT(4)
#define	ERASED_PAGE			(PAGE_ALL_ERASED | PAGE_ERASED)
#define	ERASED_CW			(CODEWORD_ALL_ERASED | CODEWORD_ERASED)

/* NAND_READ_LOCATION_n bits */
#define READ_LOCATION_OFFSET		0
#define READ_LOCATION_SIZE		16
#define READ_LOCATION_LAST		31

/* Version Mask */
#define	NAND_VERSION_MAJOR_MASK		0xf0000000
#define	NAND_VERSION_MAJOR_SHIFT	28
#define	NAND_VERSION_MINOR_MASK		0x0fff0000
#define	NAND_VERSION_MINOR_SHIFT	16
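
/*
 * For example, a hypothetical NAND_VERSION value of 0x50100000 decodes as
 * major = (0x50100000 & NAND_VERSION_MAJOR_MASK) >> NAND_VERSION_MAJOR_SHIFT
 *       = 5
 * minor = (0x50100000 & NAND_VERSION_MINOR_MASK) >> NAND_VERSION_MINOR_SHIFT
 *       = 0x10
 */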

/* NAND OP_CMDs */
#define	OP_PAGE_READ			0x2
#define	OP_PAGE_READ_WITH_ECC		0x3
#define	OP_PAGE_READ_WITH_ECC_SPARE	0x4
#define	OP_PAGE_READ_ONFI_READ		0x5
#define	OP_PROGRAM_PAGE			0x6
#define	OP_PAGE_PROGRAM_WITH_ECC	0x7
#define	OP_PROGRAM_PAGE_SPARE		0x9
#define	OP_BLOCK_ERASE			0xa
#define	OP_FETCH_ID			0xb
#define	OP_RESET_DEVICE			0xd

/* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL		(READ_START_VLD | WRITE_START_VLD | \
					 ERASE_START_VLD | SEQ_READ_START_VLD)

/* NAND_CTRL bits */
#define	BAM_MODE_EN			BIT(0)

/*
 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
 * the driver calls the chunks 'step' or 'codeword' interchangeably
 */
#define	NANDC_STEP_SIZE			512

/*
 * the largest page size we support is 8K, this will have 16 steps/codewords
 * of 512 bytes each
 */
#define	MAX_NUM_STEPS			(SZ_8K / NANDC_STEP_SIZE)

/* we read at most 3 registers per codeword scan */
#define	MAX_REG_RD			(3 * MAX_NUM_STEPS)

/* ECC modes supported by the controller */
#define	ECC_NONE	BIT(0)
#define	ECC_RS_4BIT	BIT(1)
#define	ECC_BCH_4BIT	BIT(2)
#define	ECC_BCH_8BIT	BIT(3)

#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc)	\
nandc_set_reg(chip, reg,			\
	      ((cw_offset) << READ_LOCATION_OFFSET) |		\
	      ((read_size) << READ_LOCATION_SIZE) |			\
	      ((is_last_read_loc) << READ_LOCATION_LAST))

#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc)	\
nandc_set_reg(chip, reg,			\
	      ((cw_offset) << READ_LOCATION_OFFSET) |		\
	      ((read_size) << READ_LOCATION_SIZE) |			\
	      ((is_last_read_loc) << READ_LOCATION_LAST))
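
/*
 * Worked example: a single read location covering one full 512 byte codeword,
 * marked as the last location, encodes as
 * (0 << READ_LOCATION_OFFSET) | (512 << READ_LOCATION_SIZE) |
 * (1 << READ_LOCATION_LAST) == 0x82000000
 */
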
/*
 * Returns the actual register address for all NAND_DEV_ registers
 * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
 */
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))

/* Returns the NAND register physical address */
#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))

/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
	((chip)->reg_read_dma + \
	((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
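
/*
 * E.g. vaddr == &nandc->reg_read_buf[2] maps to nandc->reg_read_dma + 8,
 * since each entry in the register read buffer is a 4 byte __le32.
 */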

#define QPIC_PER_CW_CMD_ELEMENTS	32
#define QPIC_PER_CW_CMD_SGL		32
#define QPIC_PER_CW_DATA_SGL		8

#define QPIC_NAND_COMPLETION_TIMEOUT	msecs_to_jiffies(2000)

/*
 * Flags used in DMA descriptor preparation helper functions
 * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
 */
/* Don't set the EOT in current tx BAM sgl */
#define NAND_BAM_NO_EOT			BIT(0)
/* Set the NWD flag in current BAM sgl */
#define NAND_BAM_NWD			BIT(1)
/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
#define NAND_BAM_NEXT_SGL		BIT(2)
/*
 * The erased codeword status is used twice in a single transfer, so this
 * flag determines the current value of the erased codeword status register
 */
#define NAND_ERASED_CW_SET		BIT(4)

/*
 * This data type corresponds to the BAM transaction which will be used for all
 * NAND transfers.
 * @bam_ce - the array of BAM command elements
 * @cmd_sgl - sgl for NAND BAM command pipe
 * @data_sgl - sgl for NAND BAM consumer/producer pipe
 * @bam_ce_pos - the index in bam_ce which is available for next sgl
 * @bam_ce_start - the index in bam_ce which marks the first command element
 *		   for the current sgl. It is used for size calculation of
 *		   the current sgl
 * @cmd_sgl_pos - current index in command sgl.
 * @cmd_sgl_start - start index in command sgl.
 * @tx_sgl_pos - current index in data sgl for tx.
 * @tx_sgl_start - start index in data sgl for tx.
 * @rx_sgl_pos - current index in data sgl for rx.
 * @rx_sgl_start - start index in data sgl for rx.
 * @wait_second_completion - wait for the second DMA desc completion before
 *			     marking the NAND transfer as complete.
 * @txn_done - completion for NAND transfer.
 * @last_data_desc - last DMA desc in data channel (tx/rx).
 * @last_cmd_desc - last DMA desc in command channel.
 */
struct bam_transaction {
	struct bam_cmd_element *bam_ce;
	struct scatterlist *cmd_sgl;
	struct scatterlist *data_sgl;
	u32 bam_ce_pos;
	u32 bam_ce_start;
	u32 cmd_sgl_pos;
	u32 cmd_sgl_start;
	u32 tx_sgl_pos;
	u32 tx_sgl_start;
	u32 rx_sgl_pos;
	u32 rx_sgl_start;
	bool wait_second_completion;
	struct completion txn_done;
	struct dma_async_tx_descriptor *last_data_desc;
	struct dma_async_tx_descriptor *last_cmd_desc;
};

/*
 * This data type corresponds to the NAND DMA descriptor
 * @node - list node for desc_info
 * @dir - DMA transfer direction
 * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
 *	      ADM
 * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
 * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
 * @dma_desc - low level DMA engine descriptor
 */
struct desc_info {
	struct list_head node;

	enum dma_data_direction dir;
	union {
		struct scatterlist adm_sgl;
		struct {
			struct scatterlist *bam_sgl;
			int sgl_cnt;
		};
	};
	struct dma_async_tx_descriptor *dma_desc;
};

/*
 * holds the current register values that we want to write. acts as a contiguous
 * chunk of memory which we use to write the controller registers through DMA.
 */
struct nandc_regs {
	__le32 cmd;
	__le32 addr0;
	__le32 addr1;
	__le32 chip_sel;
	__le32 exec;

	__le32 cfg0;
	__le32 cfg1;
	__le32 ecc_bch_cfg;

	__le32 clrflashstatus;
	__le32 clrreadstatus;

	__le32 cmd1;
	__le32 vld;

	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;
	__le32 read_location0;
	__le32 read_location1;
	__le32 read_location2;
	__le32 read_location3;
	__le32 read_location_last0;
	__le32 read_location_last1;
	__le32 read_location_last2;
	__le32 read_location_last3;

	__le32 erased_cw_detect_cfg_clr;
	__le32 erased_cw_detect_cfg_set;
};

/*
 * NAND controller data struct
 *
 * @controller:			base controller structure
 * @host_list:			list containing all the chips attached to the
 *				controller
 * @dev:			parent device
 * @base:			MMIO base
 * @base_phys:			physical base address of controller registers
 * @base_dma:			dma base address of controller registers
 * @core_clk:			controller clock
 * @aon_clk:			another controller clock
 *
 * @chan:			dma channel
 * @cmd_crci:			ADM DMA CRCI for command flow control
 * @data_crci:			ADM DMA CRCI for data flow control
 * @desc_list:			DMA descriptor list (list of desc_infos)
 *
 * @data_buffer:		our local DMA buffer for page read/writes,
 *				used when we can't use the buffer provided
 *				by upper layers directly
 * @buf_size/count/start:	markers for chip->legacy.read_buf/write_buf
 *				functions
 * @reg_read_buf:		local buffer for reading back registers via DMA
 * @reg_read_dma:		contains dma address for register read buffer
 * @reg_read_pos:		marker for data read in reg_read_buf
 *
 * @regs:			a contiguous chunk of memory for DMA register
 *				writes. contains the register values to be
 *				written to controller
 * @cmd1/vld:			some fixed controller register values
 * @props:			properties of current NAND controller,
 *				initialized via DT match data
 * @max_cwperpage:		maximum QPIC codewords required. calculated
 *				from all connected NAND devices' pagesize
 */
struct qcom_nand_controller {
	struct nand_controller controller;
	struct list_head host_list;

	struct device *dev;

	void __iomem *base;
	phys_addr_t base_phys;
	dma_addr_t base_dma;

	struct clk *core_clk;
	struct clk *aon_clk;

	union {
		/* will be used only by QPIC for BAM DMA */
		struct {
			struct dma_chan *tx_chan;
			struct dma_chan *rx_chan;
			struct dma_chan *cmd_chan;
		};

		/* will be used only by EBI2 for ADM DMA */
		struct {
			struct dma_chan *chan;
			unsigned int cmd_crci;
			unsigned int data_crci;
		};
	};

	struct list_head desc_list;
	struct bam_transaction *bam_txn;

	u8		*data_buffer;
	int		buf_size;
	int		buf_count;
	int		buf_start;
	unsigned int	max_cwperpage;

	__le32 *reg_read_buf;
	dma_addr_t reg_read_dma;
	int reg_read_pos;

	struct nandc_regs *regs;

	u32 cmd1, vld;
	const struct qcom_nandc_props *props;
};

/*
 * NAND chip structure
 *
 * @chip:			base NAND chip structure
 * @node:			list node to add itself to host_list in
 *				qcom_nand_controller
 *
 * @cs:				chip select value for this chip
 * @cw_size:			the number of bytes in a single step/codeword
 *				of a page, consisting of all data, ecc, spare
 *				and reserved bytes
 * @cw_data:			the number of bytes within a codeword protected
 *				by ECC
 * @use_ecc:			request the controller to use ECC for the
 *				upcoming read/write
 * @bch_enabled:		flag to tell whether BCH ECC mode is used
 * @ecc_bytes_hw:		ECC bytes used by controller hardware for this
 *				chip
 * @status:			value to be returned if NAND_CMD_STATUS command
 *				is executed
 * @last_command:		keeps track of last command on this chip. used
 *				for reading correct status
 *
 * @cfg0, cfg1, cfg0_raw..:	NANDc register configurations needed for
 *				ecc/non-ecc mode for the current nand flash
 *				device
 */
struct qcom_nand_host {
	struct nand_chip chip;
	struct list_head node;

	int cs;
	int cw_size;
	int cw_data;
	bool use_ecc;
	bool bch_enabled;
	int ecc_bytes_hw;
	int spare_bytes;
	int bbm_size;
	u8 status;
	int last_command;

	u32 cfg0, cfg1;
	u32 cfg0_raw, cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;
	u32 clrreadstatus;
};

/*
 * This data type corresponds to the NAND controller properties which vary
 * among different NAND controllers.
 * @ecc_modes - ecc mode for NAND
 * @is_bam - whether NAND controller is using BAM
 * @is_qpic - whether NAND CTRL is part of qpic IP
 * @qpic_v2 - flag to indicate QPIC IP version 2
 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
 */
struct qcom_nandc_props {
	u32 ecc_modes;
	bool is_bam;
	bool is_qpic;
	bool qpic_v2;
	u32 dev_cmd_reg_start;
};

/* Frees the BAM transaction memory */
static void free_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	devm_kfree(nandc->dev, bam_txn);
}

/* Allocates and initializes the BAM transaction */
static struct bam_transaction *
alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn;
	size_t bam_txn_size;
	unsigned int num_cw = nandc->max_cwperpage;
	void *bam_txn_buf;

	bam_txn_size =
		sizeof(*bam_txn) + num_cw *
		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

	bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
	if (!bam_txn_buf)
		return NULL;

	bam_txn = bam_txn_buf;
	bam_txn_buf += sizeof(*bam_txn);

	bam_txn->bam_ce = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;

	bam_txn->cmd_sgl = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;

	bam_txn->data_sgl = bam_txn_buf;

	init_completion(&bam_txn->txn_done);

	return bam_txn;
}
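
/*
 * The single allocation above thus carves one contiguous buffer into:
 *
 * [bam_transaction][bam_ce * QPIC_PER_CW_CMD_ELEMENTS * num_cw]
 * [cmd_sgl * QPIC_PER_CW_CMD_SGL * num_cw]
 * [data_sgl * QPIC_PER_CW_DATA_SGL * num_cw]
 */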

/* Clears the BAM transaction indexes */
static void clear_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (!nandc->props->is_bam)
		return;

	bam_txn->bam_ce_pos = 0;
	bam_txn->bam_ce_start = 0;
	bam_txn->cmd_sgl_pos = 0;
	bam_txn->cmd_sgl_start = 0;
	bam_txn->tx_sgl_pos = 0;
	bam_txn->tx_sgl_start = 0;
	bam_txn->rx_sgl_pos = 0;
	bam_txn->rx_sgl_start = 0;
	bam_txn->last_data_desc = NULL;
	bam_txn->wait_second_completion = false;

	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_CMD_SGL);
	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_DATA_SGL);

	reinit_completion(&bam_txn->txn_done);
}

/* Callback for DMA descriptor completion */
static void qpic_bam_dma_done(void *data)
{
	struct bam_transaction *bam_txn = data;

	/*
	 * In case of data transfer with NAND, 2 callbacks will be generated.
	 * One for command channel and another one for data channel.
	 * If current transaction has data descriptors
	 * (i.e. wait_second_completion is true), then set this to false
	 * and wait for second DMA descriptor completion.
	 */
	if (bam_txn->wait_second_completion)
		bam_txn->wait_second_completion = false;
	else
		complete(&bam_txn->txn_done);
}

static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
	return container_of(chip, struct qcom_nand_host, chip);
}

static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct qcom_nand_controller,
			    controller);
}

static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
	return ioread32(nandc->base + offset);
}

static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
			       u32 val)
{
	iowrite32(val, nandc->base + offset);
}

static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
					  bool is_cpu)
{
	if (!nandc->props->is_bam)
		return;

	if (is_cpu)
		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
					MAX_REG_RD *
					sizeof(*nandc->reg_read_buf),
					DMA_FROM_DEVICE);
	else
		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
					   MAX_REG_RD *
					   sizeof(*nandc->reg_read_buf),
					   DMA_FROM_DEVICE);
}

static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
	switch (offset) {
	case NAND_FLASH_CMD:
		return &regs->cmd;
	case NAND_ADDR0:
		return &regs->addr0;
	case NAND_ADDR1:
		return &regs->addr1;
	case NAND_FLASH_CHIP_SELECT:
		return &regs->chip_sel;
	case NAND_EXEC_CMD:
		return &regs->exec;
	case NAND_FLASH_STATUS:
		return &regs->clrflashstatus;
	case NAND_DEV0_CFG0:
		return &regs->cfg0;
	case NAND_DEV0_CFG1:
		return &regs->cfg1;
	case NAND_DEV0_ECC_CFG:
		return &regs->ecc_bch_cfg;
	case NAND_READ_STATUS:
		return &regs->clrreadstatus;
	case NAND_DEV_CMD1:
		return &regs->cmd1;
	case NAND_DEV_CMD1_RESTORE:
		return &regs->orig_cmd1;
	case NAND_DEV_CMD_VLD:
		return &regs->vld;
	case NAND_DEV_CMD_VLD_RESTORE:
		return &regs->orig_vld;
	case NAND_EBI2_ECC_BUF_CFG:
		return &regs->ecc_buf_cfg;
	case NAND_READ_LOCATION_0:
		return &regs->read_location0;
	case NAND_READ_LOCATION_1:
		return &regs->read_location1;
	case NAND_READ_LOCATION_2:
		return &regs->read_location2;
	case NAND_READ_LOCATION_3:
		return &regs->read_location3;
	case NAND_READ_LOCATION_LAST_CW_0:
		return &regs->read_location_last0;
	case NAND_READ_LOCATION_LAST_CW_1:
		return &regs->read_location_last1;
	case NAND_READ_LOCATION_LAST_CW_2:
		return &regs->read_location_last2;
	case NAND_READ_LOCATION_LAST_CW_3:
		return &regs->read_location_last3;
	default:
		return NULL;
	}
}

static void nandc_set_reg(struct nand_chip *chip, int offset,
			  u32 val)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nandc_regs *regs = nandc->regs;
	__le32 *reg;

	reg = offset_to_nandc_reg(regs, offset);

	if (reg)
		*reg = cpu_to_le32(val);
}

/* Helper to check whether the codeword is the last one of the page */
static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
{
	return cw == (ecc->steps - 1);
}

/* helper to configure location register values */
static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
			       int cw_offset, int read_size, int is_last_read_loc)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int reg_base = NAND_READ_LOCATION_0;

	if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
		reg_base = NAND_READ_LOCATION_LAST_CW_0;

	reg_base += reg * 4;

	if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
		return nandc_set_read_loc_last(chip, reg_base, cw_offset,
				read_size, is_last_read_loc);
	else
		return nandc_set_read_loc_first(chip, reg_base, cw_offset,
				read_size, is_last_read_loc);
}

/* helper to configure address register values */
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
	struct nand_chip *chip = &host->chip;

	if (chip->options & NAND_BUSWIDTH_16)
		column >>= 1;

	nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
	nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
}
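
/*
 * E.g. for a hypothetical page 0x23456 at column 0 on an 8-bit bus, the
 * helper above programs NAND_ADDR0 = 0x34560000 (low 16 bits of the page in
 * the upper half, column in the lower half) and NAND_ADDR1 = 0x02 (page
 * bits [23:16]).
 */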

/*
 * update_rw_regs:	set up read/write register values, these will be
 *			written to the NAND controller registers via DMA
 *
 * @num_cw:		number of steps for the read/write operation
 * @read:		read or write operation
 * @cw	:		which code word
 */
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
{
	struct nand_chip *chip = &host->chip;
	u32 cmd, cfg0, cfg1, ecc_bch_cfg;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (read) {
		if (host->use_ecc)
			cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
		else
			cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
	} else {
		cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
	}

	if (host->use_ecc) {
		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1;
		ecc_bch_cfg = host->ecc_bch_cfg;
	} else {
		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1_raw;
		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	}

	nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
	nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
	nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
	nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
	if (!nandc->props->qpic_v2)
		nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
	nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
	nandc_set_reg(chip, NAND_EXEC_CMD, 1);

	if (read)
		nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
				   host->cw_data : host->cw_size, 1);
}
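
/*
 * E.g. a 2K page read with ECC spans four 512 byte steps, so the CW_PER_PAGE
 * field above is programmed with num_cw - 1 = 3.
 */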

/*
 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
 * for BAM. This descriptor will be added to the NAND DMA descriptor queue
 * which will be submitted to the DMA engine.
 */
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				  struct dma_chan *chan,
				  unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);

	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	/* update last data/command descriptor */
	if (chan == nandc->cmd_chan)
		bam_txn->last_cmd_desc = dma_desc;
	else
		bam_txn->last_data_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}

/*
 * Prepares the command descriptor for BAM DMA which will be used for NAND
 * register reads and writes. The command descriptor requires the commands
 * to be formed as command elements, so this function takes command elements
 * from the bam transaction ce array and fills them with the required data.
 * A single SGL can contain multiple command elements, so NAND_BAM_NEXT_SGL
 * is used to start a separate SGL after the current command element.
 */
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
				 int reg_off, const void *vaddr,
				 int size, unsigned int flags)
{
	int bam_ce_size;
	int i, ret;
	struct bam_cmd_element *bam_ce_buffer;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

	/* fill the command desc */
	for (i = 0; i < size; i++) {
		if (read)
			bam_prep_ce(&bam_ce_buffer[i],
				    nandc_reg_phys(nandc, reg_off + 4 * i),
				    BAM_READ_COMMAND,
				    reg_buf_dma_addr(nandc,
						     (__le32 *)vaddr + i));
		else
			bam_prep_ce_le32(&bam_ce_buffer[i],
					 nandc_reg_phys(nandc, reg_off + 4 * i),
					 BAM_WRITE_COMMAND,
					 *((__le32 *)vaddr + i));
	}

	bam_txn->bam_ce_pos += size;

	/* use the separate sgl after this command */
	if (flags & NAND_BAM_NEXT_SGL) {
		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
		bam_ce_size = (bam_txn->bam_ce_pos -
				bam_txn->bam_ce_start) *
				sizeof(struct bam_cmd_element);
		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
			   bam_ce_buffer, bam_ce_size);
		bam_txn->cmd_sgl_pos++;
		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

		if (flags & NAND_BAM_NWD) {
			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						     DMA_PREP_FENCE |
						     DMA_PREP_CMD);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * Prepares the data descriptor for BAM DMA which will be used for NAND
 * data reads and writes.
 */
static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
				  const void *vaddr,
				  int size, unsigned int flags)
{
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (read) {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
			   vaddr, size);
		bam_txn->rx_sgl_pos++;
	} else {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
			   vaddr, size);
		bam_txn->tx_sgl_pos++;

		/*
		 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
		 * is not set, form the DMA descriptor
		 */
		if (!(flags & NAND_BAM_NO_EOT)) {
			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
						     DMA_PREP_INTERRUPT);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
			     int reg_off, const void *vaddr, int size,
			     bool flow_control)
{
	struct desc_info *desc;
	struct dma_async_tx_descriptor *dma_desc;
	struct scatterlist *sgl;
	struct dma_slave_config slave_conf;
	struct qcom_adm_peripheral_config periph_conf = {};
	enum dma_transfer_direction dir_eng;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	sgl = &desc->adm_sgl;

	sg_init_one(sgl, vaddr, size);

	if (read) {
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	} else {
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (ret == 0) {
		ret = -ENOMEM;
		goto err;
	}

	memset(&slave_conf, 0x00, sizeof(slave_conf));

	slave_conf.device_fc = flow_control;
	if (read) {
		slave_conf.src_maxburst = 16;
		slave_conf.src_addr = nandc->base_dma + reg_off;
		if (nandc->data_crci) {
			periph_conf.crci = nandc->data_crci;
			slave_conf.peripheral_config = &periph_conf;
			slave_conf.peripheral_size = sizeof(periph_conf);
		}
	} else {
		slave_conf.dst_maxburst = 16;
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		if (nandc->cmd_crci) {
			periph_conf.crci = nandc->cmd_crci;
			slave_conf.peripheral_config = &periph_conf;
			slave_conf.peripheral_size = sizeof(periph_conf);
		}
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
err:
	kfree(desc);

	return ret;
}

/*
 * read_reg_dma:	prepares a descriptor to read a given number of
 *			contiguous registers to the reg_read_buf pointer
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to read
 * @flags:		flags to control DMA descriptor preparation
 */
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
			int num_regs, unsigned int flags)
{
	bool flow_control = false;
	void *vaddr;

	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, first);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
					     num_regs, flags);

	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	return prep_adm_dma_desc(nandc, true, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}

/*
 * write_reg_dma:	prepares a descriptor to write a given number of
 *			contiguous registers
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to write
 * @flags:		flags to control DMA descriptor preparation
 */
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
			 int num_regs, unsigned int flags)
{
	bool flow_control = false;
	struct nandc_regs *regs = nandc->regs;
	void *vaddr;

	vaddr = offset_to_nandc_reg(regs, first);

	if (first == NAND_ERASED_CW_DETECT_CFG) {
		if (flags & NAND_ERASED_CW_SET)
			vaddr = &regs->erased_cw_detect_cfg_set;
		else
			vaddr = &regs->erased_cw_detect_cfg_clr;
	}

	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
					     num_regs, flags);

	if (first == NAND_FLASH_CMD)
		flow_control = true;

	return prep_adm_dma_desc(nandc, false, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}

/*
 * read_data_dma:	prepares a DMA descriptor to transfer data from the
 *			controller's internal buffer to the buffer 'vaddr'
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to write to
 * @size:		DMA transaction size in bytes
 * @flags:		flags to control DMA descriptor preparation
 */
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			 const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}

/*
 * write_data_dma:	prepares a DMA descriptor to transfer data from
 *			'vaddr' to the controller's internal buffer
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to read from
 * @size:		DMA transaction size in bytes
 * @flags:		flags to control DMA descriptor preparation
 */
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			  const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading a NAND page.
 */
static void config_nand_page_read(struct nand_chip *chip)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	if (!nandc->props->qpic_v2)
		write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading each codeword in a NAND page.
 */
static void
config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	int reg = NAND_READ_LOCATION_0;

	if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
		reg = NAND_READ_LOCATION_LAST_CW_0;

	if (nandc->props->is_bam)
		write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	if (use_ecc) {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
		read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
			     NAND_BAM_NEXT_SGL);
	} else {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
	}
}

/*
 * Helper to prepare DMA descriptors to configure registers needed for reading
 * a single codeword in a page
 */
static void
config_nand_single_cw_page_read(struct nand_chip *chip,
				bool use_ecc, int cw)
{
	config_nand_page_read(chip);
	config_nand_cw_read(chip, use_ecc, cw);
}

/*
 * Helper to prepare DMA descriptors used to configure registers needed
 * before writing a NAND page.
 */
static void config_nand_page_write(struct nand_chip *chip)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	if (!nandc->props->qpic_v2)
		write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
			      NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing each codeword in a NAND page.
 */
static void config_nand_cw_write(struct nand_chip *chip)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}

/*
 * the following functions are used within chip->legacy.cmdfunc() to
 * perform different NAND_CMD_* commands
 */

/* sets up descriptors for NAND_CMD_PARAM */
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/*
	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
	 * in use. we configure the controller to perform a raw read of 512
	 * bytes to read ONFI params
	 */
	if (nandc->props->qpic_v2)
		nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ_ONFI_READ |
			      PAGE_ACC | LAST_PAGE);
	else
		nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ |
			      PAGE_ACC | LAST_PAGE);

	nandc_set_reg(chip, NAND_ADDR0, 0);
	nandc_set_reg(chip, NAND_ADDR1, 0);
	nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	if (!nandc->props->qpic_v2)
		nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
	if (!nandc->props->qpic_v2) {
		nandc_set_reg(chip, NAND_DEV_CMD_VLD,
			      (nandc->vld & ~READ_START_VLD));
		nandc_set_reg(chip, NAND_DEV_CMD1,
			      (nandc->cmd1 & ~(0xFF << READ_ADDR))
			      | NAND_CMD_PARAM << READ_ADDR);
	}

	nandc_set_reg(chip, NAND_EXEC_CMD, 1);

	if (!nandc->props->qpic_v2) {
		nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
		nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
	}

	nandc_set_read_loc(chip, 0, 0, 0, 512, 1);

	if (!nandc->props->qpic_v2) {
		write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
		write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
	}

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_nand_single_cw_page_read(chip, false, 0);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count, 0);

	/* restore CMD1 and VLD regs */
	if (!nandc->props->qpic_v2) {
		write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
		write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
	}

	return 0;
}

/* sets up descriptors for NAND_CMD_ERASE1 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(chip, NAND_FLASH_CMD,
		      OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(chip, NAND_ADDR0, page_addr);
	nandc_set_reg(chip, NAND_ADDR1, 0);
	nandc_set_reg(chip, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
	nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);

	write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

/* sets up descriptors for NAND_CMD_READID */
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (column == -1)
		return 0;

	nandc_set_reg(chip, NAND_FLASH_CMD, OP_FETCH_ID);
	nandc_set_reg(chip, NAND_ADDR0, column);
	nandc_set_reg(chip, NAND_ADDR1, 0);
	nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
		      nandc->props->is_bam ? 0 : DM_EN);
	nandc_set_reg(chip, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

/* sets up descriptors for NAND_CMD_RESET */
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(chip, NAND_FLASH_CMD, OP_RESET_DEVICE);
	nandc_set_reg(chip, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int r;

	if (nandc->props->is_bam) {
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (r)
				return r;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
						   DMA_PREP_INTERRUPT);
			if (r)
				return r;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						   DMA_PREP_CMD);
			if (r)
				return r;
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (nandc->props->is_bam) {
		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
		bam_txn->last_cmd_desc->callback_param = bam_txn;
		if (bam_txn->last_data_desc) {
			bam_txn->last_data_desc->callback = qpic_bam_dma_done;
			bam_txn->last_data_desc->callback_param = bam_txn;
			bam_txn->wait_second_completion = true;
		}

		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);
		dma_async_issue_pending(nandc->cmd_chan);

		if (!wait_for_completion_timeout(&bam_txn->txn_done,
						 QPIC_NAND_COMPLETION_TIMEOUT))
			return -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	}

	return 0;
}

static void free_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;

	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		if (nandc->props->is_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);

		kfree(desc);
	}
}

/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	nandc_read_buffer_sync(nandc, false);
}

static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);

	if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
	    command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
		clear_bam_transaction(nandc);
}

/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte; this status byte can be read after
 * NAND_CMD_STATUS is called
 */
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int num_cw;
	int i;

	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < num_cw; i++) {
		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash_status & FS_MPU_ERR)
			host->status &= ~NAND_STATUS_WP;

		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
						 (flash_status &
						  FS_DEVICE_STS_ERR)))
			host->status |= NAND_STATUS_FAIL;
	}
}

static void post_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	switch (command) {
	case NAND_CMD_READID:
		nandc_read_buffer_sync(nandc, true);
		memcpy(nandc->data_buffer, nandc->reg_read_buf,
		       nandc->buf_count);
		break;
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
		parse_erase_write_errors(host, command);
		break;
	default:
		break;
	}
}

/*
 * Implements chip->legacy.cmdfunc. It's only used for a limited set of
 * commands. The rest of the commands wouldn't be called by upper layers.
 * For example, NAND_CMD_READOOB would never be called because we have our own
 * versions of read_oob ops for nand_ecc_ctrl.
 */
static void qcom_nandc_command(struct nand_chip *chip, unsigned int command,
			       int column, int page_addr)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* we read the entire page for now */
		WARN_ON(column != 0);

		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true, 0);
		break;

	case NAND_CMD_SEQIN:
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}

/*
 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
 *
 * when using RS ECC, the HW reports the same errors when reading an erased CW,
 * but it notifies that it is an erased CW by placing special characters at
 * certain offsets in the buffer.
 *
 * verify if the page is erased or not, and fix up the page for RS ECC by
 * replacing the special characters with 0xff.
 */
static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
{
	u8 empty1, empty2;

	/*
	 * an erased page flags an error in NAND_FLASH_STATUS, check if the page
	 * is erased by looking for 0x54s at offsets 3 and 175 from the
	 * beginning of each codeword
	 */

	empty1 = data_buf[3];
	empty2 = data_buf[175];

	/*
	 * if the erased codeword markers exist, override them with
	 * 0xffs
	 */
	if ((empty1 == 0x54 && empty2 == 0xff) ||
	    (empty1 == 0xff && empty2 == 0x54)) {
		data_buf[3] = 0xff;
		data_buf[175] = 0xff;
	}

	/*
	 * check if the entire chunk contains 0xffs or not. if it doesn't, then
	 * restore the original values at the special offsets
	 */
	if (memchr_inv(data_buf, 0xff, data_len)) {
		data_buf[3] = empty1;
		data_buf[175] = empty2;

		return false;
	}

	return true;
}

struct read_stats {
	__le32 flash;
	__le32 buffer;
	__le32 erased_cw;
};

/* reads back FLASH_STATUS register set by the controller */
static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int i;

	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < cw_cnt; i++) {
		u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash & (FS_OP_ERR | FS_MPU_ERR))
			return -EIO;
	}

	return 0;
}

/* performs raw read for one codeword */
static int
qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
		       u8 *data_buf, u8 *oob_buf, int page, int cw)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int data_size1, data_size2, oob_size1, oob_size2;
	int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
	int raw_cw = cw;

	nand_read_page_op(chip, page, 0, NULL, 0);
	host->use_ecc = false;

	if (nandc->props->qpic_v2)
		raw_cw = ecc->steps - 1;

	clear_bam_transaction(nandc);
	set_address(host, host->cw_size * cw, page);
	update_rw_regs(host, 1, true, raw_cw);
	config_nand_page_read(chip);

	data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
	oob_size1 = host->bbm_size;

	if (qcom_nandc_is_last_cw(ecc, cw)) {
		data_size2 = ecc->size - data_size1 -
			     ((ecc->steps - 1) * 4);
		oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
			    host->spare_bytes;
	} else {
		data_size2 = host->cw_data - data_size1;
		oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
	}

	if (nandc->props->is_bam) {
		nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
		read_loc += data_size1;

		nandc_set_read_loc(chip, cw, 1, read_loc, oob_size1, 0);
		read_loc += oob_size1;

		nandc_set_read_loc(chip, cw, 2, read_loc, data_size2, 0);
		read_loc += data_size2;

		nandc_set_read_loc(chip, cw, 3, read_loc, oob_size2, 1);
	}

	config_nand_cw_read(chip, false, raw_cw);

	read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
	reg_off += data_size1;

	read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
	reg_off += oob_size1;

	read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
	reg_off += data_size2;

	read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);

	ret = submit_descs(nandc);
	free_descs(nandc);
	if (ret) {
		dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
		return ret;
	}

	return check_flash_errors(host, 1);
}

/*
 * Bitflips can happen in erased codewords as well, so this function counts
 * the number of 0 bits in each CW for which the ECC engine returns an
 * uncorrectable error. The page is assumed to be erased if this count is
 * less than or equal to ecc->strength for each CW.
 *
 * 1. Both DATA and OOB need to be checked for the number of 0 bits. The
 *    top-level API can be called with only data buf or OOB buf so use
 *    chip->data_buf if data buf is null and chip->oob_poi if oob buf
 *    is null for copying the raw bytes.
 * 2. Perform a raw read for all the CWs which have uncorrectable errors.
 * 3. For each CW, check the number of 0 bits in cw_data and usable OOB bytes.
 *    Bitflips in the BBM and spare bytes won't affect the ECC, so don't check
 *    the number of bitflips in this area.
 */
1766 static int
1767 check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
1768 		      u8 *oob_buf, unsigned long uncorrectable_cws,
1769 		      int page, unsigned int max_bitflips)
1770 {
1771 	struct nand_chip *chip = &host->chip;
1772 	struct mtd_info *mtd = nand_to_mtd(chip);
1773 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1774 	u8 *cw_data_buf, *cw_oob_buf;
1775 	int cw, data_size, oob_size, ret = 0;
1776 
1777 	if (!data_buf)
1778 		data_buf = nand_get_data_buf(chip);
1779 
1780 	if (!oob_buf) {
1781 		nand_get_data_buf(chip);
1782 		oob_buf = chip->oob_poi;
1783 	}
1784 
1785 	for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
1786 		if (qcom_nandc_is_last_cw(ecc, cw)) {
1787 			data_size = ecc->size - ((ecc->steps - 1) * 4);
1788 			oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
1789 		} else {
1790 			data_size = host->cw_data;
1791 			oob_size = host->ecc_bytes_hw;
1792 		}
1793 
1794 		/* determine starting buffer address for current CW */
1795 		cw_data_buf = data_buf + (cw * host->cw_data);
1796 		cw_oob_buf = oob_buf + (cw * ecc->bytes);
1797 
1798 		ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
1799 					     cw_oob_buf, page, cw);
1800 		if (ret)
1801 			return ret;
1802 
1803 		/*
1804 		 * make sure it isn't an erased page reported
1805 		 * as not-erased by HW because of a few bitflips
1806 		 */
1807 		ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
1808 						  cw_oob_buf + host->bbm_size,
1809 						  oob_size, NULL,
1810 						  0, ecc->strength);
1811 		if (ret < 0) {
1812 			mtd->ecc_stats.failed++;
1813 		} else {
1814 			mtd->ecc_stats.corrected += ret;
1815 			max_bitflips = max_t(unsigned int, max_bitflips, ret);
1816 		}
1817 	}
1818 
1819 	return max_bitflips;
1820 }
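
/*
 * Note on the threshold check above: for each suspect CW,
 * nand_check_erased_ecc_chunk() counts the 0 bits in the raw data plus the
 * usable OOB. If the count is within ecc->strength it rewrites both buffers
 * to 0xff and returns the number of bitflips; otherwise it returns -EBADMSG
 * and the CW is accounted as an ECC failure.
 */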
1821 
1822 /*
1823  * reads back status registers set by the controller to notify page read
1824  * errors. this is equivalent to what 'ecc->correct()' would do.
1825  */
1826 static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
1827 			     u8 *oob_buf, int page)
1828 {
1829 	struct nand_chip *chip = &host->chip;
1830 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1831 	struct mtd_info *mtd = nand_to_mtd(chip);
1832 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1833 	unsigned int max_bitflips = 0, uncorrectable_cws = 0;
1834 	struct read_stats *buf;
1835 	bool flash_op_err = false, erased;
1836 	int i;
1837 	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
1838 
1839 	buf = (struct read_stats *)nandc->reg_read_buf;
1840 	nandc_read_buffer_sync(nandc, true);
1841 
1842 	for (i = 0; i < ecc->steps; i++, buf++) {
1843 		u32 flash, buffer, erased_cw;
1844 		int data_len, oob_len;
1845 
1846 		if (qcom_nandc_is_last_cw(ecc, i)) {
1847 			data_len = ecc->size - ((ecc->steps - 1) << 2);
1848 			oob_len = ecc->steps << 2;
1849 		} else {
1850 			data_len = host->cw_data;
1851 			oob_len = 0;
1852 		}
1853 
1854 		flash = le32_to_cpu(buf->flash);
1855 		buffer = le32_to_cpu(buf->buffer);
1856 		erased_cw = le32_to_cpu(buf->erased_cw);
1857 
1858 		/*
1859 		 * Check ECC failure for each codeword. ECC failure can
1860 		 * happen in either of the following conditions
1861 		 * 1. If number of bitflips are greater than ECC engine
1862 		 *    capability.
1863 		 * 2. If this codeword contains all 0xff for which erased
1864 		 *    codeword detection check will be done.
1865 		 */
1866 		if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
1867 			/*
1868 			 * For BCH ECC, ignore erased codeword errors, if
1869 			 * ERASED_CW bits are set.
1870 			 */
1871 			if (host->bch_enabled) {
1872 				erased = (erased_cw & ERASED_CW) == ERASED_CW;
1873 			/*
1874 			 * For RS ECC, HW reports the erased CW by placing
1875 			 * special characters at certain offsets in the buffer.
1876 			 * These special characters will be valid only if
1877 			 * complete page is read i.e. data_buf is not NULL.
1878 			 */
1879 			} else if (data_buf) {
1880 				erased = erased_chunk_check_and_fixup(data_buf,
1881 								      data_len);
1882 			} else {
1883 				erased = false;
1884 			}
1885 
1886 			if (!erased)
1887 				uncorrectable_cws |= BIT(i);
1888 		/*
1889 		 * Check if MPU or any other operational error (timeout,
1890 		 * device failure, etc.) happened for this codeword and
1891 		 * make flash_op_err true. If flash_op_err is set, then
1892 		 * EIO will be returned for page read.
1893 		 */
1894 		} else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
1895 			flash_op_err = true;
1896 		/*
1897 		 * No ECC or operational errors happened. Check the number of
1898 		 * bits corrected and update the ecc_stats.corrected.
1899 		 */
1900 		} else {
1901 			unsigned int stat;
1902 
1903 			stat = buffer & BS_CORRECTABLE_ERR_MSK;
1904 			mtd->ecc_stats.corrected += stat;
1905 			max_bitflips = max(max_bitflips, stat);
1906 		}
1907 
1908 		if (data_buf)
1909 			data_buf += data_len;
1910 		if (oob_buf)
1911 			oob_buf += oob_len + ecc->bytes;
1912 	}
1913 
1914 	if (flash_op_err)
1915 		return -EIO;
1916 
1917 	if (!uncorrectable_cws)
1918 		return max_bitflips;
1919 
1920 	return check_for_erased_page(host, data_buf_start, oob_buf_start,
1921 				     uncorrectable_cws, page,
1922 				     max_bitflips);
1923 }
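
/*
 * Worked example of the buffer advance above, assuming ecc->steps = 4,
 * ecc->size = 512 and ecc->bytes = 12: CWs 0..2 advance data_buf by
 * host->cw_data (516) with oob_len = 0, while the last CW uses
 * data_len = 512 - 3 * 4 = 500 and oob_len = 4 * 4 = 16. oob_buf also
 * skips ecc->bytes per CW, so after the loop the two pointers have covered
 * exactly writesize (3 * 516 + 500 = 2048) and oobsize
 * (3 * 12 + 16 + 12 = 64) bytes.
 */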
1924 
1925 /*
1926  * helper to perform the actual page read operation, used by ecc->read_page(),
1927  * ecc->read_oob()
1928  */
1929 static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
1930 			 u8 *oob_buf, int page)
1931 {
1932 	struct nand_chip *chip = &host->chip;
1933 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1934 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1935 	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
1936 	int i, ret;
1937 
1938 	config_nand_page_read(chip);
1939 
1940 	/* queue cmd descs for each codeword */
1941 	for (i = 0; i < ecc->steps; i++) {
1942 		int data_size, oob_size;
1943 
1944 		if (qcom_nandc_is_last_cw(ecc, i)) {
1945 			data_size = ecc->size - ((ecc->steps - 1) << 2);
1946 			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
1947 				   host->spare_bytes;
1948 		} else {
1949 			data_size = host->cw_data;
1950 			oob_size = host->ecc_bytes_hw + host->spare_bytes;
1951 		}
1952 
1953 		if (nandc->props->is_bam) {
1954 			if (data_buf && oob_buf) {
1955 				nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
1956 				nandc_set_read_loc(chip, i, 1, data_size,
1957 						   oob_size, 1);
1958 			} else if (data_buf) {
1959 				nandc_set_read_loc(chip, i, 0, 0, data_size, 1);
1960 			} else {
1961 				nandc_set_read_loc(chip, i, 0, data_size,
1962 						   oob_size, 1);
1963 			}
1964 		}
1965 
1966 		config_nand_cw_read(chip, true, i);
1967 
1968 		if (data_buf)
1969 			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
1970 				      data_size, 0);
1971 
1972 		/*
1973 		 * when ecc is enabled, the controller doesn't read the real
1974 		 * or dummy bad block markers in each chunk. To maintain a
1975 		 * consistent layout across RAW and ECC reads, we just
1976 		 * leave the real/dummy BBM offsets empty (i.e, filled with
1977 		 * leave the real/dummy BBM offsets empty (i.e., filled with
1978 		 */
1979 		if (oob_buf) {
1980 			int j;
1981 
1982 			for (j = 0; j < host->bbm_size; j++)
1983 				*oob_buf++ = 0xff;
1984 
1985 			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
1986 				      oob_buf, oob_size, 0);
1987 		}
1988 
1989 		if (data_buf)
1990 			data_buf += data_size;
1991 		if (oob_buf)
1992 			oob_buf += oob_size;
1993 	}
1994 
1995 	ret = submit_descs(nandc);
1996 	free_descs(nandc);
1997 
1998 	if (ret) {
1999 		dev_err(nandc->dev, "failure to read page/oob\n");
2000 		return ret;
2001 	}
2002 
2003 	return parse_read_errors(host, data_buf_start, oob_buf_start, page);
2004 }
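
/*
 * Note on the BAM read locations programmed above: each
 * NAND_READ_LOCATION_n register encodes one (offset, size, last) slice of
 * the controller's internal codeword buffer. A data + oob read uses slices
 * 0 and 1, while a data-only or oob-only read collapses to a single slice
 * with its 'last' flag set.
 */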
2005 
2006 /*
2007  * a helper that copies the last step/codeword of a page (containing free oob)
2008  * into our local buffer
2009  */
2010 static int copy_last_cw(struct qcom_nand_host *host, int page)
2011 {
2012 	struct nand_chip *chip = &host->chip;
2013 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2014 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2015 	int size;
2016 	int ret;
2017 
2018 	clear_read_regs(nandc);
2019 
2020 	size = host->use_ecc ? host->cw_data : host->cw_size;
2021 
2022 	/* prepare a clean read buffer */
2023 	memset(nandc->data_buffer, 0xff, size);
2024 
2025 	set_address(host, host->cw_size * (ecc->steps - 1), page);
2026 	update_rw_regs(host, 1, true, ecc->steps - 1);
2027 
2028 	config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
2029 
2030 	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
2031 
2032 	ret = submit_descs(nandc);
2033 	if (ret)
2034 		dev_err(nandc->dev, "failed to copy last codeword\n");
2035 
2036 	free_descs(nandc);
2037 
2038 	return ret;
2039 }
2040 
2041 /* implements ecc->read_page() */
2042 static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
2043 				int oob_required, int page)
2044 {
2045 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2046 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2047 	u8 *data_buf, *oob_buf = NULL;
2048 
2049 	nand_read_page_op(chip, page, 0, NULL, 0);
2050 	data_buf = buf;
2051 	oob_buf = oob_required ? chip->oob_poi : NULL;
2052 
2053 	clear_bam_transaction(nandc);
2054 
2055 	return read_page_ecc(host, data_buf, oob_buf, page);
2056 }
2057 
2058 /* implements ecc->read_page_raw() */
2059 static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
2060 				    int oob_required, int page)
2061 {
2062 	struct mtd_info *mtd = nand_to_mtd(chip);
2063 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2064 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2065 	int cw, ret;
2066 	u8 *data_buf = buf, *oob_buf = chip->oob_poi;
2067 
2068 	for (cw = 0; cw < ecc->steps; cw++) {
2069 		ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
2070 					     page, cw);
2071 		if (ret)
2072 			return ret;
2073 
2074 		data_buf += host->cw_data;
2075 		oob_buf += ecc->bytes;
2076 	}
2077 
2078 	return 0;
2079 }
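
/*
 * Note: the loop above advances data_buf by cw_data (516) per codeword,
 * but qcom_nandc_read_cw_raw() only fills data_size1 + data_size2 bytes,
 * which on a 2K page is 516 for CWs 0..n-2 and 500 for the last CW, so
 * the final read ends exactly at mtd->writesize (3 * 516 + 500 = 2048).
 */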
2080 
2081 /* implements ecc->read_oob() */
2082 static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
2083 {
2084 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2085 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2086 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2087 
2088 	clear_read_regs(nandc);
2089 	clear_bam_transaction(nandc);
2090 
2091 	host->use_ecc = true;
2092 	set_address(host, 0, page);
2093 	update_rw_regs(host, ecc->steps, true, 0);
2094 
2095 	return read_page_ecc(host, NULL, chip->oob_poi, page);
2096 }
2097 
2098 /* implements ecc->write_page() */
2099 static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
2100 				 int oob_required, int page)
2101 {
2102 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2103 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2104 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2105 	u8 *data_buf, *oob_buf;
2106 	int i, ret;
2107 
2108 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
2109 
2110 	clear_read_regs(nandc);
2111 	clear_bam_transaction(nandc);
2112 
2113 	data_buf = (u8 *)buf;
2114 	oob_buf = chip->oob_poi;
2115 
2116 	host->use_ecc = true;
2117 	update_rw_regs(host, ecc->steps, false, 0);
2118 	config_nand_page_write(chip);
2119 
2120 	for (i = 0; i < ecc->steps; i++) {
2121 		int data_size, oob_size;
2122 
2123 		if (qcom_nandc_is_last_cw(ecc, i)) {
2124 			data_size = ecc->size - ((ecc->steps - 1) << 2);
2125 			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
2126 				   host->spare_bytes;
2127 		} else {
2128 			data_size = host->cw_data;
2129 			oob_size = ecc->bytes;
2130 		}
2131 
2133 		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
2134 			       i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
2135 
2136 		/*
2137 		 * when ECC is enabled, we don't really need to write anything
2138 		 * to oob for the first n - 1 codewords since these oob regions
2139 		 * just contain ECC bytes that are written by the controller
2140 		 * itself. For the last codeword, we skip the bbm positions and
2141 		 * write to the free oob area.
2142 		 */
2143 		if (qcom_nandc_is_last_cw(ecc, i)) {
2144 			oob_buf += host->bbm_size;
2145 
2146 			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
2147 				       oob_buf, oob_size, 0);
2148 		}
2149 
2150 		config_nand_cw_write(chip);
2151 
2152 		data_buf += data_size;
2153 		oob_buf += oob_size;
2154 	}
2155 
2156 	ret = submit_descs(nandc);
2157 	if (ret)
2158 		dev_err(nandc->dev, "failure to write page\n");
2159 
2160 	free_descs(nandc);
2161 
2162 	if (!ret)
2163 		ret = nand_prog_page_end_op(chip);
2164 
2165 	return ret;
2166 }
2167 
2168 /* implements ecc->write_page_raw() */
2169 static int qcom_nandc_write_page_raw(struct nand_chip *chip,
2170 				     const uint8_t *buf, int oob_required,
2171 				     int page)
2172 {
2173 	struct mtd_info *mtd = nand_to_mtd(chip);
2174 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2175 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2176 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2177 	u8 *data_buf, *oob_buf;
2178 	int i, ret;
2179 
2180 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
2181 	clear_read_regs(nandc);
2182 	clear_bam_transaction(nandc);
2183 
2184 	data_buf = (u8 *)buf;
2185 	oob_buf = chip->oob_poi;
2186 
2187 	host->use_ecc = false;
2188 	update_rw_regs(host, ecc->steps, false, 0);
2189 	config_nand_page_write(chip);
2190 
2191 	for (i = 0; i < ecc->steps; i++) {
2192 		int data_size1, data_size2, oob_size1, oob_size2;
2193 		int reg_off = FLASH_BUF_ACC;
2194 
2195 		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
2196 		oob_size1 = host->bbm_size;
2197 
2198 		if (qcom_nandc_is_last_cw(ecc, i)) {
2199 			data_size2 = ecc->size - data_size1 -
2200 				     ((ecc->steps - 1) << 2);
2201 			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
2202 				    host->spare_bytes;
2203 		} else {
2204 			data_size2 = host->cw_data - data_size1;
2205 			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
2206 		}
2207 
2208 		write_data_dma(nandc, reg_off, data_buf, data_size1,
2209 			       NAND_BAM_NO_EOT);
2210 		reg_off += data_size1;
2211 		data_buf += data_size1;
2212 
2213 		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
2214 			       NAND_BAM_NO_EOT);
2215 		reg_off += oob_size1;
2216 		oob_buf += oob_size1;
2217 
2218 		write_data_dma(nandc, reg_off, data_buf, data_size2,
2219 			       NAND_BAM_NO_EOT);
2220 		reg_off += data_size2;
2221 		data_buf += data_size2;
2222 
2223 		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
2224 		oob_buf += oob_size2;
2225 
2226 		config_nand_cw_write(chip);
2227 	}
2228 
2229 	ret = submit_descs(nandc);
2230 	if (ret)
2231 		dev_err(nandc->dev, "failure to write raw page\n");
2232 
2233 	free_descs(nandc);
2234 
2235 	if (!ret)
2236 		ret = nand_prog_page_end_op(chip);
2237 
2238 	return ret;
2239 }
2240 
2241 /*
2242  * implements ecc->write_oob()
2243  *
2244  * the NAND controller cannot write only data or only OOB within a codeword
2245  * since ECC is calculated for the combined codeword. So update the OOB from
2246  * chip->oob_poi, and pad the data area with 0xFF before writing.
2247  */
2248 static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
2249 {
2250 	struct mtd_info *mtd = nand_to_mtd(chip);
2251 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2252 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2253 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2254 	u8 *oob = chip->oob_poi;
2255 	int data_size, oob_size;
2256 	int ret;
2257 
2258 	host->use_ecc = true;
2259 	clear_bam_transaction(nandc);
2260 
2261 	/* calculate the data and oob size for the last codeword/step */
2262 	data_size = ecc->size - ((ecc->steps - 1) << 2);
2263 	oob_size = mtd->oobavail;
2264 
2265 	memset(nandc->data_buffer, 0xff, host->cw_data);
2266 	/* write the new oob content into the last codeword */
2267 	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
2268 				    0, mtd->oobavail);
2269 
2270 	set_address(host, host->cw_size * (ecc->steps - 1), page);
2271 	update_rw_regs(host, 1, false, 0);
2272 
2273 	config_nand_page_write(chip);
2274 	write_data_dma(nandc, FLASH_BUF_ACC,
2275 		       nandc->data_buffer, data_size + oob_size, 0);
2276 	config_nand_cw_write(chip);
2277 
2278 	ret = submit_descs(nandc);
2279 
2280 	free_descs(nandc);
2281 
2282 	if (ret) {
2283 		dev_err(nandc->dev, "failure to write oob\n");
2284 		return -EIO;
2285 	}
2286 
2287 	return nand_prog_page_end_op(chip);
2288 }
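
/*
 * Worked example of the OOB update above, assuming a 2K page with 4 steps
 * and 16 free OOB bytes: data_size = 512 - 3 * 4 = 500, so the oobavail
 * bytes land at offset 500 of the 0xff-padded data_buffer and the
 * controller programs the last CW's 500 + 16 = 516 protected bytes in a
 * single codeword write.
 */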
2289 
2290 static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
2291 {
2292 	struct mtd_info *mtd = nand_to_mtd(chip);
2293 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2294 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2295 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2296 	int page, ret, bbpos, bad = 0;
2297 
2298 	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2299 
2300 	/*
2301 	 * configure registers for a raw sub page read. the address is set to
2302 	 * the beginning of the last codeword. we don't care about reading the
2303 	 * ecc portion of the oob, we just want the first few bytes of this
2304 	 * codeword that contain the BBM
2305 	 */
2306 	host->use_ecc = false;
2307 
2308 	clear_bam_transaction(nandc);
2309 	ret = copy_last_cw(host, page);
2310 	if (ret)
2311 		goto err;
2312 
2313 	if (check_flash_errors(host, 1)) {
2314 		dev_warn(nandc->dev, "error when trying to read BBM\n");
2315 		goto err;
2316 	}
2317 
2318 	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
2319 
2320 	bad = nandc->data_buffer[bbpos] != 0xff;
2321 
2322 	if (chip->options & NAND_BUSWIDTH_16)
2323 		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
2324 err:
2325 	return bad;
2326 }
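
/*
 * Note: bbpos is the BBM offset within the raw last codeword; e.g. for a
 * 2K page with cw_size = 528 and 4 steps, bbpos = 2048 - 528 * 3 = 464,
 * which is where the real BBM byte(s) sit in nandc->data_buffer after
 * copy_last_cw() runs with ECC disabled.
 */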
2327 
2328 static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
2329 {
2330 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2331 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2332 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2333 	int page, ret;
2334 
2335 	clear_read_regs(nandc);
2336 	clear_bam_transaction(nandc);
2337 
2338 	/*
2339 	 * to mark the block as bad, we flash the entire last codeword with 0s.
2340 	 * we don't care about the rest of the content in the codeword since
2341 	 * we aren't going to use this block again
2342 	 */
2343 	memset(nandc->data_buffer, 0x00, host->cw_size);
2344 
2345 	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2346 
2347 	/* prepare write */
2348 	host->use_ecc = false;
2349 	set_address(host, host->cw_size * (ecc->steps - 1), page);
2350 	update_rw_regs(host, 1, false, ecc->steps - 1);
2351 
2352 	config_nand_page_write(chip);
2353 	write_data_dma(nandc, FLASH_BUF_ACC,
2354 		       nandc->data_buffer, host->cw_size, 0);
2355 	config_nand_cw_write(chip);
2356 
2357 	ret = submit_descs(nandc);
2358 
2359 	free_descs(nandc);
2360 
2361 	if (ret) {
2362 		dev_err(nandc->dev, "failure to update BBM\n");
2363 		return -EIO;
2364 	}
2365 
2366 	return nand_prog_page_end_op(chip);
2367 }
2368 
2369 /*
2370  * the three functions below implement chip->legacy.read_byte(),
2371  * chip->legacy.read_buf() and chip->legacy.write_buf() respectively. these
2372  * aren't used for reading/writing page data, they are used for smaller data
2373  * like reading ID, status, etc.
2374  */
2375 static uint8_t qcom_nandc_read_byte(struct nand_chip *chip)
2376 {
2377 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2378 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2379 	u8 *buf = nandc->data_buffer;
2380 	u8 ret = 0x0;
2381 
2382 	if (host->last_command == NAND_CMD_STATUS) {
2383 		ret = host->status;
2384 
2385 		host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2386 
2387 		return ret;
2388 	}
2389 
2390 	if (nandc->buf_start < nandc->buf_count)
2391 		ret = buf[nandc->buf_start++];
2392 
2393 	return ret;
2394 }
2395 
2396 static void qcom_nandc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
2397 {
2398 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2399 	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2400 
2401 	memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
2402 	nandc->buf_start += real_len;
2403 }
2404 
2405 static void qcom_nandc_write_buf(struct nand_chip *chip, const uint8_t *buf,
2406 				 int len)
2407 {
2408 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2409 	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2410 
2411 	memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
2412 
2413 	nandc->buf_start += real_len;
2414 }
2415 
2416 /* we support only one external chip for now */
2417 static void qcom_nandc_select_chip(struct nand_chip *chip, int chipnr)
2418 {
2419 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2420 
2421 	if (chipnr <= 0)
2422 		return;
2423 
2424 	dev_warn(nandc->dev, "invalid chip select\n");
2425 }
2426 
2427 /*
2428  * NAND controller page layout info
2429  *
2430  * Layout with ECC enabled:
2431  *
2432  * |----------------------|  |---------------------------------|
2433  * |           xx.......yy|  |             *********xx.......yy|
2434  * |    DATA   xx..ECC..yy|  |    DATA     **SPARE**xx..ECC..yy|
2435  * |   (516)   xx.......yy|  |  (516-n*4)  **(n*4)**xx.......yy|
2436  * |           xx.......yy|  |             *********xx.......yy|
2437  * |----------------------|  |---------------------------------|
2438  *     codeword 1,2..n-1                  codeword n
2439  *  <---(528/532 Bytes)-->    <-------(528/532 Bytes)--------->
2440  *
2441  * n = Number of codewords in the page
2442  * . = ECC bytes
2443  * * = Spare/free bytes
2444  * x = Unused byte(s)
2445  * y = Reserved byte(s)
2446  *
2447  * 2K page: n = 4, spare = 16 bytes
2448  * 4K page: n = 8, spare = 32 bytes
2449  * 8K page: n = 16, spare = 64 bytes
2450  *
2451  * the qcom nand controller operates at a sub page/codeword level. each
2452  * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
2453  * the number of ECC bytes varies based on the ECC strength and the bus width.
2454  *
2455  * the first n - 1 codewords contain 516 bytes of user data, the remaining
2456  * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
2457  * both user data and spare(oobavail) bytes that sum up to 516 bytes.
2458  *
2459  * When we access a page with ECC enabled, the reserved byte(s) are not
2460  * accessible at all. When reading, we fill up these unreadable positions
2461  * with 0xffs. When writing, the controller skips writing the inaccessible
2462  * bytes.
2463  *
2464  * Layout with ECC disabled:
2465  *
2466  * |------------------------------|  |---------------------------------------|
2467  * |         yy          xx.......|  |         bb          *********xx.......|
2468  * |  DATA1  yy  DATA2   xx..ECC..|  |  DATA1  bb  DATA2   **SPARE**xx..ECC..|
2469  * | (size1) yy (size2)  xx.......|  | (size1) bb (size2)  **(n*4)**xx.......|
2470  * |         yy          xx.......|  |         bb          *********xx.......|
2471  * |------------------------------|  |---------------------------------------|
2472  *         codeword 1,2..n-1                        codeword n
2473  *  <-------(528/532 Bytes)------>    <-----------(528/532 Bytes)----------->
2474  *
2475  * n = Number of codewords in the page
2476  * . = ECC bytes
2477  * * = Spare/free bytes
2478  * x = Unused byte(s)
2479  * y = Dummy Bad Block byte(s)
2480  * b = Real Bad Block byte(s)
2481  * size1/size2 = function of codeword size and 'n'
2482  *
2483  * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
2484  * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
2485  * Block Markers. In the last codeword, this position contains the real BBM
2486  *
2487  * In order to have a consistent layout between RAW and ECC modes, we assume
2488  * the following OOB layout arrangement:
2489  *
2490  * |-----------|  |--------------------|
2491  * |yyxx.......|  |bb*********xx.......|
2492  * |yyxx..ECC..|  |bb*FREEOOB*xx..ECC..|
2493  * |yyxx.......|  |bb*********xx.......|
2494  * |yyxx.......|  |bb*********xx.......|
2495  * |-----------|  |--------------------|
2496  *  first n - 1       nth OOB region
2497  *  OOB regions
2498  *
2499  * n = Number of codewords in the page
2500  * . = ECC bytes
2501  * * = FREE OOB bytes
2502  * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
2503  * x = Unused byte(s)
2504  * b = Real bad block byte(s) (inaccessible when ECC enabled)
2505  *
2506  * This layout is read as is when ECC is disabled. When ECC is enabled, the
2507  * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
2508  * and assumed as 0xffs when we read a page/oob. The ECC, unused and
2509  * dummy/real bad block bytes are grouped as ecc bytes (i.e., ecc->bytes is
2510  * the sum of the three).
2511  */
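
/*
 * Worked numbers for the layout above, assuming a 2K page with 4 bit RS
 * ECC on an 8 bit bus: n = 4, cw_size = 528, cw_data = 516,
 * ecc_bytes_hw = 10, spare_bytes = 1, bbm_size = 1, so
 * ecc->bytes = 10 + 1 + 1 = 12 and the whole page occupies
 * 4 * 528 = 2112 bytes, i.e. writesize (2048) + oobsize (64).
 */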
2512 static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2513 				   struct mtd_oob_region *oobregion)
2514 {
2515 	struct nand_chip *chip = mtd_to_nand(mtd);
2516 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2517 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2518 
2519 	if (section > 1)
2520 		return -ERANGE;
2521 
2522 	if (!section) {
2523 		oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
2524 				    host->bbm_size;
2525 		oobregion->offset = 0;
2526 	} else {
2527 		oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
2528 		oobregion->offset = mtd->oobsize - oobregion->length;
2529 	}
2530 
2531 	return 0;
2532 }
2533 
2534 static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
2535 				     struct mtd_oob_region *oobregion)
2536 {
2537 	struct nand_chip *chip = mtd_to_nand(mtd);
2538 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2539 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2540 
2541 	if (section)
2542 		return -ERANGE;
2543 
2544 	oobregion->length = ecc->steps * 4;
2545 	oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
2546 
2547 	return 0;
2548 }
2549 
2550 static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
2551 	.ecc = qcom_nand_ooblayout_ecc,
2552 	.free = qcom_nand_ooblayout_free,
2553 };
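
/*
 * Resulting regions for the same worked example (2K page, 4 bit RS ECC,
 * 8 bit bus, ecc->bytes = 12, bbm_size = 1): ECC section 0 covers
 * 12 * 3 + 1 = 37 bytes at offset 0, the free OOB section covers
 * 4 * 4 = 16 bytes at offset 37, and ECC section 1 covers the remaining
 * 10 + 1 = 11 bytes at offset 64 - 11 = 53.
 */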
2554 
2555 static int
2556 qcom_nandc_calc_ecc_bytes(int step_size, int strength)
2557 {
2558 	return strength == 4 ? 12 : 16;
2559 }
2560 NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
2561 		     NANDC_STEP_SIZE, 4, 8);
2562 
2563 static int qcom_nand_attach_chip(struct nand_chip *chip)
2564 {
2565 	struct mtd_info *mtd = nand_to_mtd(chip);
2566 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2567 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2568 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2569 	int cwperpage, bad_block_byte, ret;
2570 	bool wide_bus;
2571 	int ecc_mode = 1;
2572 
2573 	/* controller only supports 512 bytes data steps */
2574 	ecc->size = NANDC_STEP_SIZE;
2575 	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
2576 	cwperpage = mtd->writesize / NANDC_STEP_SIZE;
2577 
2578 	/*
2579 	 * Each CW has 4 available OOB bytes which will be protected with ECC
2580 	 * so remaining bytes can be used for ECC.
2581 	 */
2582 	ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
2583 				   mtd->oobsize - (cwperpage * 4));
2584 	if (ret) {
2585 		dev_err(nandc->dev, "No valid ECC settings possible\n");
2586 		return ret;
2587 	}
2588 
2589 	if (ecc->strength >= 8) {
2590 		/* 8 bit ECC defaults to BCH ECC on all platforms */
2591 		host->bch_enabled = true;
2592 		ecc_mode = 1;
2593 
2594 		if (wide_bus) {
2595 			host->ecc_bytes_hw = 14;
2596 			host->spare_bytes = 0;
2597 			host->bbm_size = 2;
2598 		} else {
2599 			host->ecc_bytes_hw = 13;
2600 			host->spare_bytes = 2;
2601 			host->bbm_size = 1;
2602 		}
2603 	} else {
2604 		/*
2605 		 * if the controller supports BCH for 4 bit ECC, it uses
2606 		 * fewer bytes for ECC. If RS is used, the ECC is always
2607 		 * 10 bytes
2608 		 */
2609 		if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
2610 			/* BCH */
2611 			host->bch_enabled = true;
2612 			ecc_mode = 0;
2613 
2614 			if (wide_bus) {
2615 				host->ecc_bytes_hw = 8;
2616 				host->spare_bytes = 2;
2617 				host->bbm_size = 2;
2618 			} else {
2619 				host->ecc_bytes_hw = 7;
2620 				host->spare_bytes = 4;
2621 				host->bbm_size = 1;
2622 			}
2623 		} else {
2624 			/* RS */
2625 			host->ecc_bytes_hw = 10;
2626 
2627 			if (wide_bus) {
2628 				host->spare_bytes = 0;
2629 				host->bbm_size = 2;
2630 			} else {
2631 				host->spare_bytes = 1;
2632 				host->bbm_size = 1;
2633 			}
2634 		}
2635 	}
2636 
2637 	/*
2638 	 * we consider ecc->bytes as the sum of all the non-data content in a
2639 	 * step. It gives us a clean representation of the oob area (even if
2640 	 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
2641 	 * ECC and 12 bytes for 4 bit ECC
2642 	 */
2643 	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
2644 
2645 	ecc->read_page		= qcom_nandc_read_page;
2646 	ecc->read_page_raw	= qcom_nandc_read_page_raw;
2647 	ecc->read_oob		= qcom_nandc_read_oob;
2648 	ecc->write_page		= qcom_nandc_write_page;
2649 	ecc->write_page_raw	= qcom_nandc_write_page_raw;
2650 	ecc->write_oob		= qcom_nandc_write_oob;
2651 
2652 	ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2653 
2654 	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
2655 
2656 	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
2657 				     cwperpage);
2658 
2659 	/*
2660 	 * UD_SIZE_BYTES varies based on whether the read/write command protects
2661 	 * spare data with ECC too. We protect spare data by default, so we set
2662 	 * it to main + spare data, which are 512 and 4 bytes respectively.
2663 	 */
2664 	host->cw_data = 516;
2665 
2666 	/*
2667 	 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
2668 	 * for 8 bit ECC
2669 	 */
2670 	host->cw_size = host->cw_data + ecc->bytes;
2671 	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
2672 
2673 	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
2674 				| host->cw_data << UD_SIZE_BYTES
2675 				| 0 << DISABLE_STATUS_AFTER_WRITE
2676 				| 5 << NUM_ADDR_CYCLES
2677 				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
2678 				| 0 << STATUS_BFR_READ
2679 				| 1 << SET_RD_MODE_AFTER_STATUS
2680 				| host->spare_bytes << SPARE_SIZE_BYTES;
2681 
2682 	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
2683 				| 0 <<  CS_ACTIVE_BSY
2684 				| bad_block_byte << BAD_BLOCK_BYTE_NUM
2685 				| 0 << BAD_BLOCK_IN_SPARE_AREA
2686 				| 2 << WR_RD_BSY_GAP
2687 				| wide_bus << WIDE_FLASH
2688 				| host->bch_enabled << ENABLE_BCH_ECC;
2689 
2690 	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
2691 				| host->cw_size << UD_SIZE_BYTES
2692 				| 5 << NUM_ADDR_CYCLES
2693 				| 0 << SPARE_SIZE_BYTES;
2694 
2695 	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
2696 				| 0 << CS_ACTIVE_BSY
2697 				| 17 << BAD_BLOCK_BYTE_NUM
2698 				| 1 << BAD_BLOCK_IN_SPARE_AREA
2699 				| 2 << WR_RD_BSY_GAP
2700 				| wide_bus << WIDE_FLASH
2701 				| 1 << DEV0_CFG1_ECC_DISABLE;
2702 
2703 	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
2704 				| 0 << ECC_SW_RESET
2705 				| host->cw_data << ECC_NUM_DATA_BYTES
2706 				| 1 << ECC_FORCE_CLK_OPEN
2707 				| ecc_mode << ECC_MODE
2708 				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
2709 
2710 	if (!nandc->props->qpic_v2)
2711 		host->ecc_buf_cfg = 0x203 << NUM_STEPS;
2712 
2713 	host->clrflashstatus = FS_READY_BSY_N;
2714 	host->clrreadstatus = 0xc0;
2715 	nandc->regs->erased_cw_detect_cfg_clr =
2716 		cpu_to_le32(CLR_ERASED_PAGE_DET);
2717 	nandc->regs->erased_cw_detect_cfg_set =
2718 		cpu_to_le32(SET_ERASED_PAGE_DET);
2719 
2720 	dev_dbg(nandc->dev,
2721 		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch_cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
2722 		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
2723 		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
2724 		cwperpage);
2725 
2726 	return 0;
2727 }
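
/*
 * Worked example of the geometry above, assuming a 2K page with 4 bit RS
 * ECC on an 8 bit bus: cwperpage = 4, ecc->bytes = 12,
 * cw_size = 516 + 12 = 528 and bad_block_byte = 2048 - 528 * 3 + 1 = 465,
 * the value programmed into the BAD_BLOCK_BYTE_NUM field of CFG1.
 */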
2728 
2729 static const struct nand_controller_ops qcom_nandc_ops = {
2730 	.attach_chip = qcom_nand_attach_chip,
2731 };
2732 
2733 static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
2734 {
2735 	if (nandc->props->is_bam) {
2736 		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
2737 			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
2738 					 MAX_REG_RD *
2739 					 sizeof(*nandc->reg_read_buf),
2740 					 DMA_FROM_DEVICE);
2741 
2742 		if (nandc->tx_chan)
2743 			dma_release_channel(nandc->tx_chan);
2744 
2745 		if (nandc->rx_chan)
2746 			dma_release_channel(nandc->rx_chan);
2747 
2748 		if (nandc->cmd_chan)
2749 			dma_release_channel(nandc->cmd_chan);
2750 	} else {
2751 		if (nandc->chan)
2752 			dma_release_channel(nandc->chan);
2753 	}
2754 }
2755 
2756 static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
2757 {
2758 	int ret;
2759 
2760 	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
2761 	if (ret) {
2762 		dev_err(nandc->dev, "failed to set DMA mask\n");
2763 		return ret;
2764 	}
2765 
2766 	/*
2767 	 * we use the internal buffer for reading ONFI params, reading small
2768 	 * data like ID and status, and performing read-copy-write operations
2769 	 * when writing to a codeword partially. 532 is the maximum possible
2770 	 * size of a codeword for our nand controller
2771 	 */
2772 	nandc->buf_size = 532;
2773 
2774 	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
2775 					GFP_KERNEL);
2776 	if (!nandc->data_buffer)
2777 		return -ENOMEM;
2778 
2779 	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
2780 					GFP_KERNEL);
2781 	if (!nandc->regs)
2782 		return -ENOMEM;
2783 
2784 	nandc->reg_read_buf = devm_kcalloc(nandc->dev,
2785 				MAX_REG_RD, sizeof(*nandc->reg_read_buf),
2786 				GFP_KERNEL);
2787 	if (!nandc->reg_read_buf)
2788 		return -ENOMEM;
2789 
2790 	if (nandc->props->is_bam) {
2791 		nandc->reg_read_dma =
2792 			dma_map_single(nandc->dev, nandc->reg_read_buf,
2793 				       MAX_REG_RD *
2794 				       sizeof(*nandc->reg_read_buf),
2795 				       DMA_FROM_DEVICE);
2796 		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
2797 			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
2798 			return -EIO;
2799 		}
2800 
2801 		nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
2802 		if (IS_ERR(nandc->tx_chan)) {
2803 			ret = PTR_ERR(nandc->tx_chan);
2804 			nandc->tx_chan = NULL;
2805 			dev_err_probe(nandc->dev, ret,
2806 				      "tx DMA channel request failed\n");
2807 			goto unalloc;
2808 		}
2809 
2810 		nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
2811 		if (IS_ERR(nandc->rx_chan)) {
2812 			ret = PTR_ERR(nandc->rx_chan);
2813 			nandc->rx_chan = NULL;
2814 			dev_err_probe(nandc->dev, ret,
2815 				      "rx DMA channel request failed\n");
2816 			goto unalloc;
2817 		}
2818 
2819 		nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
2820 		if (IS_ERR(nandc->cmd_chan)) {
2821 			ret = PTR_ERR(nandc->cmd_chan);
2822 			nandc->cmd_chan = NULL;
2823 			dev_err_probe(nandc->dev, ret,
2824 				      "cmd DMA channel request failed\n");
2825 			goto unalloc;
2826 		}
2827 
2828 		/*
2829 		 * Initially allocate BAM transaction to read ONFI param page.
2830 		 * After detecting all the devices, this BAM transaction will
2831 		 * be freed and the next BAM transaction will be allocated with
2832 		 * maximum codeword size
2833 		 */
2834 		nandc->max_cwperpage = 1;
2835 		nandc->bam_txn = alloc_bam_transaction(nandc);
2836 		if (!nandc->bam_txn) {
2837 			dev_err(nandc->dev,
2838 				"failed to allocate bam transaction\n");
2839 			ret = -ENOMEM;
2840 			goto unalloc;
2841 		}
2842 	} else {
2843 		nandc->chan = dma_request_chan(nandc->dev, "rxtx");
2844 		if (IS_ERR(nandc->chan)) {
2845 			ret = PTR_ERR(nandc->chan);
2846 			nandc->chan = NULL;
2847 			dev_err_probe(nandc->dev, ret,
2848 				      "rxtx DMA channel request failed\n");
2849 			return ret;
2850 		}
2851 	}
2852 
2853 	INIT_LIST_HEAD(&nandc->desc_list);
2854 	INIT_LIST_HEAD(&nandc->host_list);
2855 
2856 	nand_controller_init(&nandc->controller);
2857 	nandc->controller.ops = &qcom_nandc_ops;
2858 
2859 	return 0;
2860 unalloc:
2861 	qcom_nandc_unalloc(nandc);
2862 	return ret;
2863 }
2864 
2865 /* one time setup of a few nand controller registers */
2866 static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
2867 {
2868 	u32 nand_ctrl;
2869 
2870 	/* kill onenand */
2871 	if (!nandc->props->is_qpic)
2872 		nandc_write(nandc, SFLASHC_BURST_CFG, 0);
2873 
2874 	if (!nandc->props->qpic_v2)
2875 		nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
2876 			    NAND_DEV_CMD_VLD_VAL);
2877 
2878 	/* enable ADM or BAM DMA */
2879 	if (nandc->props->is_bam) {
2880 		nand_ctrl = nandc_read(nandc, NAND_CTRL);
2881 
2882 		/*
2883 		 * NAND_CTRL is an operational register, and CPU
2884 		 * access to operational registers is read-only
2885 		 * in BAM mode. So update the NAND_CTRL register
2886 		 * only if it is not in BAM mode. In most cases BAM
2887 		 * mode will be enabled in the bootloader
2888 		 */
2889 		if (!(nand_ctrl & BAM_MODE_EN))
2890 			nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
2891 	} else {
2892 		nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
2893 	}
2894 
2895 	/* save the original values of these registers */
2896 	if (!nandc->props->qpic_v2) {
2897 		nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
2898 		nandc->vld = NAND_DEV_CMD_VLD_VAL;
2899 	}
2900 
2901 	return 0;
2902 }
2903 
2904 static const char * const probes[] = { "cmdlinepart", "ofpart", "qcomsmem", NULL };
2905 
2906 static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
2907 					    struct qcom_nand_host *host,
2908 					    struct device_node *dn)
2909 {
2910 	struct nand_chip *chip = &host->chip;
2911 	struct mtd_info *mtd = nand_to_mtd(chip);
2912 	struct device *dev = nandc->dev;
2913 	int ret;
2914 
2915 	ret = of_property_read_u32(dn, "reg", &host->cs);
2916 	if (ret) {
2917 		dev_err(dev, "can't get chip-select\n");
2918 		return -ENXIO;
2919 	}
2920 
2921 	nand_set_flash_node(chip, dn);
2922 	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
2923 	if (!mtd->name)
2924 		return -ENOMEM;
2925 
2926 	mtd->owner = THIS_MODULE;
2927 	mtd->dev.parent = dev;
2928 
2929 	chip->legacy.cmdfunc	= qcom_nandc_command;
2930 	chip->legacy.select_chip	= qcom_nandc_select_chip;
2931 	chip->legacy.read_byte	= qcom_nandc_read_byte;
2932 	chip->legacy.read_buf	= qcom_nandc_read_buf;
2933 	chip->legacy.write_buf	= qcom_nandc_write_buf;
2934 	chip->legacy.set_features	= nand_get_set_features_notsupp;
2935 	chip->legacy.get_features	= nand_get_set_features_notsupp;
2936 
2937 	/*
2938 	 * the bad block marker is readable only when we read the last codeword
2939 	 * of a page with ECC disabled. currently, the nand_base and nand_bbt
2940 	 * helpers don't allow us to read the BBM from a nand chip with ECC
2941 	 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
2942 	 * and block_markbad helpers until we permanently switch to using
2943 	 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
2944 	 */
2945 	chip->legacy.block_bad		= qcom_nandc_block_bad;
2946 	chip->legacy.block_markbad	= qcom_nandc_block_markbad;
2947 
2948 	chip->controller = &nandc->controller;
2949 	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
2950 			 NAND_SKIP_BBTSCAN;
2951 
2952 	/* set up initial status value */
2953 	host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2954 
2955 	ret = nand_scan(chip, 1);
2956 	if (ret)
2957 		return ret;
2958 
2959 	if (nandc->props->is_bam) {
2960 		free_bam_transaction(nandc);
2961 		nandc->bam_txn = alloc_bam_transaction(nandc);
2962 		if (!nandc->bam_txn) {
2963 			dev_err(nandc->dev,
2964 				"failed to allocate bam transaction\n");
2965 			nand_cleanup(chip);
2966 			return -ENOMEM;
2967 		}
2968 	}
2969 
2970 	ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
2971 	if (ret)
2972 		nand_cleanup(chip);
2973 
2974 	return ret;
2975 }
2976 
2977 static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
2978 {
2979 	struct device *dev = nandc->dev;
2980 	struct device_node *dn = dev->of_node, *child;
2981 	struct qcom_nand_host *host;
2982 	int ret = -ENODEV;
2983 
2984 	for_each_available_child_of_node(dn, child) {
2985 		host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2986 		if (!host) {
2987 			of_node_put(child);
2988 			return -ENOMEM;
2989 		}
2990 
2991 		ret = qcom_nand_host_init_and_register(nandc, host, child);
2992 		if (ret) {
2993 			devm_kfree(dev, host);
2994 			continue;
2995 		}
2996 
2997 		list_add_tail(&host->node, &nandc->host_list);
2998 	}
2999 
3000 	return ret;
3001 }
3002 
3003 /* parse custom DT properties here */
3004 static int qcom_nandc_parse_dt(struct platform_device *pdev)
3005 {
3006 	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
3007 	struct device_node *np = nandc->dev->of_node;
3008 	int ret;
3009 
3010 	if (!nandc->props->is_bam) {
3011 		ret = of_property_read_u32(np, "qcom,cmd-crci",
3012 					   &nandc->cmd_crci);
3013 		if (ret) {
3014 			dev_err(nandc->dev, "command CRCI unspecified\n");
3015 			return ret;
3016 		}
3017 
3018 		ret = of_property_read_u32(np, "qcom,data-crci",
3019 					   &nandc->data_crci);
3020 		if (ret) {
3021 			dev_err(nandc->dev, "data CRCI unspecified\n");
3022 			return ret;
3023 		}
3024 	}
3025 
3026 	return 0;
3027 }
3028 
3029 static int qcom_nandc_probe(struct platform_device *pdev)
3030 {
3031 	struct qcom_nand_controller *nandc;
3032 	const void *dev_data;
3033 	struct device *dev = &pdev->dev;
3034 	struct resource *res;
3035 	int ret;
3036 
3037 	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
3038 	if (!nandc)
3039 		return -ENOMEM;
3040 
3041 	platform_set_drvdata(pdev, nandc);
3042 	nandc->dev = dev;
3043 
3044 	dev_data = of_device_get_match_data(dev);
3045 	if (!dev_data) {
3046 		dev_err(&pdev->dev, "failed to get device data\n");
3047 		return -ENODEV;
3048 	}
3049 
3050 	nandc->props = dev_data;
3051 
3052 	nandc->core_clk = devm_clk_get(dev, "core");
3053 	if (IS_ERR(nandc->core_clk))
3054 		return PTR_ERR(nandc->core_clk);
3055 
3056 	nandc->aon_clk = devm_clk_get(dev, "aon");
3057 	if (IS_ERR(nandc->aon_clk))
3058 		return PTR_ERR(nandc->aon_clk);
3059 
3060 	ret = qcom_nandc_parse_dt(pdev);
3061 	if (ret)
3062 		return ret;
3063 
3064 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3065 	nandc->base = devm_ioremap_resource(dev, res);
3066 	if (IS_ERR(nandc->base))
3067 		return PTR_ERR(nandc->base);
3068 
3069 	nandc->base_phys = res->start;
3070 	nandc->base_dma = dma_map_resource(dev, res->start,
3071 					   resource_size(res),
3072 					   DMA_BIDIRECTIONAL, 0);
3073 	if (dma_mapping_error(dev, nandc->base_dma))
3074 		return -ENXIO;
3075 
3076 	ret = qcom_nandc_alloc(nandc);
3077 	if (ret)
3078 		goto err_nandc_alloc;
3079 
3080 	ret = clk_prepare_enable(nandc->core_clk);
3081 	if (ret)
3082 		goto err_core_clk;
3083 
3084 	ret = clk_prepare_enable(nandc->aon_clk);
3085 	if (ret)
3086 		goto err_aon_clk;
3087 
3088 	ret = qcom_nandc_setup(nandc);
3089 	if (ret)
3090 		goto err_setup;
3091 
3092 	ret = qcom_probe_nand_devices(nandc);
3093 	if (ret)
3094 		goto err_setup;
3095 
3096 	return 0;
3097 
3098 err_setup:
3099 	clk_disable_unprepare(nandc->aon_clk);
3100 err_aon_clk:
3101 	clk_disable_unprepare(nandc->core_clk);
3102 err_core_clk:
3103 	qcom_nandc_unalloc(nandc);
3104 err_nandc_alloc:
3105 	dma_unmap_resource(dev, res->start, resource_size(res),
3106 			   DMA_BIDIRECTIONAL, 0);
3107 
3108 	return ret;
3109 }
3110 
3111 static int qcom_nandc_remove(struct platform_device *pdev)
3112 {
3113 	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
3114 	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3115 	struct qcom_nand_host *host;
3116 	struct nand_chip *chip;
3117 	int ret;
3118 
3119 	list_for_each_entry(host, &nandc->host_list, node) {
3120 		chip = &host->chip;
3121 		ret = mtd_device_unregister(nand_to_mtd(chip));
3122 		WARN_ON(ret);
3123 		nand_cleanup(chip);
3124 	}
3125 
3126 	qcom_nandc_unalloc(nandc);
3127 
3128 	clk_disable_unprepare(nandc->aon_clk);
3129 	clk_disable_unprepare(nandc->core_clk);
3130 
3131 	dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
3132 			   DMA_BIDIRECTIONAL, 0);
3133 
3134 	return 0;
3135 }
3136 
3137 static const struct qcom_nandc_props ipq806x_nandc_props = {
3138 	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
3139 	.is_bam = false,
3140 	.dev_cmd_reg_start = 0x0,
3141 };
3142 
3143 static const struct qcom_nandc_props ipq4019_nandc_props = {
3144 	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
3145 	.is_bam = true,
3146 	.is_qpic = true,
3147 	.dev_cmd_reg_start = 0x0,
3148 };
3149 
3150 static const struct qcom_nandc_props ipq8074_nandc_props = {
3151 	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
3152 	.is_bam = true,
3153 	.is_qpic = true,
3154 	.dev_cmd_reg_start = 0x7000,
3155 };
3156 
3157 static const struct qcom_nandc_props sdx55_nandc_props = {
3158 	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
3159 	.is_bam = true,
3160 	.is_qpic = true,
3161 	.qpic_v2 = true,
3162 	.dev_cmd_reg_start = 0x7000,
3163 };
3164 
3165 /*
3166  * data will hold a struct pointer containing more differences once we support
3167  * more controller variants
3168  */
3169 static const struct of_device_id qcom_nandc_of_match[] = {
3170 	{
3171 		.compatible = "qcom,ipq806x-nand",
3172 		.data = &ipq806x_nandc_props,
3173 	},
3174 	{
3175 		.compatible = "qcom,ipq4019-nand",
3176 		.data = &ipq4019_nandc_props,
3177 	},
3178 	{
3179 		.compatible = "qcom,ipq6018-nand",
3180 		.data = &ipq8074_nandc_props,
3181 	},
3182 	{
3183 		.compatible = "qcom,ipq8074-nand",
3184 		.data = &ipq8074_nandc_props,
3185 	},
3186 	{
3187 		.compatible = "qcom,sdx55-nand",
3188 		.data = &sdx55_nandc_props,
3189 	},
3190 	{}
3191 };
3192 MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
3193 
3194 static struct platform_driver qcom_nandc_driver = {
3195 	.driver = {
3196 		.name = "qcom-nandc",
3197 		.of_match_table = qcom_nandc_of_match,
3198 	},
3199 	.probe   = qcom_nandc_probe,
3200 	.remove  = qcom_nandc_remove,
3201 };
3202 module_platform_driver(qcom_nandc_driver);
3203 
3204 MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
3205 MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
3206 MODULE_LICENSE("GPL v2");
3207