// SPDX-License-Identifier: GPL-2.0
/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * Copyright (c) 2017 Socionext Inc.
 *   Reworked by Masahiro Yamada <yamada.masahiro@socionext.com>
 */

#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "denali.h"

#define DENALI_NAND_NAME    "denali-nand"
#define DENALI_DEFAULT_OOB_SKIP_BYTES	8

/* for Indexed Addressing */
#define DENALI_INDEXED_CTRL	0x00
#define DENALI_INDEXED_DATA	0x10

#define DENALI_MAP00		(0 << 26)	/* direct access to buffer */
#define DENALI_MAP01		(1 << 26)	/* read/write pages in PIO */
#define DENALI_MAP10		(2 << 26)	/* high-level control plane */
#define DENALI_MAP11		(3 << 26)	/* direct controller access */

/* MAP11 access cycle type */
#define DENALI_MAP11_CMD	((DENALI_MAP11) | 0)	/* command cycle */
#define DENALI_MAP11_ADDR	((DENALI_MAP11) | 1)	/* address cycle */
#define DENALI_MAP11_DATA	((DENALI_MAP11) | 2)	/* data cycle */

/* MAP10 commands */
#define DENALI_ERASE		0x01

#define DENALI_BANK(denali)	((denali)->active_bank << 24)

#define DENALI_INVALID_BANK	-1
#define DENALI_NR_BANKS		4

static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}
/*
 * Direct Addressing - the slave address forms the control information
 * (command type, bank, block, and page address).  The slave data is the
 * actual data to be transferred.  This mode requires a 28-bit address
 * region to be allocated.
 */
static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
{
	return ioread32(denali->host + addr);
}

static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
				u32 data)
{
	iowrite32(data, denali->host + addr);
}

/*
 * Indexed Addressing - an address translation module intervenes in passing
 * the control information.  This mode reduces the required address range.
 * The control information and transferred data are latched by the registers
 * in the translation module.
 */
static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	return ioread32(denali->host + DENALI_INDEXED_DATA);
}

static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
				 u32 data)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
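
/*
 * Both accessor pairs are reached through the ->host_read/->host_write
 * hooks, so the rest of the driver is agnostic to the addressing mode.
 * As an illustration (a sketch, not extra driver code), issuing a RESET
 * command cycle on the active bank via MAP11 would look like:
 *
 *	denali->host_write(denali,
 *			   DENALI_BANK(denali) | DENALI_MAP11_CMD,
 *			   NAND_CMD_RESET);
 *
 * which is exactly the pattern denali_cmd_ctrl() below builds for CLE/ALE
 * cycles.
 */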

/*
 * Use the configuration feature register to determine the maximum number of
 * banks that the hardware supports.
 */
static void denali_detect_max_banks(struct denali_nand_info *denali)
{
	uint32_t features = ioread32(denali->reg + FEATURES);

	denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);

	/* the encoding changed from rev 5.0 to 5.1 */
	if (denali->revision < 0x0501)
		denali->max_banks <<= 1;
}
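
/*
 * For example (field value assumed for illustration): if FEATURES__N_BANKS
 * reads back as 2, max_banks becomes 1 << 2 = 4; on a controller older than
 * rev 5.1 the same encoding means twice that, i.e. 8 banks.
 */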

static void denali_enable_irq(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		iowrite32(U32_MAX, denali->reg + INTR_EN(i));
	iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_disable_irq(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		iowrite32(0, denali->reg + INTR_EN(i));
	iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_clear_irq(struct denali_nand_info *denali,
			     int bank, uint32_t irq_status)
{
	/* write one to clear bits */
	iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}

static void denali_clear_irq_all(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		denali_clear_irq(denali, i, U32_MAX);
}

static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	irqreturn_t ret = IRQ_NONE;
	uint32_t irq_status;
	int i;

	spin_lock(&denali->irq_lock);

	for (i = 0; i < DENALI_NR_BANKS; i++) {
		irq_status = ioread32(denali->reg + INTR_STATUS(i));
		if (irq_status)
			ret = IRQ_HANDLED;

		denali_clear_irq(denali, i, irq_status);

		if (i != denali->active_bank)
			continue;

		denali->irq_status |= irq_status;

		if (denali->irq_status & denali->irq_mask)
			complete(&denali->complete);
	}

	spin_unlock(&denali->irq_lock);

	return ret;
}

static void denali_reset_irq(struct denali_nand_info *denali)
{
	unsigned long flags;

	spin_lock_irqsave(&denali->irq_lock, flags);
	denali->irq_status = 0;
	denali->irq_mask = 0;
	spin_unlock_irqrestore(&denali->irq_lock, flags);
}

static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
				    uint32_t irq_mask)
{
	unsigned long time_left, flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);

	irq_status = denali->irq_status;

	if (irq_mask & irq_status) {
		/* return immediately if the IRQ has already happened. */
		spin_unlock_irqrestore(&denali->irq_lock, flags);
		return irq_status;
	}

	denali->irq_mask = irq_mask;
	reinit_completion(&denali->complete);
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	time_left = wait_for_completion_timeout(&denali->complete,
						msecs_to_jiffies(1000));
	if (!time_left) {
		dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
			irq_mask);
		return 0;
	}

	return denali->irq_status;
}
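
/*
 * Throughout this driver, asynchronous controller events follow the same
 * pattern: clear the software IRQ state, kick the operation, then block on
 * the expected status bits.  A minimal sketch (mirroring denali_erase()
 * below; the mask is just an example):
 *
 *	denali_reset_irq(denali);
 *	// ... start the operation via denali->host_write() ...
 *	irq_status = denali_wait_for_irq(denali,
 *					 INTR__ERASE_COMP | INTR__ERASE_FAIL);
 *	if (!(irq_status & INTR__ERASE_COMP))
 *		return -EIO;
 */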

static void denali_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	int i;

	for (i = 0; i < len; i++)
		buf[i] = denali->host_read(denali, addr);
}

static void denali_write_buf(struct nand_chip *chip, const uint8_t *buf,
			     int len)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	int i;

	for (i = 0; i < len; i++)
		denali->host_write(denali, addr, buf[i]);
}

static void denali_read_buf16(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	uint16_t *buf16 = (uint16_t *)buf;
	int i;

	for (i = 0; i < len / 2; i++)
		buf16[i] = denali->host_read(denali, addr);
}

static void denali_write_buf16(struct nand_chip *chip, const uint8_t *buf,
			       int len)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	const uint16_t *buf16 = (const uint16_t *)buf;
	int i;

	for (i = 0; i < len / 2; i++)
		denali->host_write(denali, addr, buf16[i]);
}

static uint8_t denali_read_byte(struct nand_chip *chip)
{
	uint8_t byte;

	denali_read_buf(chip, &byte, 1);

	return byte;
}

static void denali_write_byte(struct nand_chip *chip, uint8_t byte)
{
	denali_write_buf(chip, &byte, 1);
}

static void denali_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
	uint32_t type;

	if (ctrl & NAND_CLE)
		type = DENALI_MAP11_CMD;
	else if (ctrl & NAND_ALE)
		type = DENALI_MAP11_ADDR;
	else
		return;

	/*
	 * Some commands are followed by chip->legacy.waitfunc.
	 * irq_status must be cleared here to catch the R/B# interrupt later.
	 */
	if (ctrl & NAND_CTRL_CHANGE)
		denali_reset_irq(denali);

	denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}

static int denali_check_erased_page(struct mtd_info *mtd,
				    struct nand_chip *chip, uint8_t *buf,
				    unsigned long uncor_ecc_flags,
				    unsigned int max_bitflips)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int i, stat;

	for (i = 0; i < ecc_steps; i++) {
		if (!(uncor_ecc_flags & BIT(i)))
			continue;

		stat = nand_check_erased_ecc_chunk(buf, ecc_size,
						  ecc_code, ecc_bytes,
						  NULL, 0,
						  chip->ecc.strength);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}

		buf += ecc_size;
		ecc_code += ecc_bytes;
	}

	return max_bitflips;
}

static int denali_hw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bank = denali->active_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when an uncorrectable error occurs in at
		 * least one ECC sector.  We cannot know how many sectors, or
		 * which sector(s), so we need an erased-page check for all
		 * sectors.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we cannot know the total number of corrected bits in
	 * the page.  Increase the stats by max_bitflips as a compromise.
	 */
	mtd->ecc_stats.corrected += max_bitflips;

	return max_bitflips;
}

static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int ecc_size = denali->nand.ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	do {
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
		err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
					  err_cor_info);
		err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
				       err_cor_info);

		/* reset the bitflip counter when crossing an ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is larger than ecc_size, the error
			 * happened in the OOB area, so we ignore it; there is
			 * no need to correct it.  err_device identifies which
			 * NAND device the error bits occurred in when more
			 * than one NAND is connected.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
					denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

	/*
	 * Once all ECC errors have been handled, the controller triggers an
	 * ECC_TRANSACTION_DONE interrupt.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}
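
/*
 * A worked example of the offset arithmetic above (example numbers only):
 * with ecc_size = 512 and devs_per_cs = 2, an error reported at
 * err_sector = 1, err_byte = 10, err_device = 1 lands at
 *
 *	offset = (1 * 512 + 10) * 2 + 1 = 1045
 *
 * because, with two devices per chip select, the bytes of the two x8 chips
 * are interleaved in the transferred buffer.
 */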

static void denali_setup_dma64(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three-step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	denali->host_write(denali, mode,
			   0x01002000 | (64 << 16) | (write << 8) | page_count);

	/* 2. set memory low address */
	denali->host_write(denali, mode, lower_32_bits(dma_addr));

	/* 3. set memory high address */
	denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
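
/*
 * For reference, the first command word above evaluates to 0x01402101 for a
 * single-page write (write = 1, page_count = 1):
 *
 *	0x01002000 | (64 << 16) | (1 << 8) | 1
 *		= 0x01002000 | 0x00400000 | 0x00000100 | 0x00000001
 *		= 0x01402101
 *
 * (just the arithmetic spelled out; the field encoding itself is
 * controller-specific).
 */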

static void denali_setup_dma32(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four-step process */

	/* 1. setup transfer type and # of pages */
	denali->host_write(denali, mode | page,
			   0x2000 | (write << 8) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali->host_write(denali, mode | 0x14000, 0x2400);
}

static int denali_pio_read(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status, ecc_err_mask;
	int i;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	else
		ecc_err_mask = INTR__ECC_ERR;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		*buf32++ = denali->host_read(denali, addr);

	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
	if (!(irq_status & INTR__PAGE_XFER_INC))
		return -EIO;

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return irq_status & ecc_err_mask ? -EBADMSG : 0;
}

static int denali_pio_write(struct denali_nand_info *denali,
			    const void *buf, size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	const uint32_t *buf32 = (const uint32_t *)buf;
	uint32_t irq_status;
	int i;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		denali->host_write(denali, addr, *buf32++);

	irq_status = denali_wait_for_irq(denali,
				INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
	if (!(irq_status & INTR__PROGRAM_COMP))
		return -EIO;

	return 0;
}

static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	if (write)
		return denali_pio_write(denali, buf, size, page, raw);
	else
		return denali_pio_read(denali, buf, size, page, raw);
}

static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	dma_addr_t dma_addr;
	uint32_t irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(denali->dev, buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, raw, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead.  This flag is asserted
		 * when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
	/*
	 * The ->setup_dma() hook kicks DMA by using the data/command
	 * interface, which belongs to a different AXI port from the
	 * register interface.  Read back the register to avoid a race.
	 */
	ioread32(denali->reg + DMA_ENABLE);

	denali_reset_irq(denali);
	denali->setup_dma(denali, dma_addr, page, write);

	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	iowrite32(0, denali->reg + DMA_ENABLE);

	dma_unmap_single(denali->dev, dma_addr, size, dir);

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}

static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
			    size_t size, int page, int raw, int write)
{
	iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
	iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
		  denali->reg + TRANSFER_SPARE_REG);

	if (denali->dma_avail)
		return denali_dma_xfer(denali, buf, size, page, raw, write);
	else
		return denali_pio_xfer(denali, buf, size, page, raw, write);
}

static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
			    int page, int write)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	uint8_t *bufpoi = chip->oob_poi;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/* BBM at the beginning of the OOB area */
	if (write)
		nand_prog_page_begin_op(chip, page, writesize, bufpoi,
					oob_skip);
	else
		nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
	bufpoi += oob_skip;

	/* OOB ECC */
	for (i = 0; i < ecc_steps; i++) {
		pos = ecc_size + i * (ecc_size + ecc_bytes);
		len = ecc_bytes;

		if (pos >= writesize)
			pos += oob_skip;
		else if (pos + len > writesize)
			len = writesize - pos;

		if (write)
			nand_change_write_column_op(chip, pos, bufpoi, len,
						    false);
		else
			nand_change_read_column_op(chip, pos, bufpoi, len,
						   false);
		bufpoi += len;
		if (len < ecc_bytes) {
			len = ecc_bytes - len;
			if (write)
				nand_change_write_column_op(chip, writesize +
							    oob_skip, bufpoi,
							    len, false);
			else
				nand_change_read_column_op(chip, writesize +
							   oob_skip, bufpoi,
							   len, false);
			bufpoi += len;
		}
	}

	/* OOB free */
	len = oobsize - (bufpoi - chip->oob_poi);
	if (write)
		nand_change_write_column_op(chip, size - len, bufpoi, len,
					    false);
	else
		nand_change_read_column_op(chip, size - len, bufpoi, len,
					   false);
}
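
/*
 * The column arithmetic above (and in the raw page accessors below) reflects
 * how the controller lays a page out on the device: payload and ECC are
 * interleaved per ECC step and packed back to back, except that the first
 * oob_skip_bytes of the spare area (at column 'writesize') are reserved for
 * the bad block marker, so any data that would overlap them is pushed past.
 * Schematically, for a page with two ECC steps:
 *
 *	| payload0 | ECC0 | payload1 | ECC1 | BBM | ...spill... | free |
 *	0                           writesize^
 *
 * (an illustration of the code above, not an extra data structure).
 */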

static int denali_read_page_raw(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int ret, i, pos, len;

	ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
	if (ret)
		return ret;

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(buf, tmp_buf + pos, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(buf, tmp_buf + writesize + oob_skip,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(oob, tmp_buf + writesize, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(oob, tmp_buf + pos, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(oob, tmp_buf + writesize + oob_skip,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(oob, tmp_buf + size - len, len);
	}

	return 0;
}

static int denali_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	denali_oob_xfer(mtd, chip, page, 0);

	return 0;
}

static int denali_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	denali_reset_irq(denali);

	denali_oob_xfer(mtd, chip, page, 1);

	return nand_prog_page_end_op(chip);
}

static int denali_read_page(struct nand_chip *chip, uint8_t *buf,
			    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;
	int ret;

	ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
	if (ret && ret != -EBADMSG)
		return ret;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
	else if (ret == -EBADMSG)
		stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		ret = denali_read_oob(chip, page);
		if (ret)
			return ret;

		stat = denali_check_erased_page(mtd, chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}

static int denali_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/*
	 * Fill the buffer with 0xff first unless this is a full-page
	 * transfer.  This simplifies the logic.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, size);

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, buf, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(tmp_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(tmp_buf + writesize, oob, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, oob, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(tmp_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(tmp_buf + size - len, oob, len);
	}

	return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}

static int denali_write_page(struct nand_chip *chip, const uint8_t *buf,
			     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	return denali_data_xfer(denali, (void *)buf, mtd->writesize,
				page, 0, 1);
}

static void denali_select_chip(struct nand_chip *chip, int cs)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));

	denali->active_bank = cs;
}

static int denali_waitfunc(struct nand_chip *chip)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
	uint32_t irq_status;

	/* R/B# pin transitioned from low to high? */
	irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);

	return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
}

static int denali_erase(struct nand_chip *chip, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
	uint32_t irq_status;

	denali_reset_irq(denali);

	denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
			   DENALI_ERASE);

	/* wait for erase to complete or failure to occur */
	irq_status = denali_wait_for_irq(denali,
					 INTR__ERASE_COMP | INTR__ERASE_FAIL);

	return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
}

static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
				       const struct nand_data_interface *conf)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
	const struct nand_sdr_timings *timings;
	unsigned long t_x, mult_x;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	uint32_t tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_x)
		return -EINVAL;

	/*
	 * The bus interface clock, clk_x, is phase aligned with the core clock.
	 * The clk_x is an integral multiple N of the core clk.  The value N is
	 * configured at IP delivery time, and its available values are 4, 5,
	 * and 6.
	 */
	mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
	if (mult_x < 4 || mult_x > 6)
		return -EINVAL;

	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	/* tREA -> ACC_CLKS */
	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

	tmp = ioread32(denali->reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
	iowrite32(tmp, denali->reg + ACC_CLKS);

	/* tRHW -> RE_2_WE */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

	tmp = ioread32(denali->reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
	iowrite32(tmp, denali->reg + RE_2_WE);

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

	tmp = ioread32(denali->reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
	iowrite32(tmp, denali->reg + RE_2_RE);

	/*
	 * tCCS, tWHR -> WE_2_RE
	 *
	 * With WE_2_RE properly set, the Denali controller automatically takes
	 * care of the delay; the driver need not set NAND_WAIT_TCCS.
	 */
	we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

	tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
	iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);

	/* tADL -> ADDR_2_DATA */

	/* for older versions, ADDR_2_DATA is only 6 bits wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;

	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

	tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
	iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_x);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
	iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);

	/* tRP, tWP -> RDWR_EN_LO_CNT */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_x);
	rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
	iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);

	/* tCS, tCEA -> CS_SETUP_CNT */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

	tmp = ioread32(denali->reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
	iowrite32(tmp, denali->reg + CS_SETUP_CNT);

	return 0;
}
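
/*
 * A quick worked example of the conversion above (example clock rates only):
 * with clk_x_rate = 200 MHz, the clk_x period is
 *
 *	t_x = 1000000000000 / 200000000 = 5000 ps
 *
 * so a tREA_max of 20000 ps (20 ns) yields
 *
 *	acc_clks = DIV_ROUND_UP(20000, 5000) = 4
 *
 * before being clamped to the ACC_CLKS__VALUE field width.
 */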

static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * The REVISION register may not be reliable.  Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision = swab16(ioread32(denali->reg + REVISION));

	/*
	 * Set how many bytes should be skipped before writing data in OOB.
	 * If a non-zero value has already been set (by firmware or something),
	 * just use it.  Otherwise, set the driver default.
	 */
	denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
	if (!denali->oob_skip_bytes) {
		denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
		iowrite32(denali->oob_skip_bytes,
			  denali->reg + SPARE_AREA_SKIP_BYTES);
	}

	denali_detect_max_banks(denali);
	iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);

	iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
}

int denali_calc_ecc_bytes(int step_size, int strength)
{
	/* BCH code.  Denali requires ecc.bytes to be a multiple of 2 */
	return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);
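
/*
 * For example (just the arithmetic, using common values): a 512-byte step
 * with strength 8 gives fls(512 * 8) = fls(4096) = 13 bits per symbol, so
 *
 *	DIV_ROUND_UP(8 * 13, 16) * 2 = DIV_ROUND_UP(104, 16) * 2 = 7 * 2
 *		= 14 ECC bytes per step.
 */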

static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
				struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = denali->oob_skip_bytes;
	oobregion->length = chip->ecc.total;

	return 0;
}

static int denali_ooblayout_free(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
	oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};
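
/*
 * Putting the two callbacks together, the reported OOB layout for example
 * values of oob_skip_bytes = 8, ecc.total = 56 and oobsize = 128 would be:
 *
 *	[0,   7]  BBM / skipped bytes
 *	[8,  63]  ECC region
 *	[64, 127] free region
 *
 * (example numbers only; the actual values depend on the chosen ECC
 * configuration).
 */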

static int denali_multidev_fixup(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);

	/*
	 * Support for multi-device:
	 * When the IP configuration is x16 capable and two x8 chips are
	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
	 * In this case, the core framework knows nothing about this fact,
	 * so we should tell it the _logical_ pagesize and whatever else is
	 * necessary.
	 */
	denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);

	/*
	 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
	 * For those, DEVICES_CONNECTED is left at 0.  Set it to 1 in that
	 * case.
	 */
	if (denali->devs_per_cs == 0) {
		denali->devs_per_cs = 1;
		iowrite32(1, denali->reg + DEVICES_CONNECTED);
	}

	if (denali->devs_per_cs == 1)
		return 0;

	if (denali->devs_per_cs != 2) {
		dev_err(denali->dev, "unsupported number of devices %d\n",
			denali->devs_per_cs);
		return -EINVAL;
	}

	/* 2 chips in parallel */
	mtd->size <<= 1;
	mtd->erasesize <<= 1;
	mtd->writesize <<= 1;
	mtd->oobsize <<= 1;
	chip->chipsize <<= 1;
	chip->page_shift += 1;
	chip->phys_erase_shift += 1;
	chip->bbt_erase_shift += 1;
	chip->chip_shift += 1;
	chip->pagemask <<= 1;
	chip->ecc.size <<= 1;
	chip->ecc.bytes <<= 1;
	chip->ecc.strength <<= 1;
	denali->oob_skip_bytes <<= 1;

	return 0;
}
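
/*
 * As an illustration of the doubling above: two x8 devices, each with a
 * 2048-byte page and 128-byte spare area, are presented to the core as one
 * logical chip with writesize = 4096 and oobsize = 256, and every per-step
 * ECC parameter scales accordingly.
 */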

static int denali_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int ret;

	if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
		denali->dma_avail = 1;

	if (denali->dma_avail) {
		int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;

		ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
		if (ret) {
			dev_info(denali->dev,
				 "Failed to set DMA mask. Disabling DMA.\n");
			denali->dma_avail = 0;
		}
	}

	if (denali->dma_avail) {
		chip->options |= NAND_USE_BOUNCE_BUFFER;
		chip->buf_align = 16;
		if (denali->caps & DENALI_CAP_DMA_64BIT)
			denali->setup_dma = denali_setup_dma64;
		else
			denali->setup_dma = denali_setup_dma32;
	}

	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->bbt_options |= NAND_BBT_NO_OOB;
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->options |= NAND_NO_SUBPAGE_WRITE;

	ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
				   mtd->oobsize - denali->oob_skip_bytes);
	if (ret) {
		dev_err(denali->dev, "Failed to setup ECC settings.\n");
		return ret;
	}

	dev_dbg(denali->dev,
		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);

	iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
		  FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
		  denali->reg + ECC_CORRECTION);
	iowrite32(mtd->erasesize / mtd->writesize,
		  denali->reg + PAGES_PER_BLOCK);
	iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
		  denali->reg + DEVICE_WIDTH);
	iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
		  denali->reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
	iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);

	iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
	iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
	/* chip->ecc.steps is set by nand_scan_tail(); not available here */
	iowrite32(mtd->writesize / chip->ecc.size,
		  denali->reg + CFG_NUM_DATA_BLOCKS);

	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);

	if (chip->options & NAND_BUSWIDTH_16) {
		chip->legacy.read_buf = denali_read_buf16;
		chip->legacy.write_buf = denali_write_buf16;
	} else {
		chip->legacy.read_buf = denali_read_buf;
		chip->legacy.write_buf = denali_write_buf;
	}
	chip->ecc.read_page = denali_read_page;
	chip->ecc.read_page_raw = denali_read_page_raw;
	chip->ecc.write_page = denali_write_page;
	chip->ecc.write_page_raw = denali_write_page_raw;
	chip->ecc.read_oob = denali_read_oob;
	chip->ecc.write_oob = denali_write_oob;
	chip->legacy.erase = denali_erase;

	ret = denali_multidev_fixup(denali);
	if (ret)
		return ret;

	/*
	 * This buffer is DMA-mapped by denali_{read,write}_page_raw.  Do not
	 * use devm_kmalloc() because the memory allocated by devm_ does not
	 * guarantee DMA-safe alignment.
	 */
	denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!denali->buf)
		return -ENOMEM;

	return 0;
}

static void denali_detach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	kfree(denali->buf);
}

static const struct nand_controller_ops denali_controller_ops = {
	.attach_chip = denali_attach_chip,
	.detach_chip = denali_detach_chip,
	.setup_data_interface = denali_setup_data_interface,
};

int denali_init(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 features = ioread32(denali->reg + FEATURES);
	int ret;

	mtd->dev.parent = denali->dev;
	denali_hw_init(denali);

	init_completion(&denali->complete);
	spin_lock_init(&denali->irq_lock);

	denali_clear_irq_all(denali);

	ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
			       IRQF_SHARED, DENALI_NAND_NAME, denali);
	if (ret) {
		dev_err(denali->dev, "Unable to request IRQ\n");
		return ret;
	}

	denali_enable_irq(denali);

	denali->active_bank = DENALI_INVALID_BANK;

	nand_set_flash_node(chip, denali->dev->of_node);
	/* Fall back to the default name if DT did not provide a "label" property */
	if (!mtd->name)
		mtd->name = "denali-nand";

	chip->legacy.select_chip = denali_select_chip;
	chip->legacy.read_byte = denali_read_byte;
	chip->legacy.write_byte = denali_write_byte;
	chip->legacy.cmd_ctrl = denali_cmd_ctrl;
	chip->legacy.waitfunc = denali_waitfunc;

	if (features & FEATURES__INDEX_ADDR) {
		denali->host_read = denali_indexed_read;
		denali->host_write = denali_indexed_write;
	} else {
		denali->host_read = denali_direct_read;
		denali->host_write = denali_direct_write;
	}

	/* clk rate info is needed for setup_data_interface */
	if (!denali->clk_rate || !denali->clk_x_rate)
		chip->options |= NAND_KEEP_TIMINGS;

	chip->legacy.dummy_controller.ops = &denali_controller_ops;
	ret = nand_scan(chip, denali->max_banks);
	if (ret)
		goto disable_irq;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
		goto cleanup_nand;
	}

	return 0;

cleanup_nand:
	nand_cleanup(chip);
disable_irq:
	denali_disable_irq(denali);

	return ret;
}
EXPORT_SYMBOL(denali_init);

void denali_remove(struct denali_nand_info *denali)
{
	nand_release(&denali->nand);
	denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);

MODULE_DESCRIPTION("Driver core for Denali NAND controller");
MODULE_AUTHOR("Intel Corporation and its suppliers");
MODULE_LICENSE("GPL v2");