// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt)	"spi-nand: " fmt

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

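/*
 * Register accessors for the GET/SET FEATURE operations. All transfers go
 * through spinand->scratchbuf because the spi-mem layer expects DMA-able
 * buffers (see the allocation in spinand_init()).
 */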
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the bits selected by @mask with the value in @val, write the result
 * to the chip and refresh the per-target configuration cache.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}

/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip has only one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct device *dev = &spinand->spimem->spi->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kcalloc(dev,
					  nand->memorg.ntargets,
					  sizeof(*spinand->cfg_cache),
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

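/*
 * The QE (quad enable) bit only needs to be set when one of the selected I/O
 * templates actually transfers data over four lines; chips without
 * SPINAND_HAS_QE_BIT skip this step entirely.
 */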
static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

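/*
 * Translate the ECC bits of the status register into the MTD convention:
 * 0 or a positive number of corrected bitflips on success, -EBADMSG on an
 * uncorrectable error. Vendors with finer-grained reporting provide their
 * own ->get_status() hook; the generic decoding below only distinguishes
 * "no bitflips", "some bitflips" and "uncorrectable".
 */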
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nanddev_get_ecc_conf(nand)->strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};

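/*
 * Glue between the generic NAND ECC engine framework and the on-die ECC
 * logic of SPI NAND chips: the engine is simply switched on or off around
 * each page I/O, and the status register captured after a read is decoded
 * in the ->finish_io_req() hook.
 */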
static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spinand_ondie_ecc_conf *engine_conf;

	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
	nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;

	engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
	if (!engine_conf)
		return -ENOMEM;

	nand->ecc.ctx.priv = engine_conf;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	return 0;
}

static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
{
	kfree(nand->ecc.ctx.priv);
}

static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
					    struct nand_page_io_req *req)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	bool enable = (req->mode != MTD_OPS_RAW);

	/* Only enable or disable the engine */
	return spinand_ecc_enable(spinand, enable);
}

static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
					   struct nand_page_io_req *req)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
	struct spinand_device *spinand = nand_to_spinand(nand);

	if (req->mode == MTD_OPS_RAW)
		return 0;

	/* Nothing to do when finishing a page write */
	if (req->type == NAND_PAGE_WRITE)
		return 0;

	/* Finish a page read: check the status, report errors/bitflips */
	return spinand_check_ecc_status(spinand, engine_conf->status);
}

static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
	.init_ctx = spinand_ondie_ecc_init_ctx,
	.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
	.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
	.finish_io_req = spinand_ondie_ecc_finish_io_req,
};

static struct nand_ecc_engine spinand_ondie_ecc_engine = {
	.ops = &spinand_ondie_ecc_engine_ops,
};

static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;

	if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
	    engine_conf)
		engine_conf->status = status;
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

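/*
 * Transfer data from the chip cache to the in-memory bounce buffers through
 * the per-plane dirmap. The column address starts at 0 when the data area is
 * requested and at the page size for OOB-only accesses; the requested bytes
 * are then copied out of spinand->databuf/oobbuf according to the request.
 */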
static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *rdesc;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	ssize_t ret;

	if (req->datalen) {
		buf = spinand->databuf;
		nbytes = nanddev_page_size(nand);
		column = 0;
	}

	if (req->ooblen) {
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	rdesc = spinand->dirmaps[req->pos.plane].rdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}

static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *wdesc;
	unsigned int nbytes, column = 0;
	void *buf = spinand->databuf;
	ssize_t ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 *
	 * Only reset the data buffer manually, the OOB buffer is prepared by
	 * ECC engines ->prepare_io_req() callback.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nanddev_page_size(nand));

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	wdesc = spinand->dirmaps[req->pos.plane].wdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_wait(struct spinand_device *spinand,
			unsigned long initial_delay_us,
			unsigned long poll_delay_us,
			u8 *s)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
						      spinand->scratchbuf);
	u8 status;
	int ret;

	ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
				  initial_delay_us,
				  poll_delay_us,
				  SPINAND_WAITRDY_TIMEOUT_MS);
	if (ret)
		return ret;

	status = *spinand->scratchbuf;
	if (!(status & STATUS_BUSY))
		goto out;

	/*
	 * Extra read, just in case the BUSY bit has cleared since our last
	 * check.
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
			      u8 ndummy, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(
		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand,
			    SPINAND_RESET_INITIAL_DELAY_US,
			    SPINAND_RESET_POLL_DELAY_US,
			    NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

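/*
 * Read one page: prepare the ECC engine, move the page from the array to the
 * chip cache (PAGE READ), wait for the chip to become ready, record the ECC
 * status, then transfer the cache content and let the ECC engine report
 * bitflips or -EBADMSG.
 */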
static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_READ_INITIAL_DELAY_US,
			   SPINAND_READ_POLL_DELAY_US,
			   &status);
	if (ret < 0)
		return ret;

	spinand_ondie_ecc_save_status(nand, status);

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}

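/*
 * Write one page: prepare the ECC engine, issue WRITE ENABLE, fill the chip
 * cache (PROGRAM LOAD), start the array programming (PROGRAM EXECUTE), then
 * wait for completion and fail with -EIO if the chip reports a program
 * failure.
 */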
static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_WRITE_INITIAL_DELAY_US,
			   SPINAND_WRITE_POLL_DELAY_US,
			   &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		return -EIO;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}

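/*
 * MTD ->_read_oob() handler: iterate over the requested pages, switch dies
 * when needed, and accumulate ECC statistics. Uncorrectable pages are
 * counted but do not stop the loop, so the caller gets as much data as
 * possible along with -EBADMSG.
 */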
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool disable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ret = 0;
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool disable_ecc = false;
	int ret = 0;

	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}

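/*
 * Bad block handling: the bad block marker lives in the first two OOB bytes
 * of the block's first page. It is read and written in raw mode so the
 * on-die ECC does not get in the way; anything other than 0xff 0xff means
 * the block is bad.
 */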
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = sizeof(marker),
		.ooboffs = 0,
		.oobbuf.in = marker,
		.mode = MTD_OPS_RAW,
	};

	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req);
	if (marker[0] != 0xff || marker[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf.out = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_ERASE_INITIAL_DELAY_US,
			   SPINAND_ERASE_POLL_DELAY_US,
			   &status);

	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

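/*
 * Create the direct mapping descriptors used for cache reads/writes. The
 * dirmap covers a full page plus its OOB area, and the plane number is
 * encoded in the address bits right above the column address: e.g. with a
 * 2KiB page, fls(2048) = 12, so plane 1 is selected by setting bit 12 of
 * the dirmap offset.
 */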
static int spinand_create_dirmap(struct spinand_device *spinand,
				 unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_info info = {
		.length = nanddev_page_size(nand) +
			  nanddev_per_page_oobsize(nand),
	};
	struct spi_mem_dirmap_desc *desc;

	/* The plane number is passed in MSB just above the column address */
	info.offset = plane << fls(nand->memorg.pagesize);

	info.op_tmpl = *spinand->op_templates.update_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc = desc;

	return 0;
}

static int spinand_create_dirmaps(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int i, ret;

	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
					sizeof(*spinand->dirmaps) *
					nand->memorg.planes_per_lun,
					GFP_KERNEL);
	if (!spinand->dirmaps)
		return -ENOMEM;

	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
		ret = spinand_create_dirmap(spinand, i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&paragon_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};

static int spinand_manufacturer_match(struct spinand_device *spinand,
				      enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		const struct spinand_manufacturer *manufacturer =
			spinand_manufacturers[i];

		if (id[0] != manufacturer->id)
			continue;

		ret = spinand_match_and_init(spinand,
					     manufacturer->chips,
					     manufacturer->nchips,
					     rdid_method);
		if (ret < 0)
			continue;

		spinand->manufacturer = manufacturer;
		return 0;
	}
	return -ENOTSUPP;
}

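/*
 * SPI NAND chips answer READ_ID in one of three ways: raw opcode, opcode
 * followed by an address byte, or opcode followed by a dummy byte. Try all
 * three dialects in turn and stop at the first one that yields an ID known
 * to one of the registered manufacturer drivers.
 */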
static int spinand_id_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	ret = spinand_read_id_op(spinand, 0, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 1, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_ADDR);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 0, 1, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_DUMMY);

	return ret;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

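/*
 * Pick the first I/O variant (the variant lists are typically ordered
 * fastest first) that the controller can execute, i.e. one that spi-mem
 * accepts and that can transfer a full page plus OOB, possibly split into
 * several chunks.
 */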
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}

/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @rdid_method: read id method to match
 *
 * Match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size,
			   enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (rdid_method != info->devid.method)
			continue;

		if (memcmp(id + 1, info->devid.id, info->devid.len))
			continue;

		nand->memorg = table[i].memorg;
		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->id.len = 1 + table[i].devid.len;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

static int spinand_detect(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_id_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(&spinand->spimem->spi->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(&spinand->spimem->spi->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
			       nanddev_per_page_oobsize(nand),
			       GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	ret = spinand_create_dirmaps(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to create direct mappings for read/write operations (err = %d)\n",
			ret);
		goto err_manuf_cleanup;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_manuf_cleanup;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_manuf_cleanup;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/* SPI-NAND default ECC engine is on-die */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;

	spinand_ecc_enable(spinand, false);
	ret = nanddev_ecc_engine_init(nand);
	if (ret)
		goto err_cleanup_nanddev;

	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	if (nand->ecc.engine) {
		ret = mtd_ooblayout_count_freebytes(mtd);
		if (ret < 0)
			goto err_cleanup_ecc_engine;
	}

	mtd->oobavail = ret;

	/* Propagate ECC information to mtd_info */
	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;

	return 0;

err_cleanup_ecc_engine:
	nanddev_ecc_engine_cleanup(nand);

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(mem);
	mtd = spinand_to_mtd(spinand);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");