xref: /openbmc/linux/drivers/mtd/nand/ecc.c (revision c2fc6b69)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Generic Error-Correcting Code (ECC) engine
 *
 * Copyright (C) 2019 Macronix
 * Author:
 *     Miquèl RAYNAL <miquel.raynal@bootlin.com>
 *
 * This file describes the abstraction of any NAND ECC engine. It has been
 * designed to fit most cases, including parallel NANDs and SPI-NANDs.
 *
 * There are three main situations where instantiating this ECC engine makes
 * sense:
 *   - external: The ECC engine is outside the NAND pipeline, typically this
 *               is a software ECC engine, or a hardware engine that is
 *               outside the NAND controller pipeline.
 *   - pipelined: The ECC engine is inside the NAND pipeline, i.e. on the
 *                controller's side. This is the case for most raw NAND
 *                controllers. In the pipelined case, the ECC bytes are
 *                generated on the fly when a page is written, and the data
 *                is corrected on the fly when a page is read.
 *   - ondie: The ECC engine is inside the NAND pipeline, on the chip's side.
 *            Some NAND chips can correct the data themselves.
 *
 * Besides the initial setup and final cleanups, the interfaces are rather
 * simple:
 *   - prepare: Prepare an I/O request. Enable/disable the ECC engine based on
 *              the I/O request type. In case of software correction or an
 *              external engine, this step may involve deriving the ECC bytes
 *              and placing them in the OOB area before a write.
 *   - finish: Finish an I/O request. Correct the data in case of a read
 *             request and report the number of corrected bits/uncorrectable
 *             errors. Most likely empty for write operations, unless you have
 *             hardware specific stuff to do, like shutting down the engine to
 *             save power.
 *
 * The I/O request should be enclosed in a prepare()/finish() pair of calls
 * (see the sketch below nand_ecc_finish_io_req()) and will behave differently
 * depending on the requested I/O type:
 *   - raw: Correction disabled
 *   - ecc: Correction enabled
 *
 * The request direction impacts the logic as well:
 *   - read: Load data from the NAND chip
 *   - write: Store data in the NAND chip
 *
 * Mixing all these combinations together gives the following behaviors.
 * Those are just examples, drivers are free to add custom steps in their
 * prepare/finish hooks.
 *
 * [external ECC engine]
 *   - external + prepare + raw + read: do nothing
 *   - external + finish  + raw + read: do nothing
 *   - external + prepare + raw + write: do nothing
 *   - external + finish  + raw + write: do nothing
 *   - external + prepare + ecc + read: do nothing
 *   - external + finish  + ecc + read: calculate expected ECC bytes, extract
 *                                      ECC bytes from OOB buffer, correct
 *                                      and report any bitflip/error
 *   - external + prepare + ecc + write: calculate ECC bytes and store them at
 *                                       the right place in the OOB buffer based
 *                                       on the OOB layout
 *   - external + finish  + ecc + write: do nothing
 *
 * [pipelined ECC engine]
 *   - pipelined + prepare + raw + read: disable the controller's ECC engine if
 *                                       activated
 *   - pipelined + finish  + raw + read: do nothing
 *   - pipelined + prepare + raw + write: disable the controller's ECC engine if
 *                                        activated
 *   - pipelined + finish  + raw + write: do nothing
 *   - pipelined + prepare + ecc + read: enable the controller's ECC engine if
 *                                       deactivated
 *   - pipelined + finish  + ecc + read: check the status, report any
 *                                       error/bitflip
 *   - pipelined + prepare + ecc + write: enable the controller's ECC engine if
 *                                        deactivated
 *   - pipelined + finish  + ecc + write: do nothing
 *
 * [ondie ECC engine]
 *   - ondie + prepare + raw + read: send commands to disable the on-chip ECC
 *                                   engine if activated
 *   - ondie + finish  + raw + read: do nothing
 *   - ondie + prepare + raw + write: send commands to disable the on-chip ECC
 *                                    engine if activated
 *   - ondie + finish  + raw + write: do nothing
 *   - ondie + prepare + ecc + read: send commands to enable the on-chip ECC
 *                                   engine if deactivated
 *   - ondie + finish  + ecc + read: send commands to check the status, report
 *                                   any error/bitflip
 *   - ondie + prepare + ecc + write: send commands to enable the on-chip ECC
 *                                    engine if deactivated
 *   - ondie + finish  + ecc + write: do nothing
 */

#include <linux/module.h>
#include <linux/mtd/nand.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>

static LIST_HEAD(on_host_hw_engines);
static DEFINE_MUTEX(on_host_hw_engines_mutex);

/**
 * nand_ecc_init_ctx - Init the ECC engine context
 * @nand: the NAND device
 *
 * On success, the caller is responsible for calling nand_ecc_cleanup_ctx().
 */
int nand_ecc_init_ctx(struct nand_device *nand)
{
	if (!nand->ecc.engine || !nand->ecc.engine->ops->init_ctx)
		return 0;

	return nand->ecc.engine->ops->init_ctx(nand);
}
EXPORT_SYMBOL(nand_ecc_init_ctx);

/**
 * nand_ecc_cleanup_ctx - Cleanup the ECC engine context
 * @nand: the NAND device
 */
void nand_ecc_cleanup_ctx(struct nand_device *nand)
{
	if (nand->ecc.engine && nand->ecc.engine->ops->cleanup_ctx)
		nand->ecc.engine->ops->cleanup_ctx(nand);
}
EXPORT_SYMBOL(nand_ecc_cleanup_ctx);
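
/*
 * A minimal sketch of the expected pairing, assuming a hypothetical
 * mydrv_attach() helper in a NAND driver (none of the mydrv_* names exist
 * in this file): nand_ecc_init_ctx() at attach time, nand_ecc_cleanup_ctx()
 * on the error path and at detach time.
 *
 *	static int mydrv_attach(struct nand_device *nand)
 *	{
 *		int ret;
 *
 *		ret = nand_ecc_init_ctx(nand);
 *		if (ret)
 *			return ret;
 *
 *		ret = mydrv_setup(nand);
 *		if (ret)
 *			nand_ecc_cleanup_ctx(nand);
 *
 *		return ret;
 *	}
 */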

/**
 * nand_ecc_prepare_io_req - Prepare an I/O request
 * @nand: the NAND device
 * @req: the I/O request
 */
int nand_ecc_prepare_io_req(struct nand_device *nand,
			    struct nand_page_io_req *req)
{
	if (!nand->ecc.engine || !nand->ecc.engine->ops->prepare_io_req)
		return 0;

	return nand->ecc.engine->ops->prepare_io_req(nand, req);
}
EXPORT_SYMBOL(nand_ecc_prepare_io_req);

/**
 * nand_ecc_finish_io_req - Finish an I/O request
 * @nand: the NAND device
 * @req: the I/O request
 */
int nand_ecc_finish_io_req(struct nand_device *nand,
			   struct nand_page_io_req *req)
{
	if (!nand->ecc.engine || !nand->ecc.engine->ops->finish_io_req)
		return 0;

	return nand->ecc.engine->ops->finish_io_req(nand, req);
}
EXPORT_SYMBOL(nand_ecc_finish_io_req);
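
/*
 * A condensed sketch of how an I/O request is expected to be enclosed in a
 * prepare()/finish() pair, as described at the top of this file. The
 * mydrv_do_page_io() step is hypothetical and depends on the bus (parallel
 * NAND, SPI-NAND, ...); on reads, the finish hook typically returns the
 * maximum number of corrected bitflips or -EBADMSG.
 *
 *	static int mydrv_exec_page_io(struct nand_device *nand,
 *				      struct nand_page_io_req *req)
 *	{
 *		int ret;
 *
 *		ret = nand_ecc_prepare_io_req(nand, req);
 *		if (ret)
 *			return ret;
 *
 *		ret = mydrv_do_page_io(nand, req);
 *		if (ret)
 *			return ret;
 *
 *		return nand_ecc_finish_io_req(nand, req);
 *	}
 */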

/* Define default OOB placement schemes for large and small page devices */
static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int total_ecc_bytes = nand->ecc.ctx.total;

	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		if (mtd->oobsize == 16)
			oobregion->length = 4;
		else
			oobregion->length = 3;
	} else {
		if (mtd->oobsize == 8)
			return -ERANGE;

		oobregion->offset = 6;
		oobregion->length = total_ecc_bytes - 4;
	}

	return 0;
}

static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	if (section > 1)
		return -ERANGE;

	if (mtd->oobsize == 16) {
		if (section)
			return -ERANGE;

		oobregion->length = 8;
		oobregion->offset = 8;
	} else {
		oobregion->length = 2;
		if (!section)
			oobregion->offset = 3;
		else
			oobregion->offset = 6;
	}

	return 0;
}

static const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
	.ecc = nand_ooblayout_ecc_sp,
	.free = nand_ooblayout_free_sp,
};

const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void)
{
	return &nand_ooblayout_sp_ops;
}
EXPORT_SYMBOL_GPL(nand_get_small_page_ooblayout);
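
/*
 * A short sketch of how a driver is expected to consume this getter: pick
 * the layout matching the page size and install it with mtd_set_ooblayout().
 * The surrounding driver code is hypothetical.
 *
 *	struct mtd_info *mtd = nanddev_to_mtd(nand);
 *
 *	if (mtd->writesize <= 512)
 *		mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
 *	else
 *		mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
 */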

static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int total_ecc_bytes = nand->ecc.ctx.total;

	if (section || !total_ecc_bytes)
		return -ERANGE;

	oobregion->length = total_ecc_bytes;
	oobregion->offset = mtd->oobsize - oobregion->length;

	return 0;
}

static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int total_ecc_bytes = nand->ecc.ctx.total;

	if (section)
		return -ERANGE;

	oobregion->length = mtd->oobsize - total_ecc_bytes - 2;
	oobregion->offset = 2;

	return 0;
}

static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.free = nand_ooblayout_free_lp,
};

const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void)
{
	return &nand_ooblayout_lp_ops;
}
EXPORT_SYMBOL_GPL(nand_get_large_page_ooblayout);
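
/*
 * Once installed, a layout is walked through the generic mtd_ooblayout_ecc()
 * and mtd_ooblayout_free() accessors. A minimal sketch querying the single
 * ECC section exposed by the large page layout above:
 *
 *	struct mtd_oob_region oobregion;
 *	int ret;
 *
 *	ret = mtd_ooblayout_ecc(mtd, 0, &oobregion);
 *	if (!ret)
 *		pr_debug("ECC bytes at offset %u, length %u\n",
 *			 oobregion.offset, oobregion.length);
 */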

/*
 * Support the old "large page" layout used for 1-bit Hamming ECC where the
 * ECC bytes are placed at a fixed offset.
 */
static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int total_ecc_bytes = nand->ecc.ctx.total;

	if (section)
		return -ERANGE;

	switch (mtd->oobsize) {
	case 64:
		oobregion->offset = 40;
		break;
	case 128:
		oobregion->offset = 80;
		break;
	default:
		return -EINVAL;
	}

	oobregion->length = total_ecc_bytes;
	if (oobregion->offset + oobregion->length > mtd->oobsize)
		return -ERANGE;

	return 0;
}

static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int total_ecc_bytes = nand->ecc.ctx.total;
	int ecc_offset = 0;

	if (section < 0 || section > 1)
		return -ERANGE;

	switch (mtd->oobsize) {
	case 64:
		ecc_offset = 40;
		break;
	case 128:
		ecc_offset = 80;
		break;
	default:
		return -EINVAL;
	}

	if (section == 0) {
		oobregion->offset = 2;
		oobregion->length = ecc_offset - 2;
	} else {
		oobregion->offset = ecc_offset + total_ecc_bytes;
		oobregion->length = mtd->oobsize - oobregion->offset;
	}

	return 0;
}

static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};

const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void)
{
	return &nand_ooblayout_lp_hamming_ops;
}
EXPORT_SYMBOL_GPL(nand_get_large_page_hamming_ooblayout);

static enum nand_ecc_engine_type
of_get_nand_ecc_engine_type(struct device_node *np)
{
	struct device_node *eng_np;

	if (of_property_read_bool(np, "nand-no-ecc-engine"))
		return NAND_ECC_ENGINE_TYPE_NONE;

	if (of_property_read_bool(np, "nand-use-soft-ecc-engine"))
		return NAND_ECC_ENGINE_TYPE_SOFT;

	eng_np = of_parse_phandle(np, "nand-ecc-engine", 0);
	/* Only the pointer value is compared below, so drop the reference now */
	of_node_put(eng_np);

	if (eng_np) {
		if (eng_np == np)
			return NAND_ECC_ENGINE_TYPE_ON_DIE;
		else
			return NAND_ECC_ENGINE_TYPE_ON_HOST;
	}

	return NAND_ECC_ENGINE_TYPE_INVALID;
}
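
/*
 * The engine types above map to devicetree descriptions roughly as follows
 * (illustrative fragments, node names and labels are made up; the property
 * names are the ones parsed by the helper above):
 *
 *	nand@0 {
 *		nand-no-ecc-engine;		// NAND_ECC_ENGINE_TYPE_NONE
 *	};
 *	nand@1 {
 *		nand-use-soft-ecc-engine;	// NAND_ECC_ENGINE_TYPE_SOFT
 *	};
 *	chip2: nand@2 {
 *		nand-ecc-engine = <&chip2>;	// points to itself: ON_DIE
 *	};
 *	nand@3 {
 *		nand-ecc-engine = <&extecc>;	// points elsewhere: ON_HOST
 *	};
 */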

static const char * const nand_ecc_placement[] = {
	[NAND_ECC_PLACEMENT_OOB] = "oob",
	[NAND_ECC_PLACEMENT_INTERLEAVED] = "interleaved",
};

static enum nand_ecc_placement of_get_nand_ecc_placement(struct device_node *np)
{
	enum nand_ecc_placement placement;
	const char *pm;
	int err;

	err = of_property_read_string(np, "nand-ecc-placement", &pm);
	if (!err) {
		for (placement = NAND_ECC_PLACEMENT_OOB;
		     placement < ARRAY_SIZE(nand_ecc_placement); placement++) {
			if (!strcasecmp(pm, nand_ecc_placement[placement]))
				return placement;
		}
	}

	return NAND_ECC_PLACEMENT_UNKNOWN;
}

static const char * const nand_ecc_algos[] = {
	[NAND_ECC_ALGO_HAMMING] = "hamming",
	[NAND_ECC_ALGO_BCH] = "bch",
	[NAND_ECC_ALGO_RS] = "rs",
};

static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np)
{
	enum nand_ecc_algo ecc_algo;
	const char *pm;
	int err;

	err = of_property_read_string(np, "nand-ecc-algo", &pm);
	if (!err) {
		for (ecc_algo = NAND_ECC_ALGO_HAMMING;
		     ecc_algo < ARRAY_SIZE(nand_ecc_algos);
		     ecc_algo++) {
			if (!strcasecmp(pm, nand_ecc_algos[ecc_algo]))
				return ecc_algo;
		}
	}

	return NAND_ECC_ALGO_UNKNOWN;
}

/* Returns the property value, or a negative errno when the property is absent */
static int of_get_nand_ecc_step_size(struct device_node *np)
{
	int ret;
	u32 val;

	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
	return ret ? ret : val;
}

/* Returns the property value, or a negative errno when the property is absent */
static int of_get_nand_ecc_strength(struct device_node *np)
{
	int ret;
	u32 val;

	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
	return ret ? ret : val;
}

void of_get_nand_ecc_user_config(struct nand_device *nand)
{
	struct device_node *dn = nanddev_get_of_node(nand);
	int strength, size;

	nand->ecc.user_conf.engine_type = of_get_nand_ecc_engine_type(dn);
	nand->ecc.user_conf.algo = of_get_nand_ecc_algo(dn);
	nand->ecc.user_conf.placement = of_get_nand_ecc_placement(dn);

	strength = of_get_nand_ecc_strength(dn);
	if (strength >= 0)
		nand->ecc.user_conf.strength = strength;

	size = of_get_nand_ecc_step_size(dn);
	if (size >= 0)
		nand->ecc.user_conf.step_size = size;

	if (of_property_read_bool(dn, "nand-ecc-maximize"))
		nand->ecc.user_conf.flags |= NAND_ECC_MAXIMIZE_STRENGTH;
}
EXPORT_SYMBOL(of_get_nand_ecc_user_config);
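
/*
 * A typical probe path (sketch) retrieves the user/DT configuration first
 * and then lets the NAND core pick and initialize the matching engine
 * through nanddev_ecc_engine_init(); the error handling around it is
 * driver specific.
 *
 *	of_get_nand_ecc_user_config(nand);
 *
 *	ret = nanddev_ecc_engine_init(nand);
 *	if (ret)
 *		return ret;
 */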

/**
 * nand_ecc_is_strong_enough - Check if the chip configuration meets the
 *                             datasheet requirements.
 *
 * @nand: Device to check
 *
 * If our configuration corrects A bits per B bytes and the minimum
 * required correction level is X bits per Y bytes, then we must ensure
 * both of the following are true:
 *
 * (1) A / B >= X / Y
 * (2) A >= X
 *
 * Requirement (1) ensures we can correct for the required bitflip density.
 * Requirement (2) ensures we can correct even when all bitflips are clumped
 * in the same sector.
 */
bool nand_ecc_is_strong_enough(struct nand_device *nand)
{
	const struct nand_ecc_props *reqs = nanddev_get_ecc_requirements(nand);
	const struct nand_ecc_props *conf = nanddev_get_ecc_conf(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	int corr, ds_corr;

	if (conf->step_size == 0 || reqs->step_size == 0)
		/* Not enough information */
		return true;

	/*
	 * We get the number of corrected bits per page to compare
	 * the correction density.
	 */
	corr = (mtd->writesize * conf->strength) / conf->step_size;
	ds_corr = (mtd->writesize * reqs->strength) / reqs->step_size;

	return corr >= ds_corr && conf->strength >= reqs->strength;
}
EXPORT_SYMBOL(nand_ecc_is_strong_enough);
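
/*
 * Worked example: on a 4096 byte page, an engine configured for 8 bits per
 * 512 byte step corrects up to 4096 * 8 / 512 = 64 bits per page. Against a
 * datasheet requirement of 4 bits per 512 bytes (32 bits per page), both
 * conditions hold (64 >= 32 and 8 >= 4), so the configuration is considered
 * strong enough.
 */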

/* ECC engine driver internal helpers */
int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
			       struct nand_device *nand)
{
	unsigned int total_buffer_size;

	ctx->nand = nand;

	/* Let the user decide the exact length of each buffer */
	if (!ctx->page_buffer_size)
		ctx->page_buffer_size = nanddev_page_size(nand);
	if (!ctx->oob_buffer_size)
		ctx->oob_buffer_size = nanddev_per_page_oobsize(nand);

	total_buffer_size = ctx->page_buffer_size + ctx->oob_buffer_size;

	ctx->spare_databuf = kzalloc(total_buffer_size, GFP_KERNEL);
	if (!ctx->spare_databuf)
		return -ENOMEM;

	ctx->spare_oobbuf = ctx->spare_databuf + ctx->page_buffer_size;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_ecc_init_req_tweaking);

void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx)
{
	kfree(ctx->spare_databuf);
}
EXPORT_SYMBOL_GPL(nand_ecc_cleanup_req_tweaking);

/*
 * Ensure the data and OOB areas are fully read/written, otherwise the
 * correction might not work as expected.
 */
void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
			struct nand_page_io_req *req)
{
	struct nand_device *nand = ctx->nand;
	struct nand_page_io_req *orig, *tweak;

	/* Save the original request */
	ctx->orig_req = *req;
	ctx->bounce_data = false;
	ctx->bounce_oob = false;
	orig = &ctx->orig_req;
	tweak = req;

	/* Ensure the request covers the entire page */
	if (orig->datalen < nanddev_page_size(nand)) {
		ctx->bounce_data = true;
		tweak->dataoffs = 0;
		tweak->datalen = nanddev_page_size(nand);
		tweak->databuf.in = ctx->spare_databuf;
		memset(tweak->databuf.in, 0xFF, ctx->page_buffer_size);
	}

	if (orig->ooblen < nanddev_per_page_oobsize(nand)) {
		ctx->bounce_oob = true;
		tweak->ooboffs = 0;
		tweak->ooblen = nanddev_per_page_oobsize(nand);
		tweak->oobbuf.in = ctx->spare_oobbuf;
		memset(tweak->oobbuf.in, 0xFF, ctx->oob_buffer_size);
	}

	/* Copy the data that must be written in the bounce buffers, if needed */
	if (orig->type == NAND_PAGE_WRITE) {
		if (ctx->bounce_data)
			memcpy((void *)tweak->databuf.out + orig->dataoffs,
			       orig->databuf.out, orig->datalen);

		if (ctx->bounce_oob)
			memcpy((void *)tweak->oobbuf.out + orig->ooboffs,
			       orig->oobbuf.out, orig->ooblen);
	}
}
EXPORT_SYMBOL_GPL(nand_ecc_tweak_req);

void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
			  struct nand_page_io_req *req)
{
	struct nand_page_io_req *orig, *tweak;

	orig = &ctx->orig_req;
	tweak = req;

	/* Restore the data read from the bounce buffers, if needed */
	if (orig->type == NAND_PAGE_READ) {
		if (ctx->bounce_data)
			memcpy(orig->databuf.in,
			       tweak->databuf.in + orig->dataoffs,
			       orig->datalen);

		if (ctx->bounce_oob)
			memcpy(orig->oobbuf.in,
			       tweak->oobbuf.in + orig->ooboffs,
			       orig->ooblen);
	}

	/* Ensure the original request is restored */
	*req = *orig;
}
EXPORT_SYMBOL_GPL(nand_ecc_restore_req);
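
/*
 * The tweak/restore pair is meant to bracket an engine's I/O hooks. A
 * condensed sketch, assuming a hypothetical engine context embedding a
 * struct nand_ecc_req_tweak_ctx named req_ctx:
 *
 *	static int myengine_prepare_io_req(struct nand_device *nand,
 *					   struct nand_page_io_req *req)
 *	{
 *		struct myengine_ctx *ctx = nand_to_ecc_ctx(nand);
 *
 *		nand_ecc_tweak_req(&ctx->req_ctx, req);
 *
 *		// ... enable/disable the engine, compute ECC bytes, ...
 *		return 0;
 *	}
 *
 *	static int myengine_finish_io_req(struct nand_device *nand,
 *					  struct nand_page_io_req *req)
 *	{
 *		struct myengine_ctx *ctx = nand_to_ecc_ctx(nand);
 *		int ret = 0;
 *
 *		// ... correct the data, derive the bitflip count, ...
 *		nand_ecc_restore_req(&ctx->req_ctx, req);
 *
 *		return ret;
 *	}
 */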

struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand)
{
	unsigned int algo = nand->ecc.user_conf.algo;

	if (algo == NAND_ECC_ALGO_UNKNOWN)
		algo = nand->ecc.defaults.algo;

	switch (algo) {
	case NAND_ECC_ALGO_HAMMING:
		return nand_ecc_sw_hamming_get_engine();
	case NAND_ECC_ALGO_BCH:
		return nand_ecc_sw_bch_get_engine();
	default:
		break;
	}

	return NULL;
}
EXPORT_SYMBOL(nand_ecc_get_sw_engine);

struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand)
{
	return nand->ecc.ondie_engine;
}
EXPORT_SYMBOL(nand_ecc_get_on_die_hw_engine);

int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine)
{
	struct nand_ecc_engine *item;

	if (!engine)
		return -EINVAL;

	mutex_lock(&on_host_hw_engines_mutex);

	/* Prevent multiple registrations of one engine */
	list_for_each_entry(item, &on_host_hw_engines, node) {
		if (item == engine) {
			mutex_unlock(&on_host_hw_engines_mutex);
			return 0;
		}
	}

	list_add_tail(&engine->node, &on_host_hw_engines);
	mutex_unlock(&on_host_hw_engines_mutex);

	return 0;
}
EXPORT_SYMBOL(nand_ecc_register_on_host_hw_engine);

int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine)
{
	if (!engine)
		return -EINVAL;

	mutex_lock(&on_host_hw_engines_mutex);
	list_del(&engine->node);
	mutex_unlock(&on_host_hw_engines_mutex);

	return 0;
}
EXPORT_SYMBOL(nand_ecc_unregister_on_host_hw_engine);
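
/*
 * An on-host engine is typically registered from its platform driver's
 * probe and unregistered on remove. A condensed sketch with hypothetical
 * myengine_* names, assuming eng->ops and the integration type are filled
 * according to the hardware:
 *
 *	static int myengine_probe(struct platform_device *pdev)
 *	{
 *		struct nand_ecc_engine *eng;
 *
 *		eng = devm_kzalloc(&pdev->dev, sizeof(*eng), GFP_KERNEL);
 *		if (!eng)
 *			return -ENOMEM;
 *
 *		eng->dev = &pdev->dev;
 *		eng->ops = &myengine_ops;
 *
 *		return nand_ecc_register_on_host_hw_engine(eng);
 *	}
 */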

static struct nand_ecc_engine *nand_ecc_match_on_host_hw_engine(struct device *dev)
{
	struct nand_ecc_engine *item;

	list_for_each_entry(item, &on_host_hw_engines, node)
		if (item->dev == dev)
			return item;

	return NULL;
}

struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand)
{
	struct nand_ecc_engine *engine = NULL;
	struct device *dev = &nand->mtd.dev;
	struct platform_device *pdev;
	struct device_node *np;

	if (list_empty(&on_host_hw_engines))
		return NULL;

	/* Check for an explicit nand-ecc-engine property */
	np = of_parse_phandle(dev->of_node, "nand-ecc-engine", 0);
	if (np) {
		pdev = of_find_device_by_node(np);
		if (!pdev) {
			/* The engine may simply not have probed yet */
			of_node_put(np);
			return ERR_PTR(-EPROBE_DEFER);
		}

		engine = nand_ecc_match_on_host_hw_engine(&pdev->dev);
		platform_device_put(pdev);
		of_node_put(np);

		if (!engine)
			return ERR_PTR(-EPROBE_DEFER);
	}

	if (engine)
		get_device(engine->dev);

	return engine;
}
EXPORT_SYMBOL(nand_ecc_get_on_host_hw_engine);

void nand_ecc_put_on_host_hw_engine(struct nand_device *nand)
{
	put_device(nand->ecc.engine->dev);
}
EXPORT_SYMBOL(nand_ecc_put_on_host_hw_engine);

/*
 * In the case of a pipelined engine, the device registering the ECC
 * engine is not necessarily the ECC engine itself but may be a host controller.
 * It is then useful to provide a helper to retrieve the right device object
 * which actually represents the ECC engine.
 */
struct device *nand_ecc_get_engine_dev(struct device *host)
{
	struct platform_device *ecc_pdev;
	struct device_node *np;

	/*
	 * If the device node contains this property, it means we need to follow
	 * it in order to get the right ECC engine device we are looking for.
	 */
	np = of_parse_phandle(host->of_node, "nand-ecc-engine", 0);
	if (!np)
		return host;

	ecc_pdev = of_find_device_by_node(np);
	if (!ecc_pdev) {
		of_node_put(np);
		return NULL;
	}

	/* Only a borrowed pointer is returned, drop the references right away */
	platform_device_put(ecc_pdev);
	of_node_put(np);

	return &ecc_pdev->dev;
}
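
/*
 * A sketch of the intended use, from a hypothetical pipelined engine driver
 * entered with the host controller's device: the helper resolves the device
 * actually backing the ECC engine, or returns the host itself when no
 * nand-ecc-engine phandle is present.
 *
 *	struct device *dev = nand_ecc_get_engine_dev(&pdev->dev);
 *
 *	if (!dev)
 *		return -EINVAL;
 */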

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Generic ECC engine");