// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Macronix external hardware ECC engine for NAND devices, also
 * called DPE for Data Processing Engine.
 *
 * Copyright © 2019 Macronix
 * Author: Miquel Raynal <miquel.raynal@bootlin.com>
 */

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand-ecc-mxic.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* DPE Configuration */
#define DP_CONFIG			0x00
#define   ECC_EN			BIT(0)
#define   ECC_TYP(idx)			(((idx) << 3) & GENMASK(6, 3))
/* DPE Interrupt Status */
#define INTRPT_STS			0x04
#define   TRANS_CMPLT			BIT(0)
#define   SDMA_MAIN			BIT(1)
#define   SDMA_SPARE			BIT(2)
#define   ECC_ERR			BIT(3)
#define   TO_SPARE			BIT(4)
#define   TO_MAIN			BIT(5)
/* DPE Interrupt Status Enable */
#define INTRPT_STS_EN			0x08
/* DPE Interrupt Signal Enable */
#define INTRPT_SIG_EN			0x0C
/* Host Controller Configuration */
#define HC_CONFIG			0x10
#define   DEV2MEM			0 /* TRANS_TYP_DMA in the spec */
#define   MEM2MEM			BIT(4) /* TRANS_TYP_IO in the spec */
#define   MAPPING			BIT(5) /* TRANS_TYP_MAPPING in the spec */
#define   ECC_PACKED			0 /* LAYOUT_TYP_INTEGRATED in the spec */
#define   ECC_INTERLEAVED		BIT(2) /* LAYOUT_TYP_DISTRIBUTED in the spec */
#define   BURST_TYP_FIXED		0
#define   BURST_TYP_INCREASING		BIT(0)
/* Host Controller Slave Address */
#define HC_SLV_ADDR			0x14
/* ECC Chunk Size */
#define CHUNK_SIZE			0x20
/* Main Data Size */
#define MAIN_SIZE			0x24
/* Spare Data Size */
#define SPARE_SIZE			0x28
#define   META_SZ(reg)			((reg) & GENMASK(7, 0))
#define   PARITY_SZ(reg)		(((reg) & GENMASK(15, 8)) >> 8)
#define   RSV_SZ(reg)			(((reg) & GENMASK(23, 16)) >> 16)
#define   SPARE_SZ(reg)			((reg) >> 24)
/* ECC Chunk Count */
#define CHUNK_CNT			0x30
/* SDMA Control */
#define SDMA_CTRL			0x40
#define   WRITE_NAND			0
#define   READ_NAND			BIT(1)
#define   CONT_NAND			BIT(29)
#define   CONT_SYSM			BIT(30) /* Continue System Memory? */
#define   SDMA_STRT			BIT(31)
/* SDMA Address of Main Data */
#define SDMA_MAIN_ADDR			0x44
/* SDMA Address of Spare Data */
#define SDMA_SPARE_ADDR			0x48
/* DPE Version Number */
#define DP_VER				0xD0
#define   DP_VER_OFFSET			16

/* Status bytes between each chunk of spare data */
#define STAT_BYTES			4
#define   NO_ERR			0x00
#define   MAX_CORR_ERR			0x28
#define   UNCORR_ERR			0xFE
#define   ERASED_CHUNK			0xFF

struct mxic_ecc_engine {
	struct device *dev;
	void __iomem *regs;
	int irq;
	struct completion complete;
	struct nand_ecc_engine external_engine;
	struct nand_ecc_engine pipelined_engine;
	struct mutex lock;
};

struct mxic_ecc_ctx {
	/* ECC machinery */
	unsigned int data_step_sz;
	unsigned int oob_step_sz;
	unsigned int parity_sz;
	unsigned int meta_sz;
	u8 *status;
	int steps;

	/* DMA boilerplate */
	struct nand_ecc_req_tweak_ctx req_ctx;
	u8 *oobwithstat;
	struct scatterlist sg[2];
	struct nand_page_io_req *req;
	unsigned int pageoffs;
};

static struct mxic_ecc_engine *ext_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
{
	return container_of(eng, struct mxic_ecc_engine, external_engine);
}

static struct mxic_ecc_engine *pip_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
{
	return container_of(eng, struct mxic_ecc_engine, pipelined_engine);
}

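/* Pick the embedded engine matching how this ECC block is integrated */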
static struct mxic_ecc_engine *nand_to_mxic(struct nand_device *nand)
{
	struct nand_ecc_engine *eng = nand->ecc.engine;

	if (eng->integration == NAND_ECC_ENGINE_INTEGRATION_EXTERNAL)
		return ext_ecc_eng_to_mxic(eng);
	else
		return pip_ecc_eng_to_mxic(eng);
}

static int mxic_ecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);

	if (section < 0 || section >= ctx->steps)
		return -ERANGE;

	oobregion->offset = (section * ctx->oob_step_sz) + ctx->meta_sz;
	oobregion->length = ctx->parity_sz;

	return 0;
}

static int mxic_ecc_ooblayout_free(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);

	if (section < 0 || section >= ctx->steps)
		return -ERANGE;

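	/* Leave the first two OOB bytes free for the bad block marker */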
	if (!section) {
		oobregion->offset = 2;
		oobregion->length = ctx->meta_sz - 2;
	} else {
		oobregion->offset = section * ctx->oob_step_sz;
		oobregion->length = ctx->meta_sz;
	}

	return 0;
}

static const struct mtd_ooblayout_ops mxic_ecc_ooblayout_ops = {
	.ecc = mxic_ecc_ooblayout_ecc,
	.free = mxic_ecc_ooblayout_free,
};

static void mxic_ecc_disable_engine(struct mxic_ecc_engine *mxic)
{
	u32 reg;

	reg = readl(mxic->regs + DP_CONFIG);
	reg &= ~ECC_EN;
	writel(reg, mxic->regs + DP_CONFIG);
}

static void mxic_ecc_enable_engine(struct mxic_ecc_engine *mxic)
{
	u32 reg;

	reg = readl(mxic->regs + DP_CONFIG);
	reg |= ECC_EN;
	writel(reg, mxic->regs + DP_CONFIG);
}

static void mxic_ecc_disable_int(struct mxic_ecc_engine *mxic)
{
	writel(0, mxic->regs + INTRPT_SIG_EN);
}

static void mxic_ecc_enable_int(struct mxic_ecc_engine *mxic)
{
	writel(TRANS_CMPLT, mxic->regs + INTRPT_SIG_EN);
}

static irqreturn_t mxic_ecc_isr(int irq, void *dev_id)
{
	struct mxic_ecc_engine *mxic = dev_id;
	u32 sts;

	sts = readl(mxic->regs + INTRPT_STS);
	if (!sts)
		return IRQ_NONE;

	if (sts & TRANS_CMPLT)
		complete(&mxic->complete);

	writel(sts, mxic->regs + INTRPT_STS);

	return IRQ_HANDLED;
}

static int mxic_ecc_init_ctx(struct nand_device *nand, struct device *dev)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
	struct nand_ecc_props *reqs = &nand->ecc.requirements;
	struct nand_ecc_props *user = &nand->ecc.user_conf;
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	int step_size = 0, strength = 0, desired_correction = 0, steps, idx;
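	/* Strength (in bits) and spare area (in bytes) per ECC_TYP index */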
	static const int possible_strength[] = {4, 8, 40, 48};
	static const int spare_size[] = {32, 32, 96, 96};
	struct mxic_ecc_ctx *ctx;
	u32 spare_reg;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	nand->ecc.ctx.priv = ctx;

	/* Only large page NAND chips may use BCH */
	if (mtd->oobsize < 64) {
		pr_err("BCH cannot be used with small page NAND chips\n");
		return -EINVAL;
	}

	mtd_set_ooblayout(mtd, &mxic_ecc_ooblayout_ops);

	/* Enable all status bits */
	writel(TRANS_CMPLT | SDMA_MAIN | SDMA_SPARE | ECC_ERR |
	       TO_SPARE | TO_MAIN, mxic->regs + INTRPT_STS_EN);

	/* Configure the correction depending on the NAND device topology */
	if (user->step_size && user->strength) {
		step_size = user->step_size;
		strength = user->strength;
	} else if (reqs->step_size && reqs->strength) {
		step_size = reqs->step_size;
		strength = reqs->strength;
	}

	if (step_size && strength) {
		steps = mtd->writesize / step_size;
		desired_correction = steps * strength;
	}

	/* Step size is fixed to 1kiB, strength may vary (4 possible values) */
	conf->step_size = SZ_1K;
	steps = mtd->writesize / conf->step_size;

	ctx->status = devm_kcalloc(dev, steps, sizeof(u8), GFP_KERNEL);
	if (!ctx->status)
		return -ENOMEM;

	if (desired_correction) {
		strength = desired_correction / steps;

		for (idx = 0; idx < ARRAY_SIZE(possible_strength); idx++)
			if (possible_strength[idx] >= strength)
				break;

		idx = min_t(unsigned int, idx,
			    ARRAY_SIZE(possible_strength) - 1);
	} else {
		/* Missing data, maximize the correction */
		idx = ARRAY_SIZE(possible_strength) - 1;
	}

	/* Tune the selected strength until it fits in the OOB area */
	for (; idx >= 0; idx--) {
		if (spare_size[idx] * steps <= mtd->oobsize)
			break;
	}

	/* This engine cannot be used with this NAND device */
	if (idx < 0)
		return -EINVAL;

	/* Configure the engine for the desired strength */
	writel(ECC_TYP(idx), mxic->regs + DP_CONFIG);
	conf->strength = possible_strength[idx];
	spare_reg = readl(mxic->regs + SPARE_SIZE);

	ctx->steps = steps;
	ctx->data_step_sz = mtd->writesize / steps;
	ctx->oob_step_sz = mtd->oobsize / steps;
	ctx->parity_sz = PARITY_SZ(spare_reg);
	ctx->meta_sz = META_SZ(spare_reg);

	/* Ensure buffers will contain enough bytes to store the STAT_BYTES */
	ctx->req_ctx.oob_buffer_size = nanddev_per_page_oobsize(nand) +
				       (ctx->steps * STAT_BYTES);
	ret = nand_ecc_init_req_tweaking(&ctx->req_ctx, nand);
	if (ret)
		return ret;

	ctx->oobwithstat = kmalloc(mtd->oobsize + (ctx->steps * STAT_BYTES),
				   GFP_KERNEL);
	if (!ctx->oobwithstat) {
		ret = -ENOMEM;
		goto cleanup_req_tweak;
	}

	sg_init_table(ctx->sg, 2);

	/* Configuration dump and sanity checks */
	dev_dbg(dev, "DPE version number: %d\n",
		readl(mxic->regs + DP_VER) >> DP_VER_OFFSET);
	dev_dbg(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE));
	dev_dbg(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE));
	dev_dbg(dev, "Spare size: %d\n", SPARE_SZ(spare_reg));
	dev_dbg(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg));
	dev_dbg(dev, "Parity size: %d\n", ctx->parity_sz);
	dev_dbg(dev, "Meta size: %d\n", ctx->meta_sz);

	if ((ctx->meta_sz + ctx->parity_sz + RSV_SZ(spare_reg)) !=
	    SPARE_SZ(spare_reg)) {
		dev_err(dev, "Wrong OOB configuration: %d + %d + %ld != %d\n",
			ctx->meta_sz, ctx->parity_sz, RSV_SZ(spare_reg),
			SPARE_SZ(spare_reg));
		ret = -EINVAL;
		goto free_oobwithstat;
	}

	if (ctx->oob_step_sz != SPARE_SZ(spare_reg)) {
		dev_err(dev, "Wrong OOB configuration: %d != %d\n",
			ctx->oob_step_sz, SPARE_SZ(spare_reg));
		ret = -EINVAL;
		goto free_oobwithstat;
	}

	return 0;

free_oobwithstat:
	kfree(ctx->oobwithstat);
cleanup_req_tweak:
	nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);

	return ret;
}

static int mxic_ecc_init_ctx_external(struct nand_device *nand)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct device *dev = nand->ecc.engine->dev;
	int ret;

	dev_info(dev, "Macronix ECC engine in external mode\n");

	ret = mxic_ecc_init_ctx(nand, dev);
	if (ret)
		return ret;

	/* Trigger each step manually */
	writel(1, mxic->regs + CHUNK_CNT);
	writel(BURST_TYP_INCREASING | ECC_PACKED | MEM2MEM,
	       mxic->regs + HC_CONFIG);

	return 0;
}

static int mxic_ecc_init_ctx_pipelined(struct nand_device *nand)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct mxic_ecc_ctx *ctx;
	struct device *dev;
	int ret;

	dev = nand_ecc_get_engine_dev(nand->ecc.engine->dev);
	if (!dev)
		return -EINVAL;

	dev_info(dev, "Macronix ECC engine in pipelined/mapping mode\n");

	ret = mxic_ecc_init_ctx(nand, dev);
	if (ret)
		return ret;

	ctx = nand_to_ecc_ctx(nand);

	/* All steps should be handled in one go directly by the internal DMA */
	writel(ctx->steps, mxic->regs + CHUNK_CNT);

	/*
	 * Interleaved ECC scheme cannot be used otherwise factory bad block
	 * markers would be lost. A packed layout is mandatory.
	 */
	writel(BURST_TYP_INCREASING | ECC_PACKED | MAPPING,
	       mxic->regs + HC_CONFIG);

	return 0;
}

static void mxic_ecc_cleanup_ctx(struct nand_device *nand)
{
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);

	if (ctx) {
		nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
		kfree(ctx->oobwithstat);
	}
}

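/* Wait for a transfer to complete, by interrupt if available, else polling */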
static int mxic_ecc_data_xfer_wait_for_completion(struct mxic_ecc_engine *mxic)
{
	u32 val;
	int ret;

	if (mxic->irq) {
		reinit_completion(&mxic->complete);
		mxic_ecc_enable_int(mxic);
		ret = wait_for_completion_timeout(&mxic->complete,
						  msecs_to_jiffies(1000));
		ret = ret ? 0 : -ETIMEDOUT;
		mxic_ecc_disable_int(mxic);
	} else {
		ret = readl_poll_timeout(mxic->regs + INTRPT_STS, val,
					 val & TRANS_CMPLT, 10, USEC_PER_SEC);
		writel(val, mxic->regs + INTRPT_STS);
	}

	if (ret) {
		dev_err(mxic->dev, "Timeout on data xfer completion\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int mxic_ecc_process_data(struct mxic_ecc_engine *mxic,
				 unsigned int direction)
{
	unsigned int dir = (direction == NAND_PAGE_READ) ?
			   READ_NAND : WRITE_NAND;
	int ret;

	mxic_ecc_enable_engine(mxic);

	/* Trigger processing */
	writel(SDMA_STRT | dir, mxic->regs + SDMA_CTRL);

	/* Wait for completion */
	ret = mxic_ecc_data_xfer_wait_for_completion(mxic);

	mxic_ecc_disable_engine(mxic);

	return ret;
}

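/**
 * mxic_ecc_process_data_pipelined() - Trigger a pipelined ECC transfer
 * @eng: Pipelined ECC engine
 * @direction: NAND_PAGE_READ or NAND_PAGE_WRITE
 * @dirmap: DMA address of the direct mapping; when non-zero it is written to
 *          the host controller slave address register before the transfer
 *
 * Helper for host controller drivers embedding this engine on their data
 * path.
 *
 * Return: 0 on success, -ETIMEDOUT if the transfer did not complete in time.
 */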
int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
				    unsigned int direction, dma_addr_t dirmap)
{
	struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng);

	if (dirmap)
		writel(dirmap, mxic->regs + HC_SLV_ADDR);

	return mxic_ecc_process_data(mxic, direction);
}
EXPORT_SYMBOL_GPL(mxic_ecc_process_data_pipelined);

static void mxic_ecc_extract_status_bytes(struct mxic_ecc_ctx *ctx)
{
	u8 *buf = ctx->oobwithstat;
	int next_stat_pos;
	int step;

	/* Extract the ECC status */
	for (step = 0; step < ctx->steps; step++) {
		next_stat_pos = ctx->oob_step_sz +
				((STAT_BYTES + ctx->oob_step_sz) * step);

		ctx->status[step] = buf[next_stat_pos];
	}
}

static void mxic_ecc_reconstruct_oobbuf(struct mxic_ecc_ctx *ctx,
					u8 *dst, const u8 *src)
{
	int step;

	/* Reconstruct the OOB buffer linearly (without the ECC status bytes) */
	for (step = 0; step < ctx->steps; step++)
		memcpy(dst + (step * ctx->oob_step_sz),
		       src + (step * (ctx->oob_step_sz + STAT_BYTES)),
		       ctx->oob_step_sz);
}

static void mxic_ecc_add_room_in_oobbuf(struct mxic_ecc_ctx *ctx,
					u8 *dst, const u8 *src)
{
	int step;

	/* Add some space in the OOB buffer for the status bytes */
	for (step = 0; step < ctx->steps; step++)
		memcpy(dst + (step * (ctx->oob_step_sz + STAT_BYTES)),
		       src + (step * ctx->oob_step_sz),
		       ctx->oob_step_sz);
}

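/* Return the maximum number of bitflips in a chunk, or -EBADMSG on failure */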
static int mxic_ecc_count_biterrs(struct mxic_ecc_engine *mxic,
				  struct nand_device *nand)
{
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct device *dev = mxic->dev;
	unsigned int max_bf = 0;
	bool failure = false;
	int step;

	for (step = 0; step < ctx->steps; step++) {
		u8 stat = ctx->status[step];

		if (stat == NO_ERR) {
			dev_dbg(dev, "ECC step %d: no error\n", step);
		} else if (stat == ERASED_CHUNK) {
			dev_dbg(dev, "ECC step %d: erased\n", step);
		} else if (stat == UNCORR_ERR || stat > MAX_CORR_ERR) {
			dev_dbg(dev, "ECC step %d: uncorrectable\n", step);
			mtd->ecc_stats.failed++;
			failure = true;
		} else {
			dev_dbg(dev, "ECC step %d: %d bits corrected\n",
				step, stat);
			max_bf = max_t(unsigned int, max_bf, stat);
			mtd->ecc_stats.corrected += stat;
		}
	}

	return failure ? -EBADMSG : max_bf;
}

/* External ECC engine helpers */
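/*
 * In external mode, write requests are fully processed in the prepare hook
 * (the parity bytes must exist before the page hits the NAND), while read
 * requests are corrected in the finish hook (once the raw data is available).
 */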
static int mxic_ecc_prepare_io_req_external(struct nand_device *nand,
					    struct nand_page_io_req *req)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	int offset, nents, step, ret;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	nand_ecc_tweak_req(&ctx->req_ctx, req);
	ctx->req = req;

	if (req->type == NAND_PAGE_READ)
		return 0;

	mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat,
				    ctx->req->oobbuf.out);

	sg_set_buf(&ctx->sg[0], req->databuf.out, req->datalen);
	sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
		   req->ooblen + (ctx->steps * STAT_BYTES));

	nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	mutex_lock(&mxic->lock);

	for (step = 0; step < ctx->steps; step++) {
		writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
		       mxic->regs + SDMA_MAIN_ADDR);
		writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
		       mxic->regs + SDMA_SPARE_ADDR);
		ret = mxic_ecc_process_data(mxic, ctx->req->type);
		if (ret)
			break;
	}

	mutex_unlock(&mxic->lock);

	dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);

	if (ret)
		return ret;

	/* Retrieve the calculated ECC bytes */
	for (step = 0; step < ctx->steps; step++) {
		offset = ctx->meta_sz + (step * ctx->oob_step_sz);
		mtd_ooblayout_get_eccbytes(mtd,
					   (u8 *)ctx->req->oobbuf.out + offset,
					   ctx->oobwithstat + (step * STAT_BYTES),
					   step * ctx->parity_sz,
					   ctx->parity_sz);
	}

	return 0;
}

static int mxic_ecc_finish_io_req_external(struct nand_device *nand,
					   struct nand_page_io_req *req)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
	int nents, step, ret;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	if (req->type == NAND_PAGE_WRITE) {
		nand_ecc_restore_req(&ctx->req_ctx, req);
		return 0;
	}

	/* Copy the OOB buffer and add room for the ECC engine status bytes */
	mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);

	sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
	sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
		   req->ooblen + (ctx->steps * STAT_BYTES));
	nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	mutex_lock(&mxic->lock);

	for (step = 0; step < ctx->steps; step++) {
		writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
		       mxic->regs + SDMA_MAIN_ADDR);
		writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
		       mxic->regs + SDMA_SPARE_ADDR);
		ret = mxic_ecc_process_data(mxic, ctx->req->type);
		if (ret)
			break;
	}

	mutex_unlock(&mxic->lock);

	dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);

	if (ret) {
		nand_ecc_restore_req(&ctx->req_ctx, req);
		return ret;
	}

	/* Extract the status bytes and reconstruct the buffer */
	mxic_ecc_extract_status_bytes(ctx);
	mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in, ctx->oobwithstat);

	nand_ecc_restore_req(&ctx->req_ctx, req);

	return mxic_ecc_count_biterrs(mxic, nand);
}

/* Pipelined ECC engine helpers */
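/*
 * In pipelined mode the engine lock is taken in the prepare hook and only
 * released in the finish hook, once the host controller has actually moved
 * the data through the engine.
 */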
static int mxic_ecc_prepare_io_req_pipelined(struct nand_device *nand,
					     struct nand_page_io_req *req)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
	int nents;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	nand_ecc_tweak_req(&ctx->req_ctx, req);
	ctx->req = req;

	/* Copy the OOB buffer and add room for the ECC engine status bytes */
	mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);

	sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
	sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
		   req->ooblen + (ctx->steps * STAT_BYTES));

	nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	mutex_lock(&mxic->lock);

	writel(sg_dma_address(&ctx->sg[0]), mxic->regs + SDMA_MAIN_ADDR);
	writel(sg_dma_address(&ctx->sg[1]), mxic->regs + SDMA_SPARE_ADDR);

	return 0;
}

static int mxic_ecc_finish_io_req_pipelined(struct nand_device *nand,
					    struct nand_page_io_req *req)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
	int ret = 0;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	mutex_unlock(&mxic->lock);

	dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);

	if (req->type == NAND_PAGE_READ) {
		mxic_ecc_extract_status_bytes(ctx);
		mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in,
					    ctx->oobwithstat);
		ret = mxic_ecc_count_biterrs(mxic, nand);
	}

	nand_ecc_restore_req(&ctx->req_ctx, req);

	return ret;
}

static struct nand_ecc_engine_ops mxic_ecc_engine_external_ops = {
	.init_ctx = mxic_ecc_init_ctx_external,
	.cleanup_ctx = mxic_ecc_cleanup_ctx,
	.prepare_io_req = mxic_ecc_prepare_io_req_external,
	.finish_io_req = mxic_ecc_finish_io_req_external,
};

static struct nand_ecc_engine_ops mxic_ecc_engine_pipelined_ops = {
	.init_ctx = mxic_ecc_init_ctx_pipelined,
	.cleanup_ctx = mxic_ecc_cleanup_ctx,
	.prepare_io_req = mxic_ecc_prepare_io_req_pipelined,
	.finish_io_req = mxic_ecc_finish_io_req_pipelined,
};

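/**
 * mxic_ecc_get_pipelined_ops() - Retrieve the pipelined ECC engine operations
 *
 * Return: the operations a host controller driver should attach to the ECC
 *	   engine it declares in pipelined/mapping mode.
 */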
struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
{
	return &mxic_ecc_engine_pipelined_ops;
}
EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_ops);

static struct platform_device *
mxic_ecc_get_pdev(struct platform_device *spi_pdev)
{
	struct platform_device *eng_pdev;
	struct device_node *np;

	/* Retrieve the nand-ecc-engine phandle */
	np = of_parse_phandle(spi_pdev->dev.of_node, "nand-ecc-engine", 0);
	if (!np)
		return NULL;

	/* Jump to the engine's device node */
	eng_pdev = of_find_device_by_node(np);
	of_node_put(np);

	return eng_pdev;
}

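/**
 * mxic_ecc_put_pipelined_engine() - Release a pipelined ECC engine
 * @eng: Engine previously retrieved with mxic_ecc_get_pipelined_engine()
 *
 * Drops the reference taken on the engine platform device.
 */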
void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng)
{
	struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng);

	platform_device_put(to_platform_device(mxic->dev));
}
EXPORT_SYMBOL_GPL(mxic_ecc_put_pipelined_engine);

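/**
 * mxic_ecc_get_pipelined_engine() - Retrieve a pipelined ECC engine
 * @spi_pdev: Platform device of the host controller pointing at the engine
 *	      through its "nand-ecc-engine" DT property
 *
 * Return: the ECC engine on success, -ENODEV if the phandle is missing and
 *	   -EPROBE_DEFER if the engine has not been probed yet.
 */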
struct nand_ecc_engine *
mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev)
{
	struct platform_device *eng_pdev;
	struct mxic_ecc_engine *mxic;

	eng_pdev = mxic_ecc_get_pdev(spi_pdev);
	if (!eng_pdev)
		return ERR_PTR(-ENODEV);

	mxic = platform_get_drvdata(eng_pdev);
	if (!mxic) {
		platform_device_put(eng_pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	return &mxic->pipelined_engine;
}
EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_engine);
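
/*
 * A minimal usage sketch for a hypothetical host controller driver wrapping
 * this engine in pipelined mode (error handling elided, the exact wiring
 * depends on the host driver):
 *
 *	eng = mxic_ecc_get_pipelined_engine(pdev);
 *	eng->ops = mxic_ecc_get_pipelined_ops();
 *	nand->ecc.engine = eng;
 *	...
 *	mxic_ecc_process_data_pipelined(eng, NAND_PAGE_READ, dirmap_dma_addr);
 *	...
 *	mxic_ecc_put_pipelined_engine(eng);
 */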

/*
 * Only the external ECC engine is exported as the pipelined one is SoC
 * specific, so it is registered directly by the host controller drivers
 * that wrap it.
 */
static int mxic_ecc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mxic_ecc_engine *mxic;
	int ret;

	mxic = devm_kzalloc(&pdev->dev, sizeof(*mxic), GFP_KERNEL);
	if (!mxic)
		return -ENOMEM;

	mxic->dev = &pdev->dev;

	/*
	 * Both memory regions for the ECC engine itself and the AXI slave
	 * address are mandatory.
	 */
	mxic->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mxic->regs)) {
		dev_err(&pdev->dev, "Missing memory region\n");
		return PTR_ERR(mxic->regs);
	}

	mxic_ecc_disable_engine(mxic);
	mxic_ecc_disable_int(mxic);

	/* IRQ is optional yet much more efficient */
	mxic->irq = platform_get_irq_byname_optional(pdev, "ecc-engine");
	if (mxic->irq > 0) {
		ret = devm_request_irq(&pdev->dev, mxic->irq, mxic_ecc_isr, 0,
				       "mxic-ecc", mxic);
		if (ret)
			return ret;
	} else {
		dev_info(dev, "Invalid or missing IRQ, fallback to polling\n");
		mxic->irq = 0;
	}

	mutex_init(&mxic->lock);

	/*
	 * In external mode, the device is the ECC engine. In pipelined mode,
	 * the device is the host controller. The device is used to match the
	 * right ECC engine based on the DT properties.
	 */
	mxic->external_engine.dev = &pdev->dev;
	mxic->external_engine.integration = NAND_ECC_ENGINE_INTEGRATION_EXTERNAL;
	mxic->external_engine.ops = &mxic_ecc_engine_external_ops;

	nand_ecc_register_on_host_hw_engine(&mxic->external_engine);

	platform_set_drvdata(pdev, mxic);

	return 0;
}

static void mxic_ecc_remove(struct platform_device *pdev)
{
	struct mxic_ecc_engine *mxic = platform_get_drvdata(pdev);

	nand_ecc_unregister_on_host_hw_engine(&mxic->external_engine);
}

static const struct of_device_id mxic_ecc_of_ids[] = {
	{
		.compatible = "mxicy,nand-ecc-engine-rev3",
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mxic_ecc_of_ids);

static struct platform_driver mxic_ecc_driver = {
	.driver = {
		.name = "mxic-nand-ecc-engine",
		.of_match_table = mxic_ecc_of_ids,
	},
	.probe = mxic_ecc_probe,
	.remove_new = mxic_ecc_remove,
};
module_platform_driver(mxic_ecc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Macronix NAND hardware ECC controller");