// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MTD device concatenation layer
 *
 * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
 * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>

/*
 * Our storage structure:
 * subdev points to an array of pointers to struct mtd_info objects,
 * which is allocated together with this structure itself.
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
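
/*
 * For illustration (a sketch of what mtd_concat_create() does below):
 * for two subdevices, one allocation covers both the structure and the
 * pointer array, and subdev is pointed just past the structure:
 *
 *	concat = kzalloc(SIZEOF_STRUCT_MTD_CONCAT(2), GFP_KERNEL);
 *	concat->subdev = (struct mtd_info **)(concat + 1);
 */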

/*
 * Given a pointer to the embedded MTD object, this macro retrieves a
 * pointer to the containing mtd_concat structure. It relies on mtd
 * being the first member of struct mtd_concat.
 */
#define CONCAT(x) ((struct mtd_concat *)(x))

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */
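/*
 * For example (a sketch): with two 16 MiB subdevices, a request at
 * offset 20 MiB skips subdev[0] (subtracting its 16 MiB size from the
 * offset) and is issued to subdev[1] at offset 4 MiB. A request that
 * crosses a boundary is split, with the remainder continuing at
 * offset 0 of the next subdevice.
 */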

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t *retlen, u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}

static int
concat_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		   size_t *retlen, const u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		err = mtd_panic_write(subdev, to, size, &retsize, buf);
		if (err == -EOPNOTSUPP) {
			printk(KERN_ERR "mtdconcat: Cannot write from panic without panic_write\n");
			return err;
		}
		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t *retlen, const u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		err = mtd_write(subdev, to, size, &retsize, buf);
		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
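		/* do_div() divides __to in place and returns the remainder */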
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size; /* store for future use */

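		/*
		 * Advance entry_high until the subdevice boundary falls
		 * inside vecs_copy[entry_high]; that vector is then split
		 * across the boundary: its first part is written to this
		 * subdevice, the remainder is left for the next one.
		 */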
		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		err = mtd_writev(subdev, &vecs_copy[entry_low],
				 entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = mtd_read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += ops->oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = mtd_write_oob(subdev, to, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
			concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++)
			;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i)
			;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if (i < 0 || ((instr->addr + instr->len) &
			      (erase_regions[i].erasesize - 1)))
			return -EINVAL;
	}

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		length -= erase->len;
		err = mtd_erase(subdev, erase);
		if (err) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	kfree(erase);

	return err;
}

static int concat_xxlock(struct mtd_info *mtd, loff_t ofs, uint64_t len,
			 bool is_lock)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		if (is_lock)
			err = mtd_lock(subdev, ofs, size);
		else
			err = mtd_unlock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return concat_xxlock(mtd, ofs, len, true);
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return concat_xxlock(mtd, ofs, len, false);
}

static int concat_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		if (ofs + len > subdev->size)
			break;

		return mtd_is_locked(subdev, ofs, len);
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		mtd_sync(subdev);
	}
}

static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		rc = mtd_suspend(subdev);
		if (rc < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		mtd_resume(subdev);
	}
}

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return res;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = mtd_block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = mtd_block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices and returns a pointer to the new device object
 * on success, or NULL on failure. This function does _not_ register
 * any devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices */
				   const char *name)	/* name for the new device */
{
	int i;
	size_t size;
	struct mtd_concat *concat;
	struct mtd_info *subdev_master = NULL;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;
	int max_writebufsize = 0;

	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk(KERN_ERR
		       "memory allocation error while creating concatenated device \"%s\"\n",
		       name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **)(concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;

	for (i = 0; i < num_devs; i++)
		if (max_writebufsize < subdev[i]->writebufsize)
			max_writebufsize = subdev[i]->writebufsize;
	concat->mtd.writebufsize = max_writebufsize;

	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;

	subdev_master = mtd_get_master(subdev[0]);
	if (subdev_master->_writev)
		concat->mtd._writev = concat_writev;
	if (subdev_master->_read_oob)
		concat->mtd._read_oob = concat_read_oob;
	if (subdev_master->_write_oob)
		concat->mtd._write_oob = concat_write_oob;
	if (subdev_master->_block_isbad)
		concat->mtd._block_isbad = concat_block_isbad;
	if (subdev_master->_block_markbad)
		concat->mtd._block_markbad = concat_block_markbad;
	if (subdev_master->_panic_write)
		concat->mtd._panic_write = concat_panic_write;
	if (subdev_master->_read)
		concat->mtd._read = concat_read;
	if (subdev_master->_write)
		concat->mtd._write = concat_write;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk(KERN_ERR "Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->flags) &
			    ~MTD_WRITEABLE) {
				kfree(concat);
				printk(KERN_ERR "Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			}
			/* if the writeable attribute differs,
			   make the super device writeable */
			concat->mtd.flags |= subdev[i]->flags & MTD_WRITEABLE;
		}

		subdev_master = mtd_get_master(subdev[i]);
		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
			subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd._read_oob != !subdev_master->_read_oob ||
		    !concat->mtd._write_oob != !subdev_master->_write_oob) {
			/*
			 * Check against subdev[i] for data members, because
			 * subdev's attributes may be different from master
			 * mtd device. Check against subdev's master mtd
			 * device for callbacks, because the existence of
			 * subdev's callbacks is decided by master mtd device.
			 */
			kfree(concat);
			printk(KERN_ERR "Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	mtd_set_ooblayout(&concat->mtd, subdev[0]->ooblayout);

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd._erase = concat_erase;
	concat->mtd._sync = concat_sync;
	concat->mtd._lock = concat_lock;
	concat->mtd._unlock = concat_unlock;
	concat->mtd._is_locked = concat_is_locked;
	concat->mtd._suspend = concat_suspend;
	concat->mtd._resume = concat_resume;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
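	/*
	 * For example (a sketch): a subdevice with uniform 64 KiB erase
	 * blocks followed by one with uniform 128 KiB erase blocks gives
	 * num_erase_region == 2, so the super device is described by two
	 * erase regions rather than a single uniform erase size.
	 */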
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;

			for (j = 0; j < subdev[i]->numeraseregions; j++) {
				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
						subdev[i]->eraseregions[j].erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
			kmalloc_array(num_erase_region,
				      sizeof(struct mtd_erase_region_info),
				      GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk(KERN_ERR
			       "memory allocation error while creating erase region list for device \"%s\"\n",
			       name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
						curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;

				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].erasesize !=
					    curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
							curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
							subdev[i]->eraseregions[j].erasesize;
						++erase_region_p;
					}
					position +=
						subdev[i]->eraseregions[j].numblocks *
						(uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}

/* Cleans up the context obtained from mtd_concat_create() */
void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);

	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}

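/*
 * Example usage (a sketch, not taken from an in-tree driver): a board
 * driver that has probed two MTD devices, here called "chips[0]" and
 * "chips[1]" (hypothetical names), could combine and register them as
 * one device roughly like this:
 *
 *	struct mtd_info *merged;
 *
 *	merged = mtd_concat_create(chips, 2, "flash-concat");
 *	if (!merged)
 *		return -ENXIO;
 *	err = mtd_device_register(merged, NULL, 0);
 *
 * and later tear it down with mtd_device_unregister(merged) followed
 * by mtd_concat_destroy(merged).
 */
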
EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");