1*83d290c5STom Rini // SPDX-License-Identifier: GPL-2.0+
20a572655SStefan Roese /*
30a572655SStefan Roese * MTD device concatenation layer
40a572655SStefan Roese *
5ff94bc40SHeiko Schocher * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
6ff94bc40SHeiko Schocher * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
70a572655SStefan Roese *
80a572655SStefan Roese * NAND support by Christian Gan <cgan@iders.ca>
90a572655SStefan Roese *
100a572655SStefan Roese */
110a572655SStefan Roese
12ff94bc40SHeiko Schocher #ifndef __UBOOT__
13ff94bc40SHeiko Schocher #include <linux/kernel.h>
14ff94bc40SHeiko Schocher #include <linux/module.h>
15ff94bc40SHeiko Schocher #include <linux/slab.h>
16ff94bc40SHeiko Schocher #include <linux/sched.h>
17ff94bc40SHeiko Schocher #include <linux/types.h>
18ff94bc40SHeiko Schocher #include <linux/backing-dev.h>
19ff94bc40SHeiko Schocher #include <asm/div64.h>
20ff94bc40SHeiko Schocher #else
21ff94bc40SHeiko Schocher #include <div64.h>
227b15e2bbSMike Frysinger #include <linux/compat.h>
23ff94bc40SHeiko Schocher #endif
24ff94bc40SHeiko Schocher
25ff94bc40SHeiko Schocher #include <linux/mtd/mtd.h>
260a572655SStefan Roese #include <linux/mtd/concat.h>
27ff94bc40SHeiko Schocher
280a572655SStefan Roese #include <ubi_uboot.h>
290a572655SStefan Roese
300a572655SStefan Roese /*
310a572655SStefan Roese * Our storage structure:
320a572655SStefan Roese * Subdev points to an array of pointers to struct mtd_info objects
330a572655SStefan Roese * which is allocated along with this structure
340a572655SStefan Roese *
350a572655SStefan Roese */
struct mtd_concat {
	struct mtd_info mtd;		/* the "super" device exposed to callers */
	int num_subdev;			/* number of concatenated subdevices */
	struct mtd_info **subdev;	/* pointer array, allocated together with this struct */
};
410a572655SStefan Roese
420a572655SStefan Roese /*
430a572655SStefan Roese * how to calculate the size required for the above structure,
440a572655SStefan Roese * including the pointer array subdev points to:
450a572655SStefan Roese */
460a572655SStefan Roese #define SIZEOF_STRUCT_MTD_CONCAT(num_subdev) \
470a572655SStefan Roese ((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
480a572655SStefan Roese
490a572655SStefan Roese /*
500a572655SStefan Roese * Given a pointer to the MTD object in the mtd_concat structure,
510a572655SStefan Roese * we can retrieve the pointer to that structure with this macro.
520a572655SStefan Roese */
530a572655SStefan Roese #define CONCAT(x) ((struct mtd_concat *)(x))
540a572655SStefan Roese
550a572655SStefan Roese /*
560a572655SStefan Roese * MTD methods which look up the relevant subdevice, translate the
570a572655SStefan Roese * effective address and pass through to the subdevice.
580a572655SStefan Roese */
590a572655SStefan Roese
/*
 * Read @len bytes at the concatenated offset @from.  The offset is
 * translated into a subdevice-local offset by walking the subdevice
 * array; a request that spans a boundary is split into per-subdevice
 * reads.  ECC events from the subdevices are folded into the super
 * device's ecc_stats counters.
 *
 * Returns 0 on success, the worst ECC result seen (an ECC failure is
 * never overwritten by a mere bitflip), a hard subdevice error, or
 * -EINVAL if the range runs past the end of the concatenated device.
 */
static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

#ifdef __UBOOT__
	/* Linux callers zero *retlen in mtd_read(); do it ourselves here */
	*retlen = 0;
#endif

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		/* Remainder continues at offset 0 of the next subdevice */
		buf += size;
		from = 0;
	}
	/* Ran off the end of the last subdevice with data still pending */
	return -EINVAL;
}
1150a572655SStefan Roese
1160a572655SStefan Roese static int
concat_write(struct mtd_info * mtd,loff_t to,size_t len,size_t * retlen,const u_char * buf)1170a572655SStefan Roese concat_write(struct mtd_info *mtd, loff_t to, size_t len,
1180a572655SStefan Roese size_t * retlen, const u_char * buf)
1190a572655SStefan Roese {
1200a572655SStefan Roese struct mtd_concat *concat = CONCAT(mtd);
1210a572655SStefan Roese int err = -EINVAL;
1220a572655SStefan Roese int i;
1230a572655SStefan Roese
124ff94bc40SHeiko Schocher #ifdef __UBOOT__
1250a572655SStefan Roese *retlen = 0;
126ff94bc40SHeiko Schocher #endif
1270a572655SStefan Roese
1280a572655SStefan Roese for (i = 0; i < concat->num_subdev; i++) {
1290a572655SStefan Roese struct mtd_info *subdev = concat->subdev[i];
1300a572655SStefan Roese size_t size, retsize;
1310a572655SStefan Roese
1320a572655SStefan Roese if (to >= subdev->size) {
1330a572655SStefan Roese size = 0;
1340a572655SStefan Roese to -= subdev->size;
1350a572655SStefan Roese continue;
1360a572655SStefan Roese }
1370a572655SStefan Roese if (to + len > subdev->size)
1380a572655SStefan Roese size = subdev->size - to;
1390a572655SStefan Roese else
1400a572655SStefan Roese size = len;
1410a572655SStefan Roese
142dfe64e2cSSergey Lapin err = mtd_write(subdev, to, size, &retsize, buf);
1430a572655SStefan Roese if (err)
1440a572655SStefan Roese break;
1450a572655SStefan Roese
1460a572655SStefan Roese *retlen += retsize;
1470a572655SStefan Roese len -= size;
1480a572655SStefan Roese if (len == 0)
1490a572655SStefan Roese break;
1500a572655SStefan Roese
1510a572655SStefan Roese err = -EINVAL;
1520a572655SStefan Roese buf += size;
1530a572655SStefan Roese to = 0;
1540a572655SStefan Roese }
1550a572655SStefan Roese return err;
1560a572655SStefan Roese }
1570a572655SStefan Roese
158ff94bc40SHeiko Schocher #ifndef __UBOOT__
/*
 * Scatter-gather write (Linux build only): write the @count buffers in
 * @vecs starting at concatenated offset @to.  A private copy of the
 * vector array is taken so entries can be trimmed in place when a
 * subdevice boundary falls inside one of them.
 *
 * NOTE(review): *retlen is only accumulated, never zeroed here —
 * assumes the caller (mtd_writev) initializes it; verify against the
 * MTD core before relying on it.
 */
static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t * retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		/* Skip subdevices that lie entirely before @to */
		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* Clamp this pass to what fits in the current subdevice */
		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size; /* store for future use */

		/* Find the last vector entry touched by this pass */
		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		/* Temporarily trim the boundary entry to this pass's share */
		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		err = mtd_writev(subdev, &vecs_copy[entry_low],
				 entry_high - entry_low + 1, to, &retsize);

		/* Leave the unwritten tail of the boundary entry for next pass */
		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		/* Next subdevice is written from its start */
		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}
233ff94bc40SHeiko Schocher #endif
234ff94bc40SHeiko Schocher
/*
 * Read data and/or OOB as described by @ops starting at concatenated
 * offset @from, splitting the request at subdevice boundaries.  A
 * local copy of @ops is walked; the caller's retlen/oobretlen are
 * accumulated across subdevices.  ECC events are folded into the
 * super device's ecc_stats, with an ECC failure never being
 * overwritten by a bitflip result.
 */
static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = mtd_read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		/* Advance the data buffer; done when everything was read */
		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		/* Advance the OOB buffer; done when all OOB was read */
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += ops->oobretlen;
		}

		from = 0;
	}
	/* Request ran past the end of the concatenated device */
	return -EINVAL;
}
2910a572655SStefan Roese
2920a572655SStefan Roese static int
concat_write_oob(struct mtd_info * mtd,loff_t to,struct mtd_oob_ops * ops)2930a572655SStefan Roese concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
2940a572655SStefan Roese {
2950a572655SStefan Roese struct mtd_concat *concat = CONCAT(mtd);
2960a572655SStefan Roese struct mtd_oob_ops devops = *ops;
2970a572655SStefan Roese int i, err;
2980a572655SStefan Roese
2990a572655SStefan Roese if (!(mtd->flags & MTD_WRITEABLE))
3000a572655SStefan Roese return -EROFS;
3010a572655SStefan Roese
302ff94bc40SHeiko Schocher ops->retlen = ops->oobretlen = 0;
3030a572655SStefan Roese
3040a572655SStefan Roese for (i = 0; i < concat->num_subdev; i++) {
3050a572655SStefan Roese struct mtd_info *subdev = concat->subdev[i];
3060a572655SStefan Roese
3070a572655SStefan Roese if (to >= subdev->size) {
3080a572655SStefan Roese to -= subdev->size;
3090a572655SStefan Roese continue;
3100a572655SStefan Roese }
3110a572655SStefan Roese
3120a572655SStefan Roese /* partial write ? */
3130a572655SStefan Roese if (to + devops.len > subdev->size)
3140a572655SStefan Roese devops.len = subdev->size - to;
3150a572655SStefan Roese
316dfe64e2cSSergey Lapin err = mtd_write_oob(subdev, to, &devops);
317ff94bc40SHeiko Schocher ops->retlen += devops.oobretlen;
3180a572655SStefan Roese if (err)
3190a572655SStefan Roese return err;
3200a572655SStefan Roese
3210a572655SStefan Roese if (devops.datbuf) {
3220a572655SStefan Roese devops.len = ops->len - ops->retlen;
3230a572655SStefan Roese if (!devops.len)
3240a572655SStefan Roese return 0;
3250a572655SStefan Roese devops.datbuf += devops.retlen;
3260a572655SStefan Roese }
3270a572655SStefan Roese if (devops.oobbuf) {
3280a572655SStefan Roese devops.ooblen = ops->ooblen - ops->oobretlen;
3290a572655SStefan Roese if (!devops.ooblen)
3300a572655SStefan Roese return 0;
3310a572655SStefan Roese devops.oobbuf += devops.oobretlen;
3320a572655SStefan Roese }
3330a572655SStefan Roese to = 0;
3340a572655SStefan Roese }
3350a572655SStefan Roese return -EINVAL;
3360a572655SStefan Roese }
3370a572655SStefan Roese
concat_erase_callback(struct erase_info * instr)3380a572655SStefan Roese static void concat_erase_callback(struct erase_info *instr)
3390a572655SStefan Roese {
3400a572655SStefan Roese /* Nothing to do here in U-Boot */
341ff94bc40SHeiko Schocher #ifndef __UBOOT__
342ff94bc40SHeiko Schocher wake_up((wait_queue_head_t *) instr->priv);
343ff94bc40SHeiko Schocher #endif
3440a572655SStefan Roese }
3450a572655SStefan Roese
/*
 * Run one erase request synchronously on a single subdevice: install a
 * wake-up callback, start the erase, then sleep until the callback
 * fires.  Returns the mtd_erase() error, or -EIO if the erase ended in
 * MTD_ERASE_FAILED.  (In the U-Boot build the waitqueue primitives are
 * no-ops and mtd_erase() completes synchronously.)
 */
static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd_erase(mtd, erase);
	if (!err) {
		/* Queue ourselves before checking state to avoid a lost wakeup */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}
3790a572655SStefan Roese
/*
 * Erase the region described by @instr on the concatenated device.
 * The request is first checked for erase-block alignment against the
 * super device's (uniform or variable) erase geometry, then split
 * into per-subdevice erases performed on a local copy of @instr so
 * the caller's struct is never modified.  On failure, fail_addr is
 * translated back into the concatenated address space.
 */
static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if (i < 0 || ((instr->addr + instr->len) &
			      (erase_regions[i].erasesize - 1)))
			return -EINVAL;
	}

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			/* translate subdevice-relative fail_addr back */
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	/* report completion to the caller, mtdchar-style */
	if (instr->callback)
		instr->callback(instr);
	return 0;
}
5020a572655SStefan Roese
/*
 * Lock (write-protect) @len bytes starting at concatenated offset
 * @ofs by forwarding the request to each affected subdevice.
 */
static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t chunk;

		/* Skip subdevices that lie entirely before @ofs */
		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		/* Clamp this request to the end of the subdevice */
		chunk = (ofs + len > subdev->size) ? subdev->size - ofs : len;

		err = mtd_lock(subdev, ofs, chunk);
		if (err)
			break;

		len -= chunk;
		if (!len)
			break;

		/* Remainder continues at offset 0 of the next subdevice */
		err = -EINVAL;
		ofs = 0;
	}

	return err;
}
5360a572655SStefan Roese
/*
 * Unlock @len bytes starting at concatenated offset @ofs by
 * forwarding the request to each affected subdevice.
 */
static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t chunk;

		/* Skip subdevices that lie entirely before @ofs */
		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		/* Clamp this request to the end of the subdevice */
		chunk = (ofs + len > subdev->size) ? subdev->size - ofs : len;

		err = mtd_unlock(subdev, ofs, chunk);
		if (err)
			break;

		len -= chunk;
		if (!len)
			break;

		/* Remainder continues at offset 0 of the next subdevice */
		err = -EINVAL;
		ofs = 0;
	}

	return err;
}
5700a572655SStefan Roese
concat_sync(struct mtd_info * mtd)5710a572655SStefan Roese static void concat_sync(struct mtd_info *mtd)
5720a572655SStefan Roese {
5730a572655SStefan Roese struct mtd_concat *concat = CONCAT(mtd);
5740a572655SStefan Roese int i;
5750a572655SStefan Roese
5760a572655SStefan Roese for (i = 0; i < concat->num_subdev; i++) {
5770a572655SStefan Roese struct mtd_info *subdev = concat->subdev[i];
578dfe64e2cSSergey Lapin mtd_sync(subdev);
5790a572655SStefan Roese }
5800a572655SStefan Roese }
5810a572655SStefan Roese
582ff94bc40SHeiko Schocher #ifndef __UBOOT__
concat_suspend(struct mtd_info * mtd)583ff94bc40SHeiko Schocher static int concat_suspend(struct mtd_info *mtd)
584ff94bc40SHeiko Schocher {
585ff94bc40SHeiko Schocher struct mtd_concat *concat = CONCAT(mtd);
586ff94bc40SHeiko Schocher int i, rc = 0;
587ff94bc40SHeiko Schocher
588ff94bc40SHeiko Schocher for (i = 0; i < concat->num_subdev; i++) {
589ff94bc40SHeiko Schocher struct mtd_info *subdev = concat->subdev[i];
590ff94bc40SHeiko Schocher if ((rc = mtd_suspend(subdev)) < 0)
591ff94bc40SHeiko Schocher return rc;
592ff94bc40SHeiko Schocher }
593ff94bc40SHeiko Schocher return rc;
594ff94bc40SHeiko Schocher }
595ff94bc40SHeiko Schocher
concat_resume(struct mtd_info * mtd)596ff94bc40SHeiko Schocher static void concat_resume(struct mtd_info *mtd)
597ff94bc40SHeiko Schocher {
598ff94bc40SHeiko Schocher struct mtd_concat *concat = CONCAT(mtd);
599ff94bc40SHeiko Schocher int i;
600ff94bc40SHeiko Schocher
601ff94bc40SHeiko Schocher for (i = 0; i < concat->num_subdev; i++) {
602ff94bc40SHeiko Schocher struct mtd_info *subdev = concat->subdev[i];
603ff94bc40SHeiko Schocher mtd_resume(subdev);
604ff94bc40SHeiko Schocher }
605ff94bc40SHeiko Schocher }
606ff94bc40SHeiko Schocher #endif
607ff94bc40SHeiko Schocher
/*
 * Report whether the block at concatenated offset @ofs is bad,
 * delegating to the subdevice that contains @ofs.  Devices that
 * cannot have bad blocks always report "good".
 */
static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs < subdev->size)
			return mtd_block_isbad(subdev, ofs);
		ofs -= subdev->size;
	}

	/* Offset beyond the last subdevice: nothing to report */
	return 0;
}
6300a572655SStefan Roese
/*
 * Mark the block at concatenated offset @ofs bad on the subdevice
 * that contains it, updating the super device's bad-block count on
 * success.  Returns -EINVAL if @ofs lies beyond the device.
 */
static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs < subdev->size) {
			err = mtd_block_markbad(subdev, ofs);
			if (!err)
				mtd->ecc_stats.badblocks++;
			return err;
		}
		ofs -= subdev->size;
	}

	return -EINVAL;
}
6520a572655SStefan Roese
6530a572655SStefan Roese /*
654ff94bc40SHeiko Schocher * try to support NOMMU mmaps on concatenated devices
655ff94bc40SHeiko Schocher * - we don't support subdev spanning as we can't guarantee it'll work
656ff94bc40SHeiko Schocher */
concat_get_unmapped_area(struct mtd_info * mtd,unsigned long len,unsigned long offset,unsigned long flags)657ff94bc40SHeiko Schocher static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
658ff94bc40SHeiko Schocher unsigned long len,
659ff94bc40SHeiko Schocher unsigned long offset,
660ff94bc40SHeiko Schocher unsigned long flags)
661ff94bc40SHeiko Schocher {
662ff94bc40SHeiko Schocher struct mtd_concat *concat = CONCAT(mtd);
663ff94bc40SHeiko Schocher int i;
664ff94bc40SHeiko Schocher
665ff94bc40SHeiko Schocher for (i = 0; i < concat->num_subdev; i++) {
666ff94bc40SHeiko Schocher struct mtd_info *subdev = concat->subdev[i];
667ff94bc40SHeiko Schocher
668ff94bc40SHeiko Schocher if (offset >= subdev->size) {
669ff94bc40SHeiko Schocher offset -= subdev->size;
670ff94bc40SHeiko Schocher continue;
671ff94bc40SHeiko Schocher }
672ff94bc40SHeiko Schocher
673ff94bc40SHeiko Schocher return mtd_get_unmapped_area(subdev, len, offset, flags);
674ff94bc40SHeiko Schocher }
675ff94bc40SHeiko Schocher
676ff94bc40SHeiko Schocher return (unsigned long) -ENOSYS;
677ff94bc40SHeiko Schocher }
678ff94bc40SHeiko Schocher
679ff94bc40SHeiko Schocher /*
6800a572655SStefan Roese * This function constructs a virtual MTD device by concatenating
6810a572655SStefan Roese * num_devs MTD devices. A pointer to the new device object is
6820a572655SStefan Roese * stored to *new_dev upon success. This function does _not_
6830a572655SStefan Roese * register any devices: this is the caller's responsibility.
6840a572655SStefan Roese */
mtd_concat_create(struct mtd_info * subdev[],int num_devs,const char * name)6850a572655SStefan Roese struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to concatenate */
6860a572655SStefan Roese int num_devs, /* number of subdevices */
687ff94bc40SHeiko Schocher #ifndef __UBOOT__
6880a572655SStefan Roese const char *name)
689ff94bc40SHeiko Schocher #else
690ff94bc40SHeiko Schocher char *name)
691ff94bc40SHeiko Schocher #endif
6920a572655SStefan Roese { /* name for the new device */
6930a572655SStefan Roese int i;
6940a572655SStefan Roese size_t size;
6950a572655SStefan Roese struct mtd_concat *concat;
6960a572655SStefan Roese uint32_t max_erasesize, curr_erasesize;
6970a572655SStefan Roese int num_erase_region;
698ff94bc40SHeiko Schocher int max_writebufsize = 0;
6990a572655SStefan Roese
7000a572655SStefan Roese debug("Concatenating MTD devices:\n");
7010a572655SStefan Roese for (i = 0; i < num_devs; i++)
702ff94bc40SHeiko Schocher printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
7030a572655SStefan Roese debug("into device \"%s\"\n", name);
7040a572655SStefan Roese
7050a572655SStefan Roese /* allocate the device structure */
7060a572655SStefan Roese size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
7070a572655SStefan Roese concat = kzalloc(size, GFP_KERNEL);
7080a572655SStefan Roese if (!concat) {
7090a572655SStefan Roese printk
7100a572655SStefan Roese ("memory allocation error while creating concatenated device \"%s\"\n",
7110a572655SStefan Roese name);
7120a572655SStefan Roese return NULL;
7130a572655SStefan Roese }
7140a572655SStefan Roese concat->subdev = (struct mtd_info **) (concat + 1);
7150a572655SStefan Roese
7160a572655SStefan Roese /*
7170a572655SStefan Roese * Set up the new "super" device's MTD object structure, check for
718ff94bc40SHeiko Schocher * incompatibilities between the subdevices.
7190a572655SStefan Roese */
7200a572655SStefan Roese concat->mtd.type = subdev[0]->type;
7210a572655SStefan Roese concat->mtd.flags = subdev[0]->flags;
7220a572655SStefan Roese concat->mtd.size = subdev[0]->size;
7230a572655SStefan Roese concat->mtd.erasesize = subdev[0]->erasesize;
7240a572655SStefan Roese concat->mtd.writesize = subdev[0]->writesize;
725ff94bc40SHeiko Schocher
726ff94bc40SHeiko Schocher for (i = 0; i < num_devs; i++)
727ff94bc40SHeiko Schocher if (max_writebufsize < subdev[i]->writebufsize)
728ff94bc40SHeiko Schocher max_writebufsize = subdev[i]->writebufsize;
729ff94bc40SHeiko Schocher concat->mtd.writebufsize = max_writebufsize;
730ff94bc40SHeiko Schocher
7310a572655SStefan Roese concat->mtd.subpage_sft = subdev[0]->subpage_sft;
7320a572655SStefan Roese concat->mtd.oobsize = subdev[0]->oobsize;
7330a572655SStefan Roese concat->mtd.oobavail = subdev[0]->oobavail;
734ff94bc40SHeiko Schocher #ifndef __UBOOT__
735ff94bc40SHeiko Schocher if (subdev[0]->_writev)
736ff94bc40SHeiko Schocher concat->mtd._writev = concat_writev;
737ff94bc40SHeiko Schocher #endif
738dfe64e2cSSergey Lapin if (subdev[0]->_read_oob)
739dfe64e2cSSergey Lapin concat->mtd._read_oob = concat_read_oob;
740dfe64e2cSSergey Lapin if (subdev[0]->_write_oob)
741dfe64e2cSSergey Lapin concat->mtd._write_oob = concat_write_oob;
742dfe64e2cSSergey Lapin if (subdev[0]->_block_isbad)
743dfe64e2cSSergey Lapin concat->mtd._block_isbad = concat_block_isbad;
744dfe64e2cSSergey Lapin if (subdev[0]->_block_markbad)
745dfe64e2cSSergey Lapin concat->mtd._block_markbad = concat_block_markbad;
7460a572655SStefan Roese
7470a572655SStefan Roese concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
7480a572655SStefan Roese
749ff94bc40SHeiko Schocher #ifndef __UBOOT__
750ff94bc40SHeiko Schocher concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;
751ff94bc40SHeiko Schocher #endif
752ff94bc40SHeiko Schocher
7530a572655SStefan Roese concat->subdev[0] = subdev[0];
7540a572655SStefan Roese
7550a572655SStefan Roese for (i = 1; i < num_devs; i++) {
7560a572655SStefan Roese if (concat->mtd.type != subdev[i]->type) {
7570a572655SStefan Roese kfree(concat);
7580a572655SStefan Roese printk("Incompatible device type on \"%s\"\n",
7590a572655SStefan Roese subdev[i]->name);
7600a572655SStefan Roese return NULL;
7610a572655SStefan Roese }
7620a572655SStefan Roese if (concat->mtd.flags != subdev[i]->flags) {
7630a572655SStefan Roese /*
7640a572655SStefan Roese * Expect all flags except MTD_WRITEABLE to be
7650a572655SStefan Roese * equal on all subdevices.
7660a572655SStefan Roese */
7670a572655SStefan Roese if ((concat->mtd.flags ^ subdev[i]->
7680a572655SStefan Roese flags) & ~MTD_WRITEABLE) {
7690a572655SStefan Roese kfree(concat);
7700a572655SStefan Roese printk("Incompatible device flags on \"%s\"\n",
7710a572655SStefan Roese subdev[i]->name);
7720a572655SStefan Roese return NULL;
7730a572655SStefan Roese } else
7740a572655SStefan Roese /* if writeable attribute differs,
7750a572655SStefan Roese make super device writeable */
7760a572655SStefan Roese concat->mtd.flags |=
7770a572655SStefan Roese subdev[i]->flags & MTD_WRITEABLE;
7780a572655SStefan Roese }
7790a572655SStefan Roese
780ff94bc40SHeiko Schocher #ifndef __UBOOT__
781ff94bc40SHeiko Schocher /* only permit direct mapping if the BDIs are all the same
782ff94bc40SHeiko Schocher * - copy-mapping is still permitted
783ff94bc40SHeiko Schocher */
784ff94bc40SHeiko Schocher if (concat->mtd.backing_dev_info !=
785ff94bc40SHeiko Schocher subdev[i]->backing_dev_info)
786ff94bc40SHeiko Schocher concat->mtd.backing_dev_info =
787ff94bc40SHeiko Schocher &default_backing_dev_info;
788ff94bc40SHeiko Schocher #endif
789ff94bc40SHeiko Schocher
7900a572655SStefan Roese concat->mtd.size += subdev[i]->size;
7910a572655SStefan Roese concat->mtd.ecc_stats.badblocks +=
7920a572655SStefan Roese subdev[i]->ecc_stats.badblocks;
7930a572655SStefan Roese if (concat->mtd.writesize != subdev[i]->writesize ||
7940a572655SStefan Roese concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
7950a572655SStefan Roese concat->mtd.oobsize != subdev[i]->oobsize ||
796dfe64e2cSSergey Lapin !concat->mtd._read_oob != !subdev[i]->_read_oob ||
797dfe64e2cSSergey Lapin !concat->mtd._write_oob != !subdev[i]->_write_oob) {
7980a572655SStefan Roese kfree(concat);
7990a572655SStefan Roese printk("Incompatible OOB or ECC data on \"%s\"\n",
8000a572655SStefan Roese subdev[i]->name);
8010a572655SStefan Roese return NULL;
8020a572655SStefan Roese }
8030a572655SStefan Roese concat->subdev[i] = subdev[i];
8040a572655SStefan Roese
8050a572655SStefan Roese }
8060a572655SStefan Roese
8070a572655SStefan Roese concat->mtd.ecclayout = subdev[0]->ecclayout;
8080a572655SStefan Roese
8090a572655SStefan Roese concat->num_subdev = num_devs;
8100a572655SStefan Roese concat->mtd.name = name;
8110a572655SStefan Roese
812dfe64e2cSSergey Lapin concat->mtd._erase = concat_erase;
813dfe64e2cSSergey Lapin concat->mtd._read = concat_read;
814dfe64e2cSSergey Lapin concat->mtd._write = concat_write;
815dfe64e2cSSergey Lapin concat->mtd._sync = concat_sync;
816dfe64e2cSSergey Lapin concat->mtd._lock = concat_lock;
817dfe64e2cSSergey Lapin concat->mtd._unlock = concat_unlock;
818ff94bc40SHeiko Schocher #ifndef __UBOOT__
819ff94bc40SHeiko Schocher concat->mtd._suspend = concat_suspend;
820ff94bc40SHeiko Schocher concat->mtd._resume = concat_resume;
821ff94bc40SHeiko Schocher #endif
822ff94bc40SHeiko Schocher concat->mtd._get_unmapped_area = concat_get_unmapped_area;
8230a572655SStefan Roese
8240a572655SStefan Roese /*
8250a572655SStefan Roese * Combine the erase block size info of the subdevices:
8260a572655SStefan Roese *
8270a572655SStefan Roese * first, walk the map of the new device and see how
8280a572655SStefan Roese * many changes in erase size we have
8290a572655SStefan Roese */
8300a572655SStefan Roese max_erasesize = curr_erasesize = subdev[0]->erasesize;
8310a572655SStefan Roese num_erase_region = 1;
8320a572655SStefan Roese for (i = 0; i < num_devs; i++) {
8330a572655SStefan Roese if (subdev[i]->numeraseregions == 0) {
8340a572655SStefan Roese /* current subdevice has uniform erase size */
8350a572655SStefan Roese if (subdev[i]->erasesize != curr_erasesize) {
8360a572655SStefan Roese /* if it differs from the last subdevice's erase size, count it */
8370a572655SStefan Roese ++num_erase_region;
8380a572655SStefan Roese curr_erasesize = subdev[i]->erasesize;
8390a572655SStefan Roese if (curr_erasesize > max_erasesize)
8400a572655SStefan Roese max_erasesize = curr_erasesize;
8410a572655SStefan Roese }
8420a572655SStefan Roese } else {
8430a572655SStefan Roese /* current subdevice has variable erase size */
8440a572655SStefan Roese int j;
8450a572655SStefan Roese for (j = 0; j < subdev[i]->numeraseregions; j++) {
8460a572655SStefan Roese
8470a572655SStefan Roese /* walk the list of erase regions, count any changes */
8480a572655SStefan Roese if (subdev[i]->eraseregions[j].erasesize !=
8490a572655SStefan Roese curr_erasesize) {
8500a572655SStefan Roese ++num_erase_region;
8510a572655SStefan Roese curr_erasesize =
8520a572655SStefan Roese subdev[i]->eraseregions[j].
8530a572655SStefan Roese erasesize;
8540a572655SStefan Roese if (curr_erasesize > max_erasesize)
8550a572655SStefan Roese max_erasesize = curr_erasesize;
8560a572655SStefan Roese }
8570a572655SStefan Roese }
8580a572655SStefan Roese }
8590a572655SStefan Roese }
8600a572655SStefan Roese
8610a572655SStefan Roese if (num_erase_region == 1) {
8620a572655SStefan Roese /*
8630a572655SStefan Roese * All subdevices have the same uniform erase size.
8640a572655SStefan Roese * This is easy:
8650a572655SStefan Roese */
8660a572655SStefan Roese concat->mtd.erasesize = curr_erasesize;
8670a572655SStefan Roese concat->mtd.numeraseregions = 0;
8680a572655SStefan Roese } else {
8690a572655SStefan Roese uint64_t tmp64;
8700a572655SStefan Roese
8710a572655SStefan Roese /*
8720a572655SStefan Roese * erase block size varies across the subdevices: allocate
8730a572655SStefan Roese * space to store the data describing the variable erase regions
8740a572655SStefan Roese */
8750a572655SStefan Roese struct mtd_erase_region_info *erase_region_p;
8760a572655SStefan Roese uint64_t begin, position;
8770a572655SStefan Roese
8780a572655SStefan Roese concat->mtd.erasesize = max_erasesize;
8790a572655SStefan Roese concat->mtd.numeraseregions = num_erase_region;
8800a572655SStefan Roese concat->mtd.eraseregions = erase_region_p =
8810a572655SStefan Roese kmalloc(num_erase_region *
8820a572655SStefan Roese sizeof (struct mtd_erase_region_info), GFP_KERNEL);
8830a572655SStefan Roese if (!erase_region_p) {
8840a572655SStefan Roese kfree(concat);
8850a572655SStefan Roese printk
8860a572655SStefan Roese ("memory allocation error while creating erase region list"
8870a572655SStefan Roese " for device \"%s\"\n", name);
8880a572655SStefan Roese return NULL;
8890a572655SStefan Roese }
8900a572655SStefan Roese
8910a572655SStefan Roese /*
8920a572655SStefan Roese * walk the map of the new device once more and fill in
8930a572655SStefan Roese * in erase region info:
8940a572655SStefan Roese */
8950a572655SStefan Roese curr_erasesize = subdev[0]->erasesize;
8960a572655SStefan Roese begin = position = 0;
8970a572655SStefan Roese for (i = 0; i < num_devs; i++) {
8980a572655SStefan Roese if (subdev[i]->numeraseregions == 0) {
8990a572655SStefan Roese /* current subdevice has uniform erase size */
9000a572655SStefan Roese if (subdev[i]->erasesize != curr_erasesize) {
9010a572655SStefan Roese /*
9020a572655SStefan Roese * fill in an mtd_erase_region_info structure for the area
9030a572655SStefan Roese * we have walked so far:
9040a572655SStefan Roese */
9050a572655SStefan Roese erase_region_p->offset = begin;
9060a572655SStefan Roese erase_region_p->erasesize =
9070a572655SStefan Roese curr_erasesize;
9080a572655SStefan Roese tmp64 = position - begin;
9090a572655SStefan Roese do_div(tmp64, curr_erasesize);
9100a572655SStefan Roese erase_region_p->numblocks = tmp64;
9110a572655SStefan Roese begin = position;
9120a572655SStefan Roese
9130a572655SStefan Roese curr_erasesize = subdev[i]->erasesize;
9140a572655SStefan Roese ++erase_region_p;
9150a572655SStefan Roese }
9160a572655SStefan Roese position += subdev[i]->size;
9170a572655SStefan Roese } else {
9180a572655SStefan Roese /* current subdevice has variable erase size */
9190a572655SStefan Roese int j;
9200a572655SStefan Roese for (j = 0; j < subdev[i]->numeraseregions; j++) {
9210a572655SStefan Roese /* walk the list of erase regions, count any changes */
9220a572655SStefan Roese if (subdev[i]->eraseregions[j].
9230a572655SStefan Roese erasesize != curr_erasesize) {
9240a572655SStefan Roese erase_region_p->offset = begin;
9250a572655SStefan Roese erase_region_p->erasesize =
9260a572655SStefan Roese curr_erasesize;
9270a572655SStefan Roese tmp64 = position - begin;
9280a572655SStefan Roese do_div(tmp64, curr_erasesize);
9290a572655SStefan Roese erase_region_p->numblocks = tmp64;
9300a572655SStefan Roese begin = position;
9310a572655SStefan Roese
9320a572655SStefan Roese curr_erasesize =
9330a572655SStefan Roese subdev[i]->eraseregions[j].
9340a572655SStefan Roese erasesize;
9350a572655SStefan Roese ++erase_region_p;
9360a572655SStefan Roese }
9370a572655SStefan Roese position +=
9380a572655SStefan Roese subdev[i]->eraseregions[j].
9390a572655SStefan Roese numblocks * (uint64_t)curr_erasesize;
9400a572655SStefan Roese }
9410a572655SStefan Roese }
9420a572655SStefan Roese }
9430a572655SStefan Roese /* Now write the final entry */
9440a572655SStefan Roese erase_region_p->offset = begin;
9450a572655SStefan Roese erase_region_p->erasesize = curr_erasesize;
9460a572655SStefan Roese tmp64 = position - begin;
9470a572655SStefan Roese do_div(tmp64, curr_erasesize);
9480a572655SStefan Roese erase_region_p->numblocks = tmp64;
9490a572655SStefan Roese }
9500a572655SStefan Roese
9510a572655SStefan Roese return &concat->mtd;
9520a572655SStefan Roese }
953ff94bc40SHeiko Schocher
954ff94bc40SHeiko Schocher /*
955ff94bc40SHeiko Schocher * This function destroys an MTD object obtained from concat_mtd_devs()
956ff94bc40SHeiko Schocher */
957ff94bc40SHeiko Schocher
mtd_concat_destroy(struct mtd_info * mtd)958ff94bc40SHeiko Schocher void mtd_concat_destroy(struct mtd_info *mtd)
959ff94bc40SHeiko Schocher {
960ff94bc40SHeiko Schocher struct mtd_concat *concat = CONCAT(mtd);
961ff94bc40SHeiko Schocher if (concat->mtd.numeraseregions)
962ff94bc40SHeiko Schocher kfree(concat->mtd.eraseregions);
963ff94bc40SHeiko Schocher kfree(concat);
964ff94bc40SHeiko Schocher }
965ff94bc40SHeiko Schocher
966ff94bc40SHeiko Schocher EXPORT_SYMBOL(mtd_concat_create);
967ff94bc40SHeiko Schocher EXPORT_SYMBOL(mtd_concat_destroy);
968ff94bc40SHeiko Schocher
969ff94bc40SHeiko Schocher MODULE_LICENSE("GPL");
970ff94bc40SHeiko Schocher MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
971ff94bc40SHeiko Schocher MODULE_DESCRIPTION("Generic support for concatenating of MTD devices");
972