/*
 * MTD device concatenation layer
 *
 * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * This code is GPL
 */

#include <linux/mtd/mtd.h>
#include <linux/compat.h>
#include <linux/mtd/concat.h>
#include <ubi_uboot.h>

/*
 * Our storage structure:
 * subdev points to an array of pointers to struct mtd_info objects;
 * the array is allocated along with this structure.
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
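
/*
 * Illustrative note (added for clarity, not from the original source):
 * the macro above sizes a single allocation that holds the struct followed
 * directly by its pointer array.  For two subdevices a caller could do:
 *
 *   struct mtd_concat *c = kzalloc(SIZEOF_STRUCT_MTD_CONCAT(2), GFP_KERNEL);
 *   c->subdev = (struct mtd_info **)(c + 1);
 *
 * i.e. the subdev array starts right after the struct; mtd_concat_create()
 * below sets the pointer up in exactly this way.
 */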

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x)  ((struct mtd_concat *)(x))

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */
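
/*
 * Worked example (added for clarity, not from the original source): with
 * two 8 MiB subdevices, a 4 MiB read starting at concatenated offset
 * 0x600000 becomes a 2 MiB read from subdev[0] at 0x600000 followed by a
 * 2 MiB read from subdev[1] at offset 0; *retlen accumulates both parts.
 */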

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t * retlen, const u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		err = mtd_write(subdev, to, size, &retsize, buf);
		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = mtd_read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += ops->oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = mtd_write_oob(subdev, to, &devops);
		ops->retlen += devops.retlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
	/* Nothing to do here in U-Boot */
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd_erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
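	/*
	 * Example (added for clarity, not from the original source): if the
	 * concatenation joins a chip with 64 KiB blocks and a chip with
	 * 128 KiB blocks, the super device has two erase regions, so an
	 * erase spanning the boundary must start 64 KiB-aligned and end
	 * 128 KiB-aligned.
	 */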
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if ((instr->addr + instr->len) & (erase_regions[i].erasesize - 1))
			return -EINVAL;
	}

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_lock(subdev, ofs, size);

		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_unlock(subdev, ofs, size);

		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_sync(subdev);
	}
}

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return res;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = mtd_block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = mtd_block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned on success, NULL on failure. This function does _not_
 * register any devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices      */
				   const char *name)	/* name for the new device   */
{
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;

	debug("Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		debug("(%d): \"%s\"\n", i, subdev[i]->name);
	debug("into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk("memory allocation error while creating concatenated device \"%s\"\n",
		       name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;
	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
	if (subdev[0]->_read_oob)
		concat->mtd._read_oob = concat_read_oob;
	if (subdev[0]->_write_oob)
		concat->mtd._write_oob = concat_write_oob;
	if (subdev[0]->_block_isbad)
		concat->mtd._block_isbad = concat_block_isbad;
	if (subdev[0]->_block_markbad)
		concat->mtd._block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->flags) &
			    ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
			subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd._read_oob != !subdev[i]->_read_oob ||
		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd._erase = concat_erase;
	concat->mtd._read = concat_read;
	concat->mtd._write = concat_write;
	concat->mtd._sync = concat_sync;
	concat->mtd._lock = concat_lock;
	concat->mtd._unlock = concat_unlock;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
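	/*
	 * For example (added for clarity, not from the original source):
	 * concatenating a chip with uniform 128 KiB blocks and a chip with
	 * uniform 64 KiB blocks gives num_erase_region == 2, so the
	 * variable-erase-size path below is taken.
	 */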
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;

			for (j = 0; j < subdev[i]->numeraseregions; j++) {
				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk("memory allocation error while creating erase region list"
			       " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * the erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;

				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].erasesize !=
					    curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].erasesize;
						++erase_region_p;
					}
					position += subdev[i]->eraseregions[j].numblocks *
					    (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}
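
/*
 * Usage sketch (illustrative only, not part of this driver): board or
 * command code that already has two registered MTD devices could
 * concatenate them and register the result roughly as follows.  The
 * device names and the registration call are assumptions about the
 * caller's environment; mtd_concat_create() itself registers nothing.
 *
 *	struct mtd_info *parts[2];
 *	struct mtd_info *combined;
 *
 *	parts[0] = get_mtd_device_nm("nor0");
 *	parts[1] = get_mtd_device_nm("nor1");
 *	if (IS_ERR(parts[0]) || IS_ERR(parts[1]))
 *		return -ENODEV;
 *
 *	combined = mtd_concat_create(parts, 2, "nor0+nor1");
 *	if (!combined)
 *		return -ENXIO;
 *
 *	add_mtd_device(combined);
 */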