1 /* 2 * MTD device concatenation layer 3 * 4 * (C) 2002 Robert Kaiser <rkaiser@sysgo.de> 5 * 6 * NAND support by Christian Gan <cgan@iders.ca> 7 * 8 * This code is GPL 9 */ 10 11 #include <linux/mtd/mtd.h> 12 #include <linux/mtd/compat.h> 13 #include <linux/mtd/concat.h> 14 #include <ubi_uboot.h> 15 16 /* 17 * Our storage structure: 18 * Subdev points to an array of pointers to struct mtd_info objects 19 * which is allocated along with this structure 20 * 21 */ 22 struct mtd_concat { 23 struct mtd_info mtd; 24 int num_subdev; 25 struct mtd_info **subdev; 26 }; 27 28 /* 29 * how to calculate the size required for the above structure, 30 * including the pointer array subdev points to: 31 */ 32 #define SIZEOF_STRUCT_MTD_CONCAT(num_subdev) \ 33 ((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *))) 34 35 /* 36 * Given a pointer to the MTD object in the mtd_concat structure, 37 * we can retrieve the pointer to that structure with this macro. 38 */ 39 #define CONCAT(x) ((struct mtd_concat *)(x)) 40 41 /* 42 * MTD methods which look up the relevant subdevice, translate the 43 * effective address and pass through to the subdevice. 44 */ 45 46 static int 47 concat_read(struct mtd_info *mtd, loff_t from, size_t len, 48 size_t * retlen, u_char * buf) 49 { 50 struct mtd_concat *concat = CONCAT(mtd); 51 int ret = 0, err; 52 int i; 53 54 *retlen = 0; 55 56 for (i = 0; i < concat->num_subdev; i++) { 57 struct mtd_info *subdev = concat->subdev[i]; 58 size_t size, retsize; 59 60 if (from >= subdev->size) { 61 /* Not destined for this subdev */ 62 size = 0; 63 from -= subdev->size; 64 continue; 65 } 66 if (from + len > subdev->size) 67 /* First part goes into this subdev */ 68 size = subdev->size - from; 69 else 70 /* Entire transaction goes into this subdev */ 71 size = len; 72 73 err = subdev->read(subdev, from, size, &retsize, buf); 74 75 /* Save information about bitflips! */ 76 if (unlikely(err)) { 77 if (err == -EBADMSG) { 78 mtd->ecc_stats.failed++; 79 ret = err; 80 } else if (err == -EUCLEAN) { 81 mtd->ecc_stats.corrected++; 82 /* Do not overwrite -EBADMSG !! */ 83 if (!ret) 84 ret = err; 85 } else 86 return err; 87 } 88 89 *retlen += retsize; 90 len -= size; 91 if (len == 0) 92 return ret; 93 94 buf += size; 95 from = 0; 96 } 97 return -EINVAL; 98 } 99 100 static int 101 concat_write(struct mtd_info *mtd, loff_t to, size_t len, 102 size_t * retlen, const u_char * buf) 103 { 104 struct mtd_concat *concat = CONCAT(mtd); 105 int err = -EINVAL; 106 int i; 107 108 if (!(mtd->flags & MTD_WRITEABLE)) 109 return -EROFS; 110 111 *retlen = 0; 112 113 for (i = 0; i < concat->num_subdev; i++) { 114 struct mtd_info *subdev = concat->subdev[i]; 115 size_t size, retsize; 116 117 if (to >= subdev->size) { 118 size = 0; 119 to -= subdev->size; 120 continue; 121 } 122 if (to + len > subdev->size) 123 size = subdev->size - to; 124 else 125 size = len; 126 127 if (!(subdev->flags & MTD_WRITEABLE)) 128 err = -EROFS; 129 else 130 err = subdev->write(subdev, to, size, &retsize, buf); 131 132 if (err) 133 break; 134 135 *retlen += retsize; 136 len -= size; 137 if (len == 0) 138 break; 139 140 err = -EINVAL; 141 buf += size; 142 to = 0; 143 } 144 return err; 145 } 146 147 static int 148 concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) 149 { 150 struct mtd_concat *concat = CONCAT(mtd); 151 struct mtd_oob_ops devops = *ops; 152 int i, err, ret = 0; 153 154 ops->retlen = ops->oobretlen = 0; 155 156 for (i = 0; i < concat->num_subdev; i++) { 157 struct mtd_info *subdev = concat->subdev[i]; 158 159 if (from >= subdev->size) { 160 from -= subdev->size; 161 continue; 162 } 163 164 /* partial read ? */ 165 if (from + devops.len > subdev->size) 166 devops.len = subdev->size - from; 167 168 err = subdev->read_oob(subdev, from, &devops); 169 ops->retlen += devops.retlen; 170 ops->oobretlen += devops.oobretlen; 171 172 /* Save information about bitflips! */ 173 if (unlikely(err)) { 174 if (err == -EBADMSG) { 175 mtd->ecc_stats.failed++; 176 ret = err; 177 } else if (err == -EUCLEAN) { 178 mtd->ecc_stats.corrected++; 179 /* Do not overwrite -EBADMSG !! */ 180 if (!ret) 181 ret = err; 182 } else 183 return err; 184 } 185 186 if (devops.datbuf) { 187 devops.len = ops->len - ops->retlen; 188 if (!devops.len) 189 return ret; 190 devops.datbuf += devops.retlen; 191 } 192 if (devops.oobbuf) { 193 devops.ooblen = ops->ooblen - ops->oobretlen; 194 if (!devops.ooblen) 195 return ret; 196 devops.oobbuf += ops->oobretlen; 197 } 198 199 from = 0; 200 } 201 return -EINVAL; 202 } 203 204 static int 205 concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) 206 { 207 struct mtd_concat *concat = CONCAT(mtd); 208 struct mtd_oob_ops devops = *ops; 209 int i, err; 210 211 if (!(mtd->flags & MTD_WRITEABLE)) 212 return -EROFS; 213 214 ops->retlen = 0; 215 216 for (i = 0; i < concat->num_subdev; i++) { 217 struct mtd_info *subdev = concat->subdev[i]; 218 219 if (to >= subdev->size) { 220 to -= subdev->size; 221 continue; 222 } 223 224 /* partial write ? */ 225 if (to + devops.len > subdev->size) 226 devops.len = subdev->size - to; 227 228 err = subdev->write_oob(subdev, to, &devops); 229 ops->retlen += devops.retlen; 230 if (err) 231 return err; 232 233 if (devops.datbuf) { 234 devops.len = ops->len - ops->retlen; 235 if (!devops.len) 236 return 0; 237 devops.datbuf += devops.retlen; 238 } 239 if (devops.oobbuf) { 240 devops.ooblen = ops->ooblen - ops->oobretlen; 241 if (!devops.ooblen) 242 return 0; 243 devops.oobbuf += devops.oobretlen; 244 } 245 to = 0; 246 } 247 return -EINVAL; 248 } 249 250 static void concat_erase_callback(struct erase_info *instr) 251 { 252 /* Nothing to do here in U-Boot */ 253 } 254 255 static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase) 256 { 257 int err; 258 wait_queue_head_t waitq; 259 DECLARE_WAITQUEUE(wait, current); 260 261 /* 262 * This code was stol^H^H^H^Hinspired by mtdchar.c 263 */ 264 init_waitqueue_head(&waitq); 265 266 erase->mtd = mtd; 267 erase->callback = concat_erase_callback; 268 erase->priv = (unsigned long) &waitq; 269 270 /* 271 * FIXME: Allow INTERRUPTIBLE. Which means 272 * not having the wait_queue head on the stack. 273 */ 274 err = mtd->erase(mtd, erase); 275 if (!err) { 276 set_current_state(TASK_UNINTERRUPTIBLE); 277 add_wait_queue(&waitq, &wait); 278 if (erase->state != MTD_ERASE_DONE 279 && erase->state != MTD_ERASE_FAILED) 280 schedule(); 281 remove_wait_queue(&waitq, &wait); 282 set_current_state(TASK_RUNNING); 283 284 err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0; 285 } 286 return err; 287 } 288 289 static int concat_erase(struct mtd_info *mtd, struct erase_info *instr) 290 { 291 struct mtd_concat *concat = CONCAT(mtd); 292 struct mtd_info *subdev; 293 int i, err; 294 uint64_t length, offset = 0; 295 struct erase_info *erase; 296 297 if (!(mtd->flags & MTD_WRITEABLE)) 298 return -EROFS; 299 300 if (instr->addr > concat->mtd.size) 301 return -EINVAL; 302 303 if (instr->len + instr->addr > concat->mtd.size) 304 return -EINVAL; 305 306 /* 307 * Check for proper erase block alignment of the to-be-erased area. 308 * It is easier to do this based on the super device's erase 309 * region info rather than looking at each particular sub-device 310 * in turn. 311 */ 312 if (!concat->mtd.numeraseregions) { 313 /* the easy case: device has uniform erase block size */ 314 if (instr->addr & (concat->mtd.erasesize - 1)) 315 return -EINVAL; 316 if (instr->len & (concat->mtd.erasesize - 1)) 317 return -EINVAL; 318 } else { 319 /* device has variable erase size */ 320 struct mtd_erase_region_info *erase_regions = 321 concat->mtd.eraseregions; 322 323 /* 324 * Find the erase region where the to-be-erased area begins: 325 */ 326 for (i = 0; i < concat->mtd.numeraseregions && 327 instr->addr >= erase_regions[i].offset; i++) ; 328 --i; 329 330 /* 331 * Now erase_regions[i] is the region in which the 332 * to-be-erased area begins. Verify that the starting 333 * offset is aligned to this region's erase size: 334 */ 335 if (instr->addr & (erase_regions[i].erasesize - 1)) 336 return -EINVAL; 337 338 /* 339 * now find the erase region where the to-be-erased area ends: 340 */ 341 for (; i < concat->mtd.numeraseregions && 342 (instr->addr + instr->len) >= erase_regions[i].offset; 343 ++i) ; 344 --i; 345 /* 346 * check if the ending offset is aligned to this region's erase size 347 */ 348 if ((instr->addr + instr->len) & (erase_regions[i].erasesize - 349 1)) 350 return -EINVAL; 351 } 352 353 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; 354 355 /* make a local copy of instr to avoid modifying the caller's struct */ 356 erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL); 357 358 if (!erase) 359 return -ENOMEM; 360 361 *erase = *instr; 362 length = instr->len; 363 364 /* 365 * find the subdevice where the to-be-erased area begins, adjust 366 * starting offset to be relative to the subdevice start 367 */ 368 for (i = 0; i < concat->num_subdev; i++) { 369 subdev = concat->subdev[i]; 370 if (subdev->size <= erase->addr) { 371 erase->addr -= subdev->size; 372 offset += subdev->size; 373 } else { 374 break; 375 } 376 } 377 378 /* must never happen since size limit has been verified above */ 379 BUG_ON(i >= concat->num_subdev); 380 381 /* now do the erase: */ 382 err = 0; 383 for (; length > 0; i++) { 384 /* loop for all subdevices affected by this request */ 385 subdev = concat->subdev[i]; /* get current subdevice */ 386 387 /* limit length to subdevice's size: */ 388 if (erase->addr + length > subdev->size) 389 erase->len = subdev->size - erase->addr; 390 else 391 erase->len = length; 392 393 if (!(subdev->flags & MTD_WRITEABLE)) { 394 err = -EROFS; 395 break; 396 } 397 length -= erase->len; 398 if ((err = concat_dev_erase(subdev, erase))) { 399 /* sanity check: should never happen since 400 * block alignment has been checked above */ 401 BUG_ON(err == -EINVAL); 402 if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN) 403 instr->fail_addr = erase->fail_addr + offset; 404 break; 405 } 406 /* 407 * erase->addr specifies the offset of the area to be 408 * erased *within the current subdevice*. It can be 409 * non-zero only the first time through this loop, i.e. 410 * for the first subdevice where blocks need to be erased. 411 * All the following erases must begin at the start of the 412 * current subdevice, i.e. at offset zero. 413 */ 414 erase->addr = 0; 415 offset += subdev->size; 416 } 417 instr->state = erase->state; 418 kfree(erase); 419 if (err) 420 return err; 421 422 if (instr->callback) 423 instr->callback(instr); 424 return 0; 425 } 426 427 static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 428 { 429 struct mtd_concat *concat = CONCAT(mtd); 430 int i, err = -EINVAL; 431 432 if ((len + ofs) > mtd->size) 433 return -EINVAL; 434 435 for (i = 0; i < concat->num_subdev; i++) { 436 struct mtd_info *subdev = concat->subdev[i]; 437 uint64_t size; 438 439 if (ofs >= subdev->size) { 440 size = 0; 441 ofs -= subdev->size; 442 continue; 443 } 444 if (ofs + len > subdev->size) 445 size = subdev->size - ofs; 446 else 447 size = len; 448 449 err = subdev->lock(subdev, ofs, size); 450 451 if (err) 452 break; 453 454 len -= size; 455 if (len == 0) 456 break; 457 458 err = -EINVAL; 459 ofs = 0; 460 } 461 462 return err; 463 } 464 465 static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 466 { 467 struct mtd_concat *concat = CONCAT(mtd); 468 int i, err = 0; 469 470 if ((len + ofs) > mtd->size) 471 return -EINVAL; 472 473 for (i = 0; i < concat->num_subdev; i++) { 474 struct mtd_info *subdev = concat->subdev[i]; 475 uint64_t size; 476 477 if (ofs >= subdev->size) { 478 size = 0; 479 ofs -= subdev->size; 480 continue; 481 } 482 if (ofs + len > subdev->size) 483 size = subdev->size - ofs; 484 else 485 size = len; 486 487 err = subdev->unlock(subdev, ofs, size); 488 489 if (err) 490 break; 491 492 len -= size; 493 if (len == 0) 494 break; 495 496 err = -EINVAL; 497 ofs = 0; 498 } 499 500 return err; 501 } 502 503 static void concat_sync(struct mtd_info *mtd) 504 { 505 struct mtd_concat *concat = CONCAT(mtd); 506 int i; 507 508 for (i = 0; i < concat->num_subdev; i++) { 509 struct mtd_info *subdev = concat->subdev[i]; 510 subdev->sync(subdev); 511 } 512 } 513 514 static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs) 515 { 516 struct mtd_concat *concat = CONCAT(mtd); 517 int i, res = 0; 518 519 if (!concat->subdev[0]->block_isbad) 520 return res; 521 522 if (ofs > mtd->size) 523 return -EINVAL; 524 525 for (i = 0; i < concat->num_subdev; i++) { 526 struct mtd_info *subdev = concat->subdev[i]; 527 528 if (ofs >= subdev->size) { 529 ofs -= subdev->size; 530 continue; 531 } 532 533 res = subdev->block_isbad(subdev, ofs); 534 break; 535 } 536 537 return res; 538 } 539 540 static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs) 541 { 542 struct mtd_concat *concat = CONCAT(mtd); 543 int i, err = -EINVAL; 544 545 if (!concat->subdev[0]->block_markbad) 546 return 0; 547 548 if (ofs > mtd->size) 549 return -EINVAL; 550 551 for (i = 0; i < concat->num_subdev; i++) { 552 struct mtd_info *subdev = concat->subdev[i]; 553 554 if (ofs >= subdev->size) { 555 ofs -= subdev->size; 556 continue; 557 } 558 559 err = subdev->block_markbad(subdev, ofs); 560 if (!err) 561 mtd->ecc_stats.badblocks++; 562 break; 563 } 564 565 return err; 566 } 567 568 /* 569 * This function constructs a virtual MTD device by concatenating 570 * num_devs MTD devices. A pointer to the new device object is 571 * stored to *new_dev upon success. This function does _not_ 572 * register any devices: this is the caller's responsibility. 573 */ 574 struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to concatenate */ 575 int num_devs, /* number of subdevices */ 576 const char *name) 577 { /* name for the new device */ 578 int i; 579 size_t size; 580 struct mtd_concat *concat; 581 uint32_t max_erasesize, curr_erasesize; 582 int num_erase_region; 583 584 debug("Concatenating MTD devices:\n"); 585 for (i = 0; i < num_devs; i++) 586 debug("(%d): \"%s\"\n", i, subdev[i]->name); 587 debug("into device \"%s\"\n", name); 588 589 /* allocate the device structure */ 590 size = SIZEOF_STRUCT_MTD_CONCAT(num_devs); 591 concat = kzalloc(size, GFP_KERNEL); 592 if (!concat) { 593 printk 594 ("memory allocation error while creating concatenated device \"%s\"\n", 595 name); 596 return NULL; 597 } 598 concat->subdev = (struct mtd_info **) (concat + 1); 599 600 /* 601 * Set up the new "super" device's MTD object structure, check for 602 * incompatibilites between the subdevices. 603 */ 604 concat->mtd.type = subdev[0]->type; 605 concat->mtd.flags = subdev[0]->flags; 606 concat->mtd.size = subdev[0]->size; 607 concat->mtd.erasesize = subdev[0]->erasesize; 608 concat->mtd.writesize = subdev[0]->writesize; 609 concat->mtd.subpage_sft = subdev[0]->subpage_sft; 610 concat->mtd.oobsize = subdev[0]->oobsize; 611 concat->mtd.oobavail = subdev[0]->oobavail; 612 if (subdev[0]->read_oob) 613 concat->mtd.read_oob = concat_read_oob; 614 if (subdev[0]->write_oob) 615 concat->mtd.write_oob = concat_write_oob; 616 if (subdev[0]->block_isbad) 617 concat->mtd.block_isbad = concat_block_isbad; 618 if (subdev[0]->block_markbad) 619 concat->mtd.block_markbad = concat_block_markbad; 620 621 concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks; 622 623 concat->subdev[0] = subdev[0]; 624 625 for (i = 1; i < num_devs; i++) { 626 if (concat->mtd.type != subdev[i]->type) { 627 kfree(concat); 628 printk("Incompatible device type on \"%s\"\n", 629 subdev[i]->name); 630 return NULL; 631 } 632 if (concat->mtd.flags != subdev[i]->flags) { 633 /* 634 * Expect all flags except MTD_WRITEABLE to be 635 * equal on all subdevices. 636 */ 637 if ((concat->mtd.flags ^ subdev[i]-> 638 flags) & ~MTD_WRITEABLE) { 639 kfree(concat); 640 printk("Incompatible device flags on \"%s\"\n", 641 subdev[i]->name); 642 return NULL; 643 } else 644 /* if writeable attribute differs, 645 make super device writeable */ 646 concat->mtd.flags |= 647 subdev[i]->flags & MTD_WRITEABLE; 648 } 649 650 concat->mtd.size += subdev[i]->size; 651 concat->mtd.ecc_stats.badblocks += 652 subdev[i]->ecc_stats.badblocks; 653 if (concat->mtd.writesize != subdev[i]->writesize || 654 concat->mtd.subpage_sft != subdev[i]->subpage_sft || 655 concat->mtd.oobsize != subdev[i]->oobsize || 656 !concat->mtd.read_oob != !subdev[i]->read_oob || 657 !concat->mtd.write_oob != !subdev[i]->write_oob) { 658 kfree(concat); 659 printk("Incompatible OOB or ECC data on \"%s\"\n", 660 subdev[i]->name); 661 return NULL; 662 } 663 concat->subdev[i] = subdev[i]; 664 665 } 666 667 concat->mtd.ecclayout = subdev[0]->ecclayout; 668 669 concat->num_subdev = num_devs; 670 concat->mtd.name = name; 671 672 concat->mtd.erase = concat_erase; 673 concat->mtd.read = concat_read; 674 concat->mtd.write = concat_write; 675 concat->mtd.sync = concat_sync; 676 concat->mtd.lock = concat_lock; 677 concat->mtd.unlock = concat_unlock; 678 679 /* 680 * Combine the erase block size info of the subdevices: 681 * 682 * first, walk the map of the new device and see how 683 * many changes in erase size we have 684 */ 685 max_erasesize = curr_erasesize = subdev[0]->erasesize; 686 num_erase_region = 1; 687 for (i = 0; i < num_devs; i++) { 688 if (subdev[i]->numeraseregions == 0) { 689 /* current subdevice has uniform erase size */ 690 if (subdev[i]->erasesize != curr_erasesize) { 691 /* if it differs from the last subdevice's erase size, count it */ 692 ++num_erase_region; 693 curr_erasesize = subdev[i]->erasesize; 694 if (curr_erasesize > max_erasesize) 695 max_erasesize = curr_erasesize; 696 } 697 } else { 698 /* current subdevice has variable erase size */ 699 int j; 700 for (j = 0; j < subdev[i]->numeraseregions; j++) { 701 702 /* walk the list of erase regions, count any changes */ 703 if (subdev[i]->eraseregions[j].erasesize != 704 curr_erasesize) { 705 ++num_erase_region; 706 curr_erasesize = 707 subdev[i]->eraseregions[j]. 708 erasesize; 709 if (curr_erasesize > max_erasesize) 710 max_erasesize = curr_erasesize; 711 } 712 } 713 } 714 } 715 716 if (num_erase_region == 1) { 717 /* 718 * All subdevices have the same uniform erase size. 719 * This is easy: 720 */ 721 concat->mtd.erasesize = curr_erasesize; 722 concat->mtd.numeraseregions = 0; 723 } else { 724 uint64_t tmp64; 725 726 /* 727 * erase block size varies across the subdevices: allocate 728 * space to store the data describing the variable erase regions 729 */ 730 struct mtd_erase_region_info *erase_region_p; 731 uint64_t begin, position; 732 733 concat->mtd.erasesize = max_erasesize; 734 concat->mtd.numeraseregions = num_erase_region; 735 concat->mtd.eraseregions = erase_region_p = 736 kmalloc(num_erase_region * 737 sizeof (struct mtd_erase_region_info), GFP_KERNEL); 738 if (!erase_region_p) { 739 kfree(concat); 740 printk 741 ("memory allocation error while creating erase region list" 742 " for device \"%s\"\n", name); 743 return NULL; 744 } 745 746 /* 747 * walk the map of the new device once more and fill in 748 * in erase region info: 749 */ 750 curr_erasesize = subdev[0]->erasesize; 751 begin = position = 0; 752 for (i = 0; i < num_devs; i++) { 753 if (subdev[i]->numeraseregions == 0) { 754 /* current subdevice has uniform erase size */ 755 if (subdev[i]->erasesize != curr_erasesize) { 756 /* 757 * fill in an mtd_erase_region_info structure for the area 758 * we have walked so far: 759 */ 760 erase_region_p->offset = begin; 761 erase_region_p->erasesize = 762 curr_erasesize; 763 tmp64 = position - begin; 764 do_div(tmp64, curr_erasesize); 765 erase_region_p->numblocks = tmp64; 766 begin = position; 767 768 curr_erasesize = subdev[i]->erasesize; 769 ++erase_region_p; 770 } 771 position += subdev[i]->size; 772 } else { 773 /* current subdevice has variable erase size */ 774 int j; 775 for (j = 0; j < subdev[i]->numeraseregions; j++) { 776 /* walk the list of erase regions, count any changes */ 777 if (subdev[i]->eraseregions[j]. 778 erasesize != curr_erasesize) { 779 erase_region_p->offset = begin; 780 erase_region_p->erasesize = 781 curr_erasesize; 782 tmp64 = position - begin; 783 do_div(tmp64, curr_erasesize); 784 erase_region_p->numblocks = tmp64; 785 begin = position; 786 787 curr_erasesize = 788 subdev[i]->eraseregions[j]. 789 erasesize; 790 ++erase_region_p; 791 } 792 position += 793 subdev[i]->eraseregions[j]. 794 numblocks * (uint64_t)curr_erasesize; 795 } 796 } 797 } 798 /* Now write the final entry */ 799 erase_region_p->offset = begin; 800 erase_region_p->erasesize = curr_erasesize; 801 tmp64 = position - begin; 802 do_div(tmp64, curr_erasesize); 803 erase_region_p->numblocks = tmp64; 804 } 805 806 return &concat->mtd; 807 } 808