/*
 * rfd_ftl.c -- resident flash disk (flash translation layer)
 *
 * Copyright © 2005 Sean Young <sean@mess.org>
 *
 * This type of flash translation layer (FTL) is used by the Embedded BIOS
 * by General Software. It is known as the Resident Flash Disk (RFD), see:
 *
 *	http://www.gensw.com/pages/prod/bios/rfd.htm
 *
 * based on ftl.c
 */

#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/module.h>

#include <asm/types.h>

static int block_size = 0;
module_param(block_size, int, 0);
MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");

#define PREFIX "rfd_ftl: "

/* This major has been assigned by device@lanana.org */
#ifndef RFD_FTL_MAJOR
#define RFD_FTL_MAJOR		256
#endif

/* Maximum number of partitions in an FTL region */
#define PART_BITS		4

/* An erase unit should start with this value */
#define RFD_MAGIC		0x9193

/* the second value is 0xffff or 0xffc8; function unknown */

/* the third value is always 0xffff, ignored */

/* next is an array of mapping for each corresponding sector */
#define HEADER_MAP_OFFSET	3
#define SECTOR_DELETED		0x0000
#define SECTOR_ZERO		0xfffe
#define SECTOR_FREE		0xffff

#define SECTOR_SIZE		512

#define SECTORS_PER_TRACK	63

struct block {
	enum {
		BLOCK_OK,
		BLOCK_ERASING,
		BLOCK_ERASED,
		BLOCK_UNUSED,
		BLOCK_FAILED
	} state;
	int free_sectors;
	int used_sectors;
	int erases;
	u_long offset;
};

struct partition {
	struct mtd_blktrans_dev mbd;

	u_int block_size;		/* size of erase unit */
	u_int total_blocks;		/* number of erase units */
	u_int header_sectors_per_block;	/* header sectors in erase unit */
	u_int data_sectors_per_block;	/* data sectors in erase unit */
	u_int sector_count;		/* sectors in translated disk */
	u_int header_size;		/* bytes in header sector */
	int reserved_block;		/* block next up for reclaim */
	int current_block;		/* block to write to */
	u16 *header_cache;		/* cached header */

	int is_reclaiming;
	int cylinders;
	int errors;
	u_long *sector_map;
	struct block *blocks;
};

static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);

static int build_block_map(struct partition *part, int block_no)
{
	struct block *block = &part->blocks[block_no];
	int i;

	block->offset = part->block_size * block_no;

	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
		block->state = BLOCK_UNUSED;
		return -ENOENT;
	}

	block->state = BLOCK_OK;

	for (i = 0; i < part->data_sectors_per_block; i++) {
		u16 entry;

		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);

		if (entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_FREE) {
			block->free_sectors++;
			continue;
		}

		if (entry == SECTOR_ZERO)
			entry = 0;

		if (entry >= part->sector_count) {
			printk(KERN_WARNING PREFIX
				"'%s': unit #%d: entry %d corrupt, "
				"sector %d out of range\n",
				part->mbd.mtd->name, block_no, i, entry);
			continue;
		}

		if (part->sector_map[entry] != -1) {
			printk(KERN_WARNING PREFIX
				"'%s': more than one entry for sector %d\n",
				part->mbd.mtd->name, entry);
			part->errors = 1;
			continue;
		}

		part->sector_map[entry] = block->offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		block->used_sectors++;
	}

	if (block->free_sectors == part->data_sectors_per_block)
		part->reserved_block = block_no;

	return 0;
}

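/*
 * Worked example of the geometry scan_header() derives below, assuming a
 * hypothetical (but typical) 64 KiB erase unit and a 2 MiB partition:
 *
 *	sectors_per_block        = 65536 / 512                 = 128
 *	header_sectors_per_block = ((3 + 128) * 2 + 511) / 512 = 1
 *	data_sectors_per_block   = 128 - 1                     = 127
 *	header_size              = (3 + 127) * 2               = 260 bytes
 *	total_blocks             = 2097152 / 65536             = 32
 *	cylinders                = (127 * 31 - 1) / 63         = 62
 *	sector_count             = 62 * 63                     = 3906
 *
 * One erase unit's worth of capacity is excluded (total_blocks - 1) because
 * one unit is always kept in reserve for reclaiming.
 */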
static int scan_header(struct partition *part)
{
	int sectors_per_block;
	int i, rc = -ENOMEM;
	int blocks_found;
	size_t retlen;

	sectors_per_block = part->block_size / SECTOR_SIZE;
	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;

	if (part->total_blocks < 2)
		return -ENOENT;

	/* each erase unit has a three-word (u16) header, followed by the map */
	part->header_sectors_per_block =
			((HEADER_MAP_OFFSET + sectors_per_block) *
			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;

	part->data_sectors_per_block = sectors_per_block -
			part->header_sectors_per_block;

	part->header_size = (HEADER_MAP_OFFSET +
			part->data_sectors_per_block) * sizeof(u16);

	part->cylinders = (part->data_sectors_per_block *
			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;

	part->sector_count = part->cylinders * SECTORS_PER_TRACK;

	part->current_block = -1;
	part->reserved_block = -1;
	part->is_reclaiming = 0;

	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
	if (!part->header_cache)
		goto err;

	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
			GFP_KERNEL);
	if (!part->blocks)
		goto err;

	part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
	if (!part->sector_map) {
		printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
			"sector map\n", part->mbd.mtd->name);
		goto err;
	}

	for (i = 0; i < part->sector_count; i++)
		part->sector_map[i] = -1;

	for (i = 0, blocks_found = 0; i < part->total_blocks; i++) {
		rc = mtd_read(part->mbd.mtd, i * part->block_size,
			part->header_size, &retlen,
			(u_char *)part->header_cache);

		if (!rc && retlen != part->header_size)
			rc = -EIO;

		if (rc)
			goto err;

		if (!build_block_map(part, i))
			blocks_found++;
	}

	if (blocks_found == 0) {
		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
			part->mbd.mtd->name);
		rc = -ENOENT;
		goto err;
	}

	if (part->reserved_block == -1) {
		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
			part->mbd.mtd->name);

		part->errors = 1;
	}

	return 0;

err:
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);

	return rc;
}

static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition *)dev;
	u_long addr;
	size_t retlen;
	int rc;

	if (sector >= part->sector_count)
		return -EIO;

	addr = part->sector_map[sector];
	if (addr != -1) {
		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			(u_char *)buf);
		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_WARNING PREFIX "error reading '%s' at "
				"0x%lx\n", part->mbd.mtd->name, addr);
			return rc;
		}
	} else
		memset(buf, 0, SECTOR_SIZE);

	return 0;
}

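/*
 * Erasing is asynchronous in this MTD interface: erase_block() fills in a
 * struct erase_info and mtd_erase() calls back into erase_callback() once
 * the erase has completed (or failed). On success the callback rewrites the
 * RFD_MAGIC signature at the start of the unit and marks every data sector
 * free; on failure the unit is taken out of service as BLOCK_FAILED.
 */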
static void erase_callback(struct erase_info *erase)
{
	struct partition *part;
	u16 magic;
	int i, rc;
	size_t retlen;

	part = (struct partition *)erase->priv;

	i = (u32)erase->addr / part->block_size;
	if (i >= part->total_blocks ||
	    part->blocks[i].offset != erase->addr ||
	    erase->addr > UINT_MAX) {
		printk(KERN_ERR PREFIX "erase callback for unknown offset %llx "
			"on '%s'\n", (unsigned long long)erase->addr,
			part->mbd.mtd->name);
		return;
	}

	if (erase->state != MTD_ERASE_DONE) {
		printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', "
			"state %d\n", (unsigned long long)erase->addr,
			part->mbd.mtd->name, erase->state);

		part->blocks[i].state = BLOCK_FAILED;
		part->blocks[i].free_sectors = 0;
		part->blocks[i].used_sectors = 0;

		kfree(erase);

		return;
	}

	magic = cpu_to_le16(RFD_MAGIC);

	part->blocks[i].state = BLOCK_ERASED;
	part->blocks[i].free_sectors = part->data_sectors_per_block;
	part->blocks[i].used_sectors = 0;
	part->blocks[i].erases++;

	rc = mtd_write(part->mbd.mtd, part->blocks[i].offset, sizeof(magic),
		&retlen, (u_char *)&magic);

	if (!rc && retlen != sizeof(magic))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "'%s': unable to write RFD "
			"header at 0x%lx\n",
			part->mbd.mtd->name,
			part->blocks[i].offset);
		part->blocks[i].state = BLOCK_FAILED;
	} else
		part->blocks[i].state = BLOCK_OK;

	kfree(erase);
}

static int erase_block(struct partition *part, int block)
{
	struct erase_info *erase;
	int rc = -ENOMEM;

	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
	if (!erase)
		goto err;

	erase->mtd = part->mbd.mtd;
	erase->callback = erase_callback;
	erase->addr = part->blocks[block].offset;
	erase->len = part->block_size;
	erase->priv = (u_long)part;

	part->blocks[block].state = BLOCK_ERASING;
	part->blocks[block].free_sectors = 0;

	rc = mtd_erase(part->mbd.mtd, erase);

	if (rc) {
		printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
			"failed\n", (unsigned long long)erase->addr,
			(unsigned long long)erase->len, part->mbd.mtd->name);
		kfree(erase);
	}

err:
	return rc;
}

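/*
 * Copy the live sectors out of the erase unit that is about to be reclaimed.
 * The sector currently being rewritten (*old_sector) is skipped and flagged
 * back to the caller as already gone; every other mapped sector is read and
 * pushed through rfd_ftl_writesect() so that it ends up in a unit with free
 * space.
 */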
static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
{
	void *sector_data;
	u16 *map;
	size_t retlen;
	int i, rc = -ENOMEM;

	part->is_reclaiming = 1;

	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
	if (!sector_data)
		goto err3;

	map = kmalloc(part->header_size, GFP_KERNEL);
	if (!map)
		goto err2;

	rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
		part->header_size, &retlen, (u_char *)map);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error reading '%s' at "
			"0x%lx\n", part->mbd.mtd->name,
			part->blocks[block_no].offset);

		goto err;
	}

	for (i = 0; i < part->data_sectors_per_block; i++) {
		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
		u_long addr;

		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_ZERO)
			entry = 0;

		/* already warned about and ignored in build_block_map() */
		if (entry >= part->sector_count)
			continue;

		addr = part->blocks[block_no].offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		if (*old_sector == addr) {
			*old_sector = -1;
			if (!part->blocks[block_no].used_sectors--) {
				rc = erase_block(part, block_no);
				break;
			}
			continue;
		}
		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			sector_data);

		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_ERR PREFIX "'%s': Unable to "
				"read sector for relocation\n",
				part->mbd.mtd->name);

			goto err;
		}

		rc = rfd_ftl_writesect((struct mtd_blktrans_dev *)part,
			entry, sector_data);

		if (rc)
			goto err;
	}

err:
	kfree(map);
err2:
	kfree(sector_data);
err3:
	part->is_reclaiming = 0;

	return rc;
}

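/*
 * Choose the erase unit that is cheapest to reclaim. A unit's score is its
 * number of live sectors plus its erase count, so the unit with the least
 * data to move wins, with a small bias towards wear levelling. The reserved
 * unit is never chosen, full units are only considered when they hold the
 * sector currently being rewritten, and nothing is reclaimed at all while
 * some unit still has a free sector.
 */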
static int reclaim_block(struct partition *part, u_long *old_sector)
{
	int block, best_block, score, old_sector_block;
	int rc;

	/* we have a race if sync doesn't exist */
	mtd_sync(part->mbd.mtd);

	score = 0x7fffffff; /* INT_MAX */
	best_block = -1;
	if (*old_sector != -1)
		old_sector_block = *old_sector / part->block_size;
	else
		old_sector_block = -1;

	for (block = 0; block < part->total_blocks; block++) {
		int this_score;

		if (block == part->reserved_block)
			continue;

		/*
		 * Postpone reclaiming if there is still a free sector: the
		 * more sectors that get deleted in the meantime, the less
		 * data has to be moved at reclaim time.
		 */
		if (part->blocks[block].free_sectors)
			return 0;

		this_score = part->blocks[block].used_sectors;

		if (block == old_sector_block)
			this_score--;
		else {
			/* no point in moving a full block */
			if (part->blocks[block].used_sectors ==
					part->data_sectors_per_block)
				continue;
		}

		this_score += part->blocks[block].erases;

		if (this_score < score) {
			best_block = block;
			score = this_score;
		}
	}

	if (best_block == -1)
		return -ENOSPC;

	part->current_block = -1;
	part->reserved_block = best_block;

	pr_debug("reclaim_block: reclaiming block #%d with %d used "
		"%d free sectors\n", best_block,
		part->blocks[best_block].used_sectors,
		part->blocks[best_block].free_sectors);

	if (part->blocks[best_block].used_sectors)
		rc = move_block_contents(part, best_block, old_sector);
	else
		rc = erase_block(part, best_block);

	return rc;
}

/*
 * IMPROVE: It would be best to choose the block with the most deleted sectors,
 * because if we fill that one up first it'll have the most chance of having
 * the least live sectors at reclaim.
 */
static int find_free_block(struct partition *part)
{
	int block, stop;

	block = part->current_block == -1 ?
			jiffies % part->total_blocks : part->current_block;
	stop = block;

	do {
		if (part->blocks[block].free_sectors &&
				block != part->reserved_block)
			return block;

		if (part->blocks[block].state == BLOCK_UNUSED)
			erase_block(part, block);

		if (++block >= part->total_blocks)
			block = 0;

	} while (block != stop);

	return -1;
}

static int find_writable_block(struct partition *part, u_long *old_sector)
{
	int rc, block;
	size_t retlen;

	block = find_free_block(part);

	if (block == -1) {
		if (!part->is_reclaiming) {
			rc = reclaim_block(part, old_sector);
			if (rc)
				goto err;

			block = find_free_block(part);
		}

		if (block == -1) {
			rc = -ENOSPC;
			goto err;
		}
	}

	rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
		part->header_size, &retlen,
		(u_char *)part->header_cache);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "'%s': unable to read header at "
			"0x%lx\n", part->mbd.mtd->name,
			part->blocks[block].offset);
		goto err;
	}

	part->current_block = block;

err:
	return rc;
}

static int mark_sector_deleted(struct partition *part, u_long old_addr)
{
	int block, offset, rc;
	u_long addr;
	size_t retlen;
	u16 del = cpu_to_le16(SECTOR_DELETED);

	block = old_addr / part->block_size;
	offset = (old_addr % part->block_size) / SECTOR_SIZE -
		part->header_sectors_per_block;

	addr = part->blocks[block].offset +
		(HEADER_MAP_OFFSET + offset) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
		(u_char *)&del);

	if (!rc && retlen != sizeof(del))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at "
			"0x%lx\n", part->mbd.mtd->name, addr);
		goto err;
	}
	if (block == part->current_block)
		part->header_cache[offset + HEADER_MAP_OFFSET] = del;

	part->blocks[block].used_sectors--;

	if (!part->blocks[block].used_sectors &&
	    !part->blocks[block].free_sectors)
		rc = erase_block(part, block);

err:
	return rc;
}

static int find_free_sector(const struct partition *part, const struct block *block)
{
	int i, stop;

	i = stop = part->data_sectors_per_block - block->free_sectors;

	do {
		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
				== SECTOR_FREE)
			return i;

		if (++i == part->data_sectors_per_block)
			i = 0;
	} while (i != stop);

	return -1;
}

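/*
 * Write one 512-byte sector: make sure the current erase unit has a free
 * data sector (finding or reclaiming a unit if necessary), write the data
 * into that sector, then record the logical sector number in the unit's
 * header map, both on flash and in the cached copy.
 */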
static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
{
	struct partition *part = (struct partition *)dev;
	struct block *block;
	u_long addr;
	int i;
	int rc;
	size_t retlen;
	u16 entry;

	if (part->current_block == -1 ||
	    !part->blocks[part->current_block].free_sectors) {

		rc = find_writable_block(part, old_addr);
		if (rc)
			goto err;
	}

	block = &part->blocks[part->current_block];

	i = find_free_sector(part, block);

	if (i < 0) {
		rc = -ENOSPC;
		goto err;
	}

	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
		block->offset;
	rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
		(u_char *)buf);

	if (!rc && retlen != SECTOR_SIZE)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
			part->mbd.mtd->name, addr);
		goto err;
	}

	part->sector_map[sector] = addr;

	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);

	part->header_cache[i + HEADER_MAP_OFFSET] = entry;

	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
		(u_char *)&entry);

	if (!rc && retlen != sizeof(entry))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
			part->mbd.mtd->name, addr);
		goto err;
	}
	block->used_sectors++;
	block->free_sectors--;

err:
	return rc;
}

static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition *)dev;
	u_long old_addr;
	int i;
	int rc = 0;

	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);

	if (part->reserved_block == -1) {
		rc = -EACCES;
		goto err;
	}

	if (sector >= part->sector_count) {
		rc = -EIO;
		goto err;
	}

	old_addr = part->sector_map[sector];

	for (i = 0; i < SECTOR_SIZE; i++) {
		if (!buf[i])
			continue;

		rc = do_writesect(dev, sector, buf, &old_addr);
		if (rc)
			goto err;
		break;
	}

	if (i == SECTOR_SIZE)
		part->sector_map[sector] = -1;

	if (old_addr != -1)
		rc = mark_sector_deleted(part, old_addr);

err:
	return rc;
}

static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
	struct partition *part = (struct partition *)dev;

	geo->heads = 1;
	geo->sectors = SECTORS_PER_TRACK;
	geo->cylinders = part->cylinders;

	return 0;
}

static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct partition *part;

	if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
		return;

	part = kzalloc(sizeof(struct partition), GFP_KERNEL);
	if (!part)
		return;

	part->mbd.mtd = mtd;

	if (block_size)
		part->block_size = block_size;
	else {
		if (!mtd->erasesize) {
			printk(KERN_WARNING PREFIX "please provide block_size\n");
			goto out;
		} else
			part->block_size = mtd->erasesize;
	}

	if (scan_header(part) == 0) {
		part->mbd.size = part->sector_count;
		part->mbd.tr = tr;
		part->mbd.devnum = -1;
		if (!(mtd->flags & MTD_WRITEABLE))
			part->mbd.readonly = 1;
		else if (part->errors) {
			printk(KERN_WARNING PREFIX "'%s': errors found, "
				"setting read-only\n", mtd->name);
			part->mbd.readonly = 1;
		}

		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
			mtd->name, mtd->type, mtd->flags);

		if (!add_mtd_blktrans_dev((void *)part))
			return;
	}
out:
	kfree(part);
}

static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct partition *part = (struct partition *)dev;
	int i;

	for (i = 0; i < part->total_blocks; i++) {
		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
			part->mbd.mtd->name, i, part->blocks[i].erases);
	}

	del_mtd_blktrans_dev(dev);
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);
}

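/*
 * Registration with the generic mtd_blktrans layer: each NOR partition that
 * scan_header() accepts is exposed as a block device with 512-byte sectors,
 * using the major number reserved for RFD and PART_BITS bits of the minor
 * number for partitions within a device.
 */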
static struct mtd_blktrans_ops rfd_ftl_tr = {
	.name		= "rfd",
	.major		= RFD_FTL_MAJOR,
	.part_bits	= PART_BITS,
	.blksize	= SECTOR_SIZE,

	.readsect	= rfd_ftl_readsect,
	.writesect	= rfd_ftl_writesect,
	.getgeo		= rfd_ftl_getgeo,
	.add_mtd	= rfd_ftl_add_mtd,
	.remove_dev	= rfd_ftl_remove_dev,
	.owner		= THIS_MODULE,
};

static int __init init_rfd_ftl(void)
{
	return register_mtd_blktrans(&rfd_ftl_tr);
}

static void __exit cleanup_rfd_ftl(void)
{
	deregister_mtd_blktrans(&rfd_ftl_tr);
}

module_init(init_rfd_ftl);
module_exit(cleanup_rfd_ftl);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
		"used by General Software's Embedded BIOS");
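
/*
 * Usage sketch (illustrative values): the block_size parameter only needs to
 * be given when the MTD driver does not report an erase size, e.g.
 *
 *	modprobe rfd_ftl block_size=65536
 *
 * otherwise the erase unit size reported by the driver is used.
 */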