/*
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём),
 *         Frank Haverkamp
 */

/*
 * This file includes UBI initialization and building of UBI devices.
 *
 * When UBI is initialized, it attaches all the MTD devices specified as the
 * module load parameters or the kernel boot parameters. If MTD devices were
 * not specified, UBI does not attach any MTD device, but it is possible to do
 * so later using the "UBI control device".
 *
 * At the moment we only attach UBI devices by scanning, which will become a
 * bottleneck when flashes reach a certain large size. Then one may improve UBI
 * and add other methods, although it does not seem to be easy to do.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "ubi.h"

/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64

#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
#define ubi_is_module() 0
#endif

/**
 * struct mtd_dev_param - MTD device parameter description data structure.
 * @name: MTD character device node path, MTD device name, or MTD device number
 *        string
 * @vid_hdr_offs: VID header offset
 */
struct mtd_dev_param {
	char name[MTD_PARAM_LEN_MAX];
	int vid_hdr_offs;
};

/* Number of elements set in the @mtd_dev_param array */
static int __initdata mtd_devs;

/* MTD devices specification parameters */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];

/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;

/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;

/* UBI control character device */
static struct miscdevice ubi_ctrl_cdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ubi_ctrl",
	.fops = &ubi_ctrl_cdev_operations,
};

/* All UBI devices in system */
static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];

/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);

/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);

/* "Show" method for files in '/<sysfs>/class/ubi/' */
static ssize_t ubi_version_show(struct class *class,
				struct class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", UBI_VERSION);
}

/* UBI version attribute ('/<sysfs>/class/ubi/version') */
static struct class_attribute ubi_version =
	__ATTR(version, S_IRUGO, ubi_version_show, NULL);

static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf);

/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_reserved_for_bad =
	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);

/**
 * ubi_volume_notify - send a volume change notification.
 * @ubi: UBI device description object
 * @vol: volume description object of the changed volume
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 *
 * This is a helper function which notifies all subscribers about a volume
 * change event (creation, removal, re-sizing, re-naming, updating). Returns
 * zero in case of success and a negative error code in case of failure.
 */
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
	struct ubi_notification nt;

	ubi_do_get_device_info(ubi, &nt.di);
	ubi_do_get_volume_info(ubi, vol, &nt.vi);
	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}

/**
 * ubi_notify_all - send a notification to all volumes.
 * @ubi: UBI device description object
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 * @nb: the notifier to call
 *
 * This function walks all volumes of UBI device @ubi and sends the @ntype
 * notification for each volume. If @nb is %NULL, then all registered notifiers
 * are called, otherwise only the @nb notifier is called. Returns the number of
 * sent notifications.
 */
int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
{
	struct ubi_notification nt;
	int i, count = 0;

	ubi_do_get_device_info(ubi, &nt.di);

	mutex_lock(&ubi->device_mutex);
	for (i = 0; i < ubi->vtbl_slots; i++) {
		/*
		 * Since the @ubi->device is locked, and we are not going to
		 * change @ubi->volumes, we do not have to lock
		 * @ubi->volumes_lock.
		 */
		if (!ubi->volumes[i])
			continue;

		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
		if (nb)
			nb->notifier_call(nb, ntype, &nt);
		else
			blocking_notifier_call_chain(&ubi_notifiers, ntype,
						     &nt);
		count += 1;
	}
	mutex_unlock(&ubi->device_mutex);

	return count;
}

/**
 * ubi_enumerate_volumes - send "add" notification for all existing volumes.
 * @nb: the notifier to call
 *
 * This function walks all UBI devices and volumes and sends the
 * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
 * registered notifiers are called, otherwise only the @nb notifier is called.
 * Returns the number of sent notifications.
 */
int ubi_enumerate_volumes(struct notifier_block *nb)
{
	int i, count = 0;

	/*
	 * Since the @ubi_devices_mutex is locked, and we are not going to
	 * change @ubi_devices, we do not have to lock @ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (!ubi)
			continue;
		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
	}

	return count;
}

/**
 * ubi_get_device - get UBI device.
 * @ubi_num: UBI device number
 *
 * This function returns UBI device description object for UBI device number
 * @ubi_num, or %NULL if the device does not exist. This function increases the
 * device reference count to prevent removal of the device. In other words, the
 * device cannot be removed if its reference count is not zero.
 */
struct ubi_device *ubi_get_device(int ubi_num)
{
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	ubi = ubi_devices[ubi_num];
	if (ubi) {
		ubi_assert(ubi->ref_count >= 0);
		ubi->ref_count += 1;
		get_device(&ubi->dev);
	}
	spin_unlock(&ubi_devices_lock);

	return ubi;
}

/**
 * ubi_put_device - drop an UBI device reference.
 * @ubi: UBI device description object
 */
void ubi_put_device(struct ubi_device *ubi)
{
	spin_lock(&ubi_devices_lock);
	ubi->ref_count -= 1;
	put_device(&ubi->dev);
	spin_unlock(&ubi_devices_lock);
}

/**
 * ubi_get_by_major - get UBI device by character device major number.
 * @major: major number
 *
 * This function is similar to 'ubi_get_device()', but it searches the device
 * by its major number.
 */
struct ubi_device *ubi_get_by_major(int major)
{
	int i;
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_assert(ubi->ref_count >= 0);
			ubi->ref_count += 1;
			get_device(&ubi->dev);
			spin_unlock(&ubi_devices_lock);
			return ubi;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return NULL;
}

/**
 * ubi_major2num - get UBI device number by character device major number.
 * @major: major number
 *
 * This function searches for the UBI device by its major number. If the UBI
 * device was not found, this function returns -ENODEV, otherwise the UBI
 * device number is returned.
 */
int ubi_major2num(int major)
{
	int i, ubi_num = -ENODEV;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_num = ubi->ubi_num;
			break;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return ubi_num;
}

/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t ret;
	struct ubi_device *ubi;

	/*
	 * The below code looks weird, but it actually makes sense. We get the
	 * UBI device reference from the contained 'struct ubi_device'. But it
	 * is unclear if the device was removed or not yet. Indeed, if the
	 * device was removed before we increased its reference count,
	 * 'ubi_get_device()' will return -ENODEV and we fail.
	 *
	 * Remember, 'struct ubi_device' is freed in the release function, so
	 * we still can use 'ubi->ubi_num'.
	 */
	ubi = container_of(dev, struct ubi_device, dev);
	ubi = ubi_get_device(ubi->ubi_num);
	if (!ubi)
		return -ENODEV;

	if (attr == &dev_eraseblock_size)
		ret = sprintf(buf, "%d\n", ubi->leb_size);
	else if (attr == &dev_avail_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
	else if (attr == &dev_total_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
	else if (attr == &dev_volumes_count)
		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
	else if (attr == &dev_max_ec)
		ret = sprintf(buf, "%d\n", ubi->max_ec);
	else if (attr == &dev_reserved_for_bad)
		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
	else if (attr == &dev_bad_peb_count)
		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
	else if (attr == &dev_max_vol_count)
		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
	else if (attr == &dev_min_io_size)
		ret = sprintf(buf, "%d\n", ubi->min_io_size);
	else if (attr == &dev_bgt_enabled)
		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
	else if (attr == &dev_mtd_num)
		ret = sprintf(buf, "%d\n", ubi->mtd->index);
	else
		ret = -EINVAL;

	ubi_put_device(ubi);
	return ret;
}

static void dev_release(struct device *dev)
{
	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);

	kfree(ubi);
}

/**
 * ubi_sysfs_init - initialize sysfs for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
{
	int err;

	ubi->dev.release = dev_release;
	ubi->dev.devt = ubi->cdev.dev;
	ubi->dev.class = ubi_class;
	dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
	err = device_register(&ubi->dev);
	if (err)
		return err;

	*ref = 1;
	err = device_create_file(&ubi->dev, &dev_eraseblock_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_volumes_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_max_ec);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_bad_peb_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_max_vol_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_min_io_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_bgt_enabled);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_mtd_num);
	return err;
}

/**
 * ubi_sysfs_close - close sysfs for an UBI device.
 * @ubi: UBI device description object
 */
static void ubi_sysfs_close(struct ubi_device *ubi)
{
	device_remove_file(&ubi->dev, &dev_mtd_num);
	device_remove_file(&ubi->dev, &dev_bgt_enabled);
	device_remove_file(&ubi->dev, &dev_min_io_size);
	device_remove_file(&ubi->dev, &dev_max_vol_count);
	device_remove_file(&ubi->dev, &dev_bad_peb_count);
	device_remove_file(&ubi->dev, &dev_reserved_for_bad);
	device_remove_file(&ubi->dev, &dev_max_ec);
	device_remove_file(&ubi->dev, &dev_volumes_count);
	device_remove_file(&ubi->dev, &dev_total_eraseblocks);
	device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
	device_remove_file(&ubi->dev, &dev_eraseblock_size);
	device_unregister(&ubi->dev);
}

/**
 * kill_volumes - destroy all user volumes.
 * @ubi: UBI device description object
 */
static void kill_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i])
			ubi_free_volume(ubi, ubi->volumes[i]);
}

/**
 * uif_init - initialize user interfaces for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken, otherwise set to %0
 *
 * This function initializes various user interfaces for an UBI device. If the
 * initialization fails at an early stage, this function frees all the
 * resources it allocated, returns an error, and @ref is set to %0. However,
 * if the initialization fails after the UBI device was registered in the
 * driver core subsystem, this function takes a reference to @ubi->dev, because
 * otherwise the release function ('dev_release()') would free the whole @ubi
 * object. The @ref argument is set to %1 in this case. The caller has to put
 * this reference.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int uif_init(struct ubi_device *ubi, int *ref)
{
	int i, err;
	dev_t dev;

	*ref = 0;
	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);

	/*
	 * Major numbers for the UBI character devices are allocated
	 * dynamically. Major numbers of volume character devices are
	 * equivalent to ones of the corresponding UBI character device. Minor
	 * numbers of UBI character devices are 0, while minor numbers of
	 * volume character devices start from 1. Thus, we allocate one major
	 * number and ubi->vtbl_slots + 1 minor numbers.
	 */
	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
	if (err) {
		ubi_err("cannot register UBI character devices");
		return err;
	}

	ubi_assert(MINOR(dev) == 0);
	cdev_init(&ubi->cdev, &ubi_cdev_operations);
	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
	ubi->cdev.owner = THIS_MODULE;

	err = cdev_add(&ubi->cdev, dev, 1);
	if (err) {
		ubi_err("cannot add character device");
		goto out_unreg;
	}

	err = ubi_sysfs_init(ubi, ref);
	if (err)
		goto out_sysfs;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i]) {
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err("cannot add volume %d", i);
				goto out_volumes;
			}
		}

	return 0;

out_volumes:
	kill_volumes(ubi);
out_sysfs:
	if (*ref)
		get_device(&ubi->dev);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
out_unreg:
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
	ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
	return err;
}

/**
 * uif_close - close user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * Note, since this function un-registers UBI volume device objects (@vol->dev),
 * the memory allocated for the volumes is freed as well (in the release
 * function).
 */
static void uif_close(struct ubi_device *ubi)
{
	kill_volumes(ubi);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}

/**
 * free_internal_volumes - free internal volumes.
 * @ubi: UBI device description object
 */
static void free_internal_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = ubi->vtbl_slots;
	     i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
		kfree(ubi->volumes[i]->eba_tbl);
		kfree(ubi->volumes[i]);
	}
}

/**
 * attach_by_scanning - attach an MTD device using scanning method.
 * @ubi: UBI device descriptor
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 *
 * Note, currently this is the only method to attach UBI devices. Hopefully in
 * the future we'll have more scalable attaching methods and avoid full media
 * scanning. But even in this case scanning will be needed as a fall-back
 * attaching method if there are some on-flash table corruptions.
 */
static int attach_by_scanning(struct ubi_device *ubi)
{
	int err;
	struct ubi_scan_info *si;

	si = ubi_scan(ubi);
	if (IS_ERR(si))
		return PTR_ERR(si);

	ubi->bad_peb_count = si->bad_peb_count;
	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
	ubi->corr_peb_count = si->corr_peb_count;
	ubi->max_ec = si->max_ec;
	ubi->mean_ec = si->mean_ec;
	ubi_msg("max. sequence number: %llu", si->max_sqnum);

	err = ubi_read_volume_table(ubi, si);
	if (err)
		goto out_si;

	err = ubi_wl_init_scan(ubi, si);
	if (err)
		goto out_vtbl;

	err = ubi_eba_init_scan(ubi, si);
	if (err)
		goto out_wl;

	ubi_scan_destroy_si(si);
	return 0;

out_wl:
	ubi_wl_close(ubi);
out_vtbl:
	free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_si:
	ubi_scan_destroy_si(si);
	return err;
}

/**
 * io_init - initialize I/O sub-system for a given UBI device.
 * @ubi: UBI device description object
 *
 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
 * assumed:
 *   o EC header is always at offset zero - this cannot be changed;
 *   o VID header starts just after the EC header at the closest address
 *     aligned to @io->hdrs_min_io_size;
 *   o data starts just after the VID header at the closest address aligned to
 *     @io->min_io_size
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int io_init(struct ubi_device *ubi)
{
	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions. Different regions
		 * may have different eraseblock size and other
		 * characteristics. It looks like mostly multi-region flashes
		 * have one "main" region and one or more small regions to
		 * store boot loader code or boot parameters or whatever. I
		 * guess we should just pick the largest region. But this is
		 * not implemented.
		 */
		ubi_err("multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/*
	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
	 * physical eraseblocks maximum.
	 */

	ubi->peb_size = ubi->mtd->erasesize;
	ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
		ubi->bad_allowed = 1;

	if (ubi->mtd->type == MTD_NORFLASH) {
		ubi_assert(ubi->mtd->writesize == 1);
		ubi->nor_flash = 1;
	}

	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;

	/*
	 * Make sure minimal I/O unit is power of 2. Note, there is no
	 * fundamental reason for this assumption. It is just an optimization
	 * which allows us to avoid costly division operations.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err("min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	ubi->max_write_size = ubi->mtd->writebufsize;
	/*
	 * Maximum write size has to be greater than or equal to the minimal
	 * I/O size, and be a multiple of the minimal I/O size.
	 */
	if (ubi->max_write_size < ubi->min_io_size ||
	    ubi->max_write_size % ubi->min_io_size ||
	    !is_power_of_2(ubi->max_write_size)) {
		ubi_err("bad write buffer size %d for %d min. I/O unit",
			ubi->max_write_size, ubi->min_io_size);
		return -EINVAL;
	}

	/* Calculate default aligned sizes of EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_msg("min_io_size %d", ubi->min_io_size);
	dbg_msg("max_write_size %d", ubi->max_write_size);
	dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
	dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
					~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
				     ubi->vid_hdr_aloffset;
	}

	/* Similar for the data offset */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

	dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
	dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift);
	dbg_msg("leb_start %d", ubi->leb_start);

	/* The shift must be aligned to 32-bit boundary */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err("unaligned VID header shift %d",
			ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Check sanity */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err("bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * Set maximum amount of physical erroneous eraseblocks to be 10%.
	 * Erroneous PEBs are those which have read errors.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;
	if (ubi->max_erroneous < 16)
		ubi->max_erroneous = 16;
	dbg_msg("max_erroneous %d", ubi->max_erroneous);

	/*
	 * It may happen that EC and VID headers are situated in one minimal
	 * I/O unit. In this case we can only accept this UBI image in
	 * read-only mode.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn("EC and VID headers are in the same minimal I/O unit, "
			 "switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg("MTD device %d is write-protected, attach in "
			"read-only mode", ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
		ubi->peb_size, ubi->peb_size >> 10);
	ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
	ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
	if (ubi->hdrs_min_io_size != ubi->min_io_size)
		ubi_msg("sub-page size: %d",
			ubi->hdrs_min_io_size);
	ubi_msg("VID header offset: %d (aligned %d)",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
	ubi_msg("data offset: %d", ubi->leb_start);

	/*
	 * Note, ideally, we have to initialize ubi->bad_peb_count here. But
	 * unfortunately, MTD does not provide this information. We would have
	 * to loop over all physical eraseblocks and invoke
	 * mtd->block_is_bad() for each physical eraseblock. So we leave
	 * ubi->bad_peb_count uninitialized here and initialize it after
	 * scanning.
	 */

	return 0;
}
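
/*
 * A worked example of the offset calculations above (hypothetical geometry,
 * not taken from any particular flash): assume a NAND chip with 2048-byte
 * pages (min_io_size) and 512-byte sub-pages (hdrs_min_io_size); both headers
 * are 64 bytes long (see the BUILD_BUG_ON() checks in ubi_init()).
 *
 *   o Default case (vid_hdr_offset == 0): ec_hdr_alsize = ALIGN(64, 512) =
 *     512, so vid_hdr_offset = vid_hdr_aloffset = 512, vid_hdr_shift = 0,
 *     and leb_start = ALIGN(512 + 64, 2048) = 2048.
 *   o User-supplied vid_hdr_offset = 2048 (e.g. when sub-pages cannot be
 *     used): vid_hdr_aloffset = 2048 & ~511 = 2048, vid_hdr_shift = 0, and
 *     leb_start = ALIGN(2048 + 64, 2048) = 4096, i.e. one full page for each
 *     header area.
 */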

/**
 * autoresize - re-size the volume which has the "auto-resize" flag set.
 * @ubi: UBI device description object
 * @vol_id: ID of the volume to re-size
 *
 * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
 * the volume table to the largest possible size. See comments in ubi-header.h
 * for more description of the flag. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int autoresize(struct ubi_device *ubi, int vol_id)
{
	struct ubi_volume_desc desc;
	struct ubi_volume *vol = ubi->volumes[vol_id];
	int err, old_reserved_pebs = vol->reserved_pebs;

	/*
	 * Clear the auto-resize flag in the volume in-memory copy of the
	 * volume table, and 'ubi_resize_volume()' will propagate this change
	 * to the flash.
	 */
	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;

	if (ubi->avail_pebs == 0) {
		struct ubi_vtbl_record vtbl_rec;

		/*
		 * No available PEBs to re-size the volume, clear the flag on
		 * flash and exit.
		 */
		memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
		       sizeof(struct ubi_vtbl_record));
		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
		if (err)
			ubi_err("cannot clean auto-resize flag for volume %d",
				vol_id);
	} else {
		desc.vol = vol;
		err = ubi_resize_volume(&desc,
					old_reserved_pebs + ubi->avail_pebs);
		if (err)
			ubi_err("cannot auto-resize volume %d", vol_id);
	}

	if (err)
		return err;

	ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
		vol->name, old_reserved_pebs, vol->reserved_pebs);
	return 0;
}

/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device
 * @vid_hdr_offset: VID header offset
 *
 * This function attaches MTD device @mtd to UBI and assigns @ubi_num number
 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
 * which case this function finds a vacant device number and assigns it
 * automatically. Returns the new UBI device number in case of success and a
 * negative error code in case of failure.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
{
	struct ubi_device *ubi;
	int i, err, ref = 0;

	/*
	 * Check if we already have the same MTD device attached.
	 *
	 * Note, this function assumes that UBI devices creations and deletions
	 * are serialized, so it does not take the &ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd->index == ubi->mtd->index) {
			dbg_err("mtd%d is already attached to ubi%d",
				mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Make sure this MTD device is not emulated on top of an UBI volume
	 * already. Well, generally this recursion works fine, but there are
	 * different problems, e.g. the UBI module takes a reference to itself
	 * by attaching (and thus, opening) the emulated MTD device. This
	 * results in an inability to unload the module. And in general it
	 * makes no sense to attach emulated MTD devices, so we prohibit this.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		ubi_err("refuse attaching mtd%d - it is already emulated on "
			"top of UBI", mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			dbg_err("only %d UBI devices may be created",
				UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* Make sure ubi_num is not busy */
		if (ubi_devices[ubi_num]) {
			dbg_err("ubi%d already exists", ubi_num);
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;

	mutex_init(&ubi->buf_mutex);
	mutex_init(&ubi->ckvol_mutex);
	mutex_init(&ubi->device_mutex);
	spin_lock_init(&ubi->volumes_lock);

	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
	dbg_msg("sizeof(struct ubi_scan_leb) %zu", sizeof(struct ubi_scan_leb));
	dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));

	err = io_init(ubi);
	if (err)
		goto out_free;

	err = -ENOMEM;
	ubi->peb_buf1 = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf1)
		goto out_free;

	ubi->peb_buf2 = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf2)
		goto out_free;

	err = attach_by_scanning(ubi);
	if (err) {
		dbg_err("failed to attach by scanning, error %d", err);
		goto out_free;
	}

	if (ubi->autoresize_vol_id != -1) {
		err = autoresize(ubi, ubi->autoresize_vol_id);
		if (err)
			goto out_detach;
	}

	err = uif_init(ubi, &ref);
	if (err)
		goto out_detach;

	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
	if (IS_ERR(ubi->bgt_thread)) {
		err = PTR_ERR(ubi->bgt_thread);
		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
			err);
		goto out_uif;
	}

	ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
	ubi_msg("MTD device name: \"%s\"", mtd->name);
	ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
	ubi_msg("number of good PEBs: %d", ubi->good_peb_count);
	ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count);
	ubi_msg("number of corrupted PEBs: %d", ubi->corr_peb_count);
	ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots);
	ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD);
	ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
	ubi_msg("number of user volumes: %d",
		ubi->vol_count - UBI_INT_VOL_COUNT);
	ubi_msg("available PEBs: %d", ubi->avail_pebs);
	ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
	ubi_msg("number of PEBs reserved for bad PEB handling: %d",
		ubi->beb_rsvd_pebs);
	ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
	ubi_msg("image sequence number: %d", ubi->image_seq);

	/*
	 * The below lock makes sure we do not race with 'ubi_thread()' which
	 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
	 */
	spin_lock(&ubi->wl_lock);
	ubi->thread_enabled = 1;
	wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);

	ubi_devices[ubi_num] = ubi;
	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
	return ubi_num;

out_uif:
	uif_close(ubi);
out_detach:
	ubi_wl_close(ubi);
	free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_free:
	vfree(ubi->peb_buf1);
	vfree(ubi->peb_buf2);
	if (ref)
		put_device(&ubi->dev);
	else
		kfree(ubi);
	return err;
}

/**
 * ubi_detach_mtd_dev - detach an MTD device.
 * @ubi_num: UBI device number to detach from
 * @anyway: detach MTD even if device reference count is not zero
 *
 * This function destroys an UBI device number @ubi_num and detaches the
 * underlying MTD device. Returns zero in case of success and %-EBUSY if the
 * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
 * exist.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
	struct ubi_device *ubi;

	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
		return -EINVAL;

	ubi = ubi_get_device(ubi_num);
	if (!ubi)
		return -EINVAL;

	spin_lock(&ubi_devices_lock);
	put_device(&ubi->dev);
	ubi->ref_count -= 1;
	if (ubi->ref_count) {
		if (!anyway) {
			spin_unlock(&ubi_devices_lock);
			return -EBUSY;
		}
		/* This may only happen if there is a bug */
		ubi_err("%s reference count %d, destroy anyway",
			ubi->ubi_name, ubi->ref_count);
	}
	ubi_devices[ubi_num] = NULL;
	spin_unlock(&ubi_devices_lock);

	ubi_assert(ubi_num == ubi->ubi_num);
	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
	dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);

	/*
	 * Before freeing anything, we have to stop the background thread to
	 * prevent it from doing anything on this device while we are freeing.
	 */
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);

	/*
	 * Get a reference to the device in order to prevent 'dev_release()'
	 * from freeing the @ubi object.
	 */
	get_device(&ubi->dev);

	uif_close(ubi);
	ubi_wl_close(ubi);
	free_internal_volumes(ubi);
	vfree(ubi->vtbl);
	put_mtd_device(ubi->mtd);
	vfree(ubi->peb_buf1);
	vfree(ubi->peb_buf2);
	ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
	put_device(&ubi->dev);
	return 0;
}

/**
 * open_mtd_by_chdev - open an MTD device by its character device node path.
 * @mtd_dev: MTD character device node path
 *
 * This helper function opens an MTD device by its character device node path.
 * Returns MTD device description object in case of success and a negative
 * error code in case of failure.
 */
static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
{
	int err, major, minor, mode;
	struct path path;

	/* Probably this is an MTD character device node path */
	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
	if (err)
		return ERR_PTR(err);

	/* MTD device number is defined by the major / minor numbers */
	major = imajor(path.dentry->d_inode);
	minor = iminor(path.dentry->d_inode);
	mode = path.dentry->d_inode->i_mode;
	path_put(&path);
	if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
		return ERR_PTR(-EINVAL);

	if (minor & 1)
		/*
		 * We do not think that the read-only "/dev/mtdrX" devices
		 * need to be supported, so we do not support them to avoid
		 * doing extra work.
		 */
		return ERR_PTR(-EINVAL);

	return get_mtd_device(NULL, minor / 2);
}
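
/*
 * For instance, a conventional "/dev/mtd3" node is a character device with
 * major MTD_CHAR_MAJOR and minor 6, so the function above returns MTD device
 * number 6 / 2 = 3. The odd minor 7 would correspond to the read-only
 * "/dev/mtdr3" node, which is rejected by the 'minor & 1' check.
 */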

/**
 * open_mtd_device - open MTD device by name, character device path, or number.
 * @mtd_dev: name, character device node path, or MTD device number
 *
 * This function tries to open an MTD device described by the @mtd_dev string,
 * which is first treated as an ASCII MTD device number; if that fails, it is
 * treated as an MTD device name, and if that also fails, it is treated as an
 * MTD character device node path. Returns MTD device description object in
 * case of success and a negative error code in case of failure.
 */
static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
{
	struct mtd_info *mtd;
	int mtd_num;
	char *endp;

	mtd_num = simple_strtoul(mtd_dev, &endp, 0);
	if (*endp != '\0' || mtd_dev == endp) {
		/*
		 * This does not look like an ASCII integer, probably this is
		 * an MTD device name.
		 */
		mtd = get_mtd_device_nm(mtd_dev);
		if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
			/* Probably this is an MTD character device node path */
			mtd = open_mtd_by_chdev(mtd_dev);
	} else
		mtd = get_mtd_device(NULL, mtd_num);

	return mtd;
}

static int __init ubi_init(void)
{
	int err, i, k;

	/* Ensure that EC and VID headers have correct size */
	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);

	if (mtd_devs > UBI_MAX_DEVICES) {
		ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
		return -EINVAL;
	}

	/* Create base sysfs directory and sysfs files */
	ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
	if (IS_ERR(ubi_class)) {
		err = PTR_ERR(ubi_class);
		ubi_err("cannot create UBI class");
		goto out;
	}

	err = class_create_file(ubi_class, &ubi_version);
	if (err) {
		ubi_err("cannot create sysfs file");
		goto out_class;
	}

	err = misc_register(&ubi_ctrl_cdev);
	if (err) {
		ubi_err("cannot register device");
		goto out_version;
	}

	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
					      sizeof(struct ubi_wl_entry),
					      0, 0, NULL);
	if (!ubi_wl_entry_slab) {
		/* Without this, the error path would return 0 */
		err = -ENOMEM;
		goto out_dev_unreg;
	}

	/* Attach MTD devices */
	for (i = 0; i < mtd_devs; i++) {
		struct mtd_dev_param *p = &mtd_dev_param[i];
		struct mtd_info *mtd;

		cond_resched();

		mtd = open_mtd_device(p->name);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			goto out_detach;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
					 p->vid_hdr_offs);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0) {
			ubi_err("cannot attach mtd%d", mtd->index);
			put_mtd_device(mtd);

			/*
			 * Originally UBI stopped initializing on any error.
			 * However, later on it was found out that this
			 * behavior is not very good when UBI is compiled into
			 * the kernel and the MTD devices to attach are passed
			 * through the command line. Indeed, UBI failure
			 * stopped the whole boot sequence.
			 *
			 * To fix this, we changed the behavior for the
			 * non-module case, but preserved the old behavior for
			 * the module case, just for compatibility. This is a
			 * little inconsistent, though.
			 */
			if (ubi_is_module())
				goto out_detach;
		}
	}

	return 0;

out_detach:
	for (k = 0; k < i; k++)
		if (ubi_devices[k]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
	misc_deregister(&ubi_ctrl_cdev);
out_version:
	class_remove_file(ubi_class, &ubi_version);
out_class:
	class_destroy(ubi_class);
out:
	ubi_err("UBI error: cannot initialize UBI, error %d", err);
	return err;
}
module_init(ubi_init);

static void __exit ubi_exit(void)
{
	int i;

	for (i = 0; i < UBI_MAX_DEVICES; i++)
		if (ubi_devices[i]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	kmem_cache_destroy(ubi_wl_entry_slab);
	misc_deregister(&ubi_ctrl_cdev);
	class_remove_file(ubi_class, &ubi_version);
	class_destroy(ubi_class);
}
module_exit(ubi_exit);

/**
 * bytes_str_to_int - convert a number of bytes string into an integer.
 * @str: the string to convert
 *
 * This function returns the resulting positive integer in case of success and
 * a negative error code in case of failure.
 */
static int __init bytes_str_to_int(const char *str)
{
	char *endp;
	unsigned long result;

	result = simple_strtoul(str, &endp, 0);
	if (str == endp || result >= INT_MAX) {
		printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
		       str);
		return -EINVAL;
	}

	switch (*endp) {
	case 'G':
		result *= 1024;
	case 'M':
		result *= 1024;
	case 'K':
		result *= 1024;
		if (endp[1] == 'i' && endp[2] == 'B')
			endp += 2;
	case '\0':
		break;
	default:
		printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
		       str);
		return -EINVAL;
	}

	return result;
}
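
/*
 * Note, the fall-through in the switch above is deliberate: a "G" suffix is
 * multiplied by 1024 three times, "M" twice and "K" once. For example
 * (hypothetical inputs), "2048" yields 2048, "4KiB" yields 4096 and "1M"
 * yields 1048576, while "4T" hits the default branch and fails with -EINVAL.
 */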
is %d\n", val, MTD_PARAM_LEN_MAX); 1348 return -EINVAL; 1349 } 1350 1351 if (len == 0) { 1352 printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - " 1353 "ignored\n"); 1354 return 0; 1355 } 1356 1357 strcpy(buf, val); 1358 1359 /* Get rid of the final newline */ 1360 if (buf[len - 1] == '\n') 1361 buf[len - 1] = '\0'; 1362 1363 for (i = 0; i < 2; i++) 1364 tokens[i] = strsep(&pbuf, ","); 1365 1366 if (pbuf) { 1367 printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n", 1368 val); 1369 return -EINVAL; 1370 } 1371 1372 p = &mtd_dev_param[mtd_devs]; 1373 strcpy(&p->name[0], tokens[0]); 1374 1375 if (tokens[1]) 1376 p->vid_hdr_offs = bytes_str_to_int(tokens[1]); 1377 1378 if (p->vid_hdr_offs < 0) 1379 return p->vid_hdr_offs; 1380 1381 mtd_devs += 1; 1382 return 0; 1383 } 1384 1385 module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000); 1386 MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: " 1387 "mtd=<name|num|path>[,<vid_hdr_offs>].\n" 1388 "Multiple \"mtd\" parameters may be specified.\n" 1389 "MTD devices may be specified by their number, name, or " 1390 "path to the MTD character device node.\n" 1391 "Optional \"vid_hdr_offs\" parameter specifies UBI VID " 1392 "header position to be used by UBI.\n" 1393 "Example 1: mtd=/dev/mtd0 - attach MTD device " 1394 "/dev/mtd0.\n" 1395 "Example 2: mtd=content,1984 mtd=4 - attach MTD device " 1396 "with name \"content\" using VID header offset 1984, and " 1397 "MTD device number 4 with default VID header offset."); 1398 1399 MODULE_VERSION(__stringify(UBI_VERSION)); 1400 MODULE_DESCRIPTION("UBI - Unsorted Block Images"); 1401 MODULE_AUTHOR("Artem Bityutskiy"); 1402 MODULE_LICENSE("GPL"); 1403