/*
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём),
 *         Frank Haverkamp
 */

/*
 * This file includes UBI initialization and building of UBI devices.
 *
 * When UBI is initialized, it attaches all the MTD devices specified as the
 * module load parameters or the kernel boot parameters. If MTD devices were
 * not specified, UBI does not attach any MTD device, but it is possible to do
 * that later using the "UBI control device".
 *
 * At the moment we only attach UBI devices by scanning, which will become a
 * bottleneck when flashes reach a certain large size. Then one may improve
 * UBI and add other methods, although it does not seem to be easy to do.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "ubi.h"

/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64

#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
#define ubi_is_module() 0
#endif

/**
 * struct mtd_dev_param - MTD device parameter description data structure.
 * @name: MTD character device node path, MTD device name, or MTD device
 *        number string
 * @vid_hdr_offs: VID header offset
 */
struct mtd_dev_param {
	char name[MTD_PARAM_LEN_MAX];
	int vid_hdr_offs;
};

/* Number of elements set in the @mtd_dev_param array */
static int __initdata mtd_devs;

/* MTD devices specification parameters */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];

/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;

/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;

/* UBI control character device */
static struct miscdevice ubi_ctrl_cdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ubi_ctrl",
	.fops = &ubi_ctrl_cdev_operations,
};

/* All UBI devices in system */
static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];

/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);

/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);

/* "Show" method for files in '/<sysfs>/class/ubi/' */
static ssize_t ubi_version_show(struct class *class,
				struct class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", UBI_VERSION);
}

/* UBI version attribute ('/<sysfs>/class/ubi/version') */
static struct class_attribute ubi_version =
	__ATTR(version, S_IRUGO, ubi_version_show, NULL);

static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf);

/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_reserved_for_bad =
	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);

/**
 * ubi_volume_notify - send a volume change notification.
 * @ubi: UBI device description object
 * @vol: volume description object of the changed volume
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 *
 * This is a helper function which notifies all subscribers about a volume
 * change event (creation, removal, re-sizing, re-naming, updating). Returns
 * zero in case of success and a negative error code in case of failure.
 */
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
	struct ubi_notification nt;

	ubi_do_get_device_info(ubi, &nt.di);
	ubi_do_get_volume_info(ubi, vol, &nt.vi);
	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}

/**
 * ubi_notify_all - send a notification to all volumes.
 * @ubi: UBI device description object
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 * @nb: the notifier to call
 *
 * This function walks all volumes of UBI device @ubi and sends the @ntype
 * notification for each volume. If @nb is %NULL, then all registered notifiers
 * are called, otherwise only the @nb notifier is called. Returns the number of
 * sent notifications.
 */
int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
{
	struct ubi_notification nt;
	int i, count = 0;

	ubi_do_get_device_info(ubi, &nt.di);

	mutex_lock(&ubi->device_mutex);
	for (i = 0; i < ubi->vtbl_slots; i++) {
		/*
		 * Since the @ubi->device_mutex is locked, and we are not
		 * going to change @ubi->volumes, we do not have to lock
		 * @ubi->volumes_lock.
		 */
		if (!ubi->volumes[i])
			continue;

		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
		if (nb)
			nb->notifier_call(nb, ntype, &nt);
		else
			blocking_notifier_call_chain(&ubi_notifiers, ntype,
						     &nt);
		count += 1;
	}
	mutex_unlock(&ubi->device_mutex);

	return count;
}

/**
 * ubi_enumerate_volumes - send "add" notification for all existing volumes.
 * @nb: the notifier to call
 *
 * This function walks all UBI devices and volumes and sends the
 * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
 * registered notifiers are called, otherwise only the @nb notifier is called.
 * Returns the number of sent notifications.
 */
int ubi_enumerate_volumes(struct notifier_block *nb)
{
	int i, count = 0;

	/*
	 * Since the @ubi_devices_mutex is locked, and we are not going to
	 * change @ubi_devices, we do not have to lock @ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (!ubi)
			continue;
		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
	}

	return count;
}
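
/*
 * As an illustration of how the notification interface above is meant to be
 * consumed (this is only a sketch, not code built in this file; it assumes
 * the client-side helpers declared in include/linux/mtd/ubi.h, such as
 * 'ubi_register_volume_notifier()', and the 'struct ubi_notification'
 * payload passed to the chain), a subscriber could look roughly like:
 *
 *	static int example_vol_notify(struct notifier_block *nb,
 *				      unsigned long ntype, void *ns_ptr)
 *	{
 *		struct ubi_notification *nt = ns_ptr;
 *
 *		if (ntype == UBI_VOLUME_ADDED)
 *			pr_info("volume %d added on ubi%d\n",
 *				nt->vi.vol_id, nt->vi.ubi_num);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_vol_nb = {
 *		.notifier_call = example_vol_notify,
 *	};
 *
 *	err = ubi_register_volume_notifier(&example_vol_nb, 0);
 *
 * Passing 0 as the second argument makes registration also enumerate the
 * already existing volumes via 'ubi_enumerate_volumes()'.
 */
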
/**
 * ubi_get_device - get UBI device.
 * @ubi_num: UBI device number
 *
 * This function returns UBI device description object for UBI device number
 * @ubi_num, or %NULL if the device does not exist. This function increases the
 * device reference count to prevent removal of the device. In other words, the
 * device cannot be removed if its reference count is not zero.
 */
struct ubi_device *ubi_get_device(int ubi_num)
{
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	ubi = ubi_devices[ubi_num];
	if (ubi) {
		ubi_assert(ubi->ref_count >= 0);
		ubi->ref_count += 1;
		get_device(&ubi->dev);
	}
	spin_unlock(&ubi_devices_lock);

	return ubi;
}

/**
 * ubi_put_device - drop an UBI device reference.
 * @ubi: UBI device description object
 */
void ubi_put_device(struct ubi_device *ubi)
{
	spin_lock(&ubi_devices_lock);
	ubi->ref_count -= 1;
	put_device(&ubi->dev);
	spin_unlock(&ubi_devices_lock);
}

/**
 * ubi_get_by_major - get UBI device by character device major number.
 * @major: major number
 *
 * This function is similar to 'ubi_get_device()', but it searches the device
 * by its major number.
 */
struct ubi_device *ubi_get_by_major(int major)
{
	int i;
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_assert(ubi->ref_count >= 0);
			ubi->ref_count += 1;
			get_device(&ubi->dev);
			spin_unlock(&ubi_devices_lock);
			return ubi;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return NULL;
}

/**
 * ubi_major2num - get UBI device number by character device major number.
 * @major: major number
 *
 * This function searches for the UBI device by its major number. If the UBI
 * device was not found, this function returns -ENODEV, otherwise the UBI
 * device number is returned.
 */
int ubi_major2num(int major)
{
	int i, ubi_num = -ENODEV;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_num = ubi->ubi_num;
			break;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return ubi_num;
}

/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t ret;
	struct ubi_device *ubi;

	/*
	 * The below code looks weird, but it actually makes sense. We get the
	 * UBI device reference from the contained 'struct ubi_device'. But it
	 * is unclear whether the device has been removed in the meantime.
	 * Indeed, if the device was removed before we increased its reference
	 * count, 'ubi_get_device()' returns %NULL and we fail with -ENODEV.
	 *
	 * Remember, 'struct ubi_device' is freed in the release function, so
	 * we still can use 'ubi->ubi_num'.
	 */
	ubi = container_of(dev, struct ubi_device, dev);
	ubi = ubi_get_device(ubi->ubi_num);
	if (!ubi)
		return -ENODEV;

	if (attr == &dev_eraseblock_size)
		ret = sprintf(buf, "%d\n", ubi->leb_size);
	else if (attr == &dev_avail_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
	else if (attr == &dev_total_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
	else if (attr == &dev_volumes_count)
		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
	else if (attr == &dev_max_ec)
		ret = sprintf(buf, "%d\n", ubi->max_ec);
	else if (attr == &dev_reserved_for_bad)
		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
	else if (attr == &dev_bad_peb_count)
		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
	else if (attr == &dev_max_vol_count)
		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
	else if (attr == &dev_min_io_size)
		ret = sprintf(buf, "%d\n", ubi->min_io_size);
	else if (attr == &dev_bgt_enabled)
		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
	else if (attr == &dev_mtd_num)
		ret = sprintf(buf, "%d\n", ubi->mtd->index);
	else
		ret = -EINVAL;

	ubi_put_device(ubi);
	return ret;
}

static void dev_release(struct device *dev)
{
	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);

	kfree(ubi);
}

/**
 * ubi_sysfs_init - initialize sysfs for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
{
	int err;

	ubi->dev.release = dev_release;
	ubi->dev.devt = ubi->cdev.dev;
	ubi->dev.class = ubi_class;
	dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
	err = device_register(&ubi->dev);
	if (err)
		return err;

	*ref = 1;
	err = device_create_file(&ubi->dev, &dev_eraseblock_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_volumes_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_max_ec);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_bad_peb_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_max_vol_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_min_io_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_bgt_enabled);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_mtd_num);
	return err;
}

/**
 * ubi_sysfs_close - close sysfs for an UBI device.
 * @ubi: UBI device description object
 */
static void ubi_sysfs_close(struct ubi_device *ubi)
{
	device_remove_file(&ubi->dev, &dev_mtd_num);
	device_remove_file(&ubi->dev, &dev_bgt_enabled);
	device_remove_file(&ubi->dev, &dev_min_io_size);
	device_remove_file(&ubi->dev, &dev_max_vol_count);
	device_remove_file(&ubi->dev, &dev_bad_peb_count);
	device_remove_file(&ubi->dev, &dev_reserved_for_bad);
	device_remove_file(&ubi->dev, &dev_max_ec);
	device_remove_file(&ubi->dev, &dev_volumes_count);
	device_remove_file(&ubi->dev, &dev_total_eraseblocks);
	device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
	device_remove_file(&ubi->dev, &dev_eraseblock_size);
	device_unregister(&ubi->dev);
}

/**
 * kill_volumes - destroy all user volumes.
 * @ubi: UBI device description object
 */
static void kill_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i])
			ubi_free_volume(ubi, ubi->volumes[i]);
}

/**
 * uif_init - initialize user interfaces for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken, otherwise set to %0
 *
 * This function initializes various user interfaces for an UBI device. If the
 * initialization fails at an early stage, this function frees all the
 * resources it allocated, returns an error, and @ref is set to %0. However,
 * if the initialization fails after the UBI device was registered in the
 * driver core subsystem, this function takes a reference to @ubi->dev, because
 * otherwise the release function ('dev_release()') would free the whole @ubi
 * object. The @ref argument is set to %1 in this case. The caller has to put
 * this reference.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int uif_init(struct ubi_device *ubi, int *ref)
{
	int i, err;
	dev_t dev;

	*ref = 0;
	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);

	/*
	 * Major numbers for the UBI character devices are allocated
	 * dynamically. Major numbers of volume character devices are
	 * equivalent to ones of the corresponding UBI character device. Minor
	 * numbers of UBI character devices are 0, while minor numbers of
	 * volume character devices start from 1. Thus, we allocate one major
	 * number and ubi->vtbl_slots + 1 minor numbers.
	 */
	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
	if (err) {
		ubi_err("cannot register UBI character devices");
		return err;
	}

	ubi_assert(MINOR(dev) == 0);
	cdev_init(&ubi->cdev, &ubi_cdev_operations);
	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
	ubi->cdev.owner = THIS_MODULE;

	err = cdev_add(&ubi->cdev, dev, 1);
	if (err) {
		ubi_err("cannot add character device");
		goto out_unreg;
	}

	err = ubi_sysfs_init(ubi, ref);
	if (err)
		goto out_sysfs;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i]) {
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err("cannot add volume %d", i);
				goto out_volumes;
			}
		}

	return 0;

out_volumes:
	kill_volumes(ubi);
out_sysfs:
	if (*ref)
		get_device(&ubi->dev);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
out_unreg:
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
	ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
	return err;
}
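
/*
 * To illustrate the numbering scheme used in uif_init() above (the numbers
 * here are only an example, and the volume character devices themselves are
 * created elsewhere, in the volume management code): if ubiX got dynamic
 * major M, then the "ubiX" node is (M, 0), and the node of volume Y on that
 * device is (M, Y + 1), i.e. roughly MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1).
 */
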
/**
 * uif_close - close user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * Note, since this function un-registers UBI volume device objects
 * (@vol->dev), the memory allocated for the volumes is freed as well (in the
 * release function).
 */
static void uif_close(struct ubi_device *ubi)
{
	kill_volumes(ubi);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}

/**
 * free_internal_volumes - free internal volumes.
 * @ubi: UBI device description object
 */
static void free_internal_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = ubi->vtbl_slots;
	     i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
		kfree(ubi->volumes[i]->eba_tbl);
		kfree(ubi->volumes[i]);
	}
}

/**
 * attach_by_scanning - attach an MTD device using the scanning method.
 * @ubi: UBI device descriptor
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 *
 * Note, currently this is the only method to attach UBI devices. Hopefully in
 * the future we'll have more scalable attaching methods and avoid full media
 * scanning. But even in this case scanning will be needed as a fall-back
 * attaching method if there are some on-flash table corruptions.
 */
static int attach_by_scanning(struct ubi_device *ubi)
{
	int err;
	struct ubi_scan_info *si;

	si = ubi_scan(ubi);
	if (IS_ERR(si))
		return PTR_ERR(si);

	ubi->bad_peb_count = si->bad_peb_count;
	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
	ubi->corr_peb_count = si->corr_peb_count;
	ubi->max_ec = si->max_ec;
	ubi->mean_ec = si->mean_ec;
	ubi_msg("max. sequence number: %llu", si->max_sqnum);

	err = ubi_read_volume_table(ubi, si);
	if (err)
		goto out_si;

	err = ubi_wl_init_scan(ubi, si);
	if (err)
		goto out_vtbl;

	err = ubi_eba_init_scan(ubi, si);
	if (err)
		goto out_wl;

	ubi_scan_destroy_si(si);
	return 0;

out_wl:
	ubi_wl_close(ubi);
out_vtbl:
	free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_si:
	ubi_scan_destroy_si(si);
	return err;
}

/**
 * io_init - initialize I/O sub-system for a given UBI device.
 * @ubi: UBI device description object
 *
 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
 * assumed:
 *   o EC header is always at offset zero - this cannot be changed;
 *   o VID header starts just after the EC header at the closest address
 *     aligned to @io->hdrs_min_io_size;
 *   o data starts just after the VID header at the closest address aligned to
 *     @io->min_io_size.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int io_init(struct ubi_device *ubi)
{
	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions. Different regions
		 * may have different eraseblock size and other
		 * characteristics. It looks like mostly multi-region flashes
		 * have one "main" region and one or more small regions to
		 * store boot loader code or boot parameters or whatever. I
		 * guess we should just pick the largest region. But this is
		 * not implemented.
		 */
		ubi_err("multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/*
	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
	 * physical eraseblocks maximum.
	 */

	ubi->peb_size = ubi->mtd->erasesize;
	ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
		ubi->bad_allowed = 1;

	if (ubi->mtd->type == MTD_NORFLASH) {
		ubi_assert(ubi->mtd->writesize == 1);
		ubi->nor_flash = 1;
	}

	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;

	/*
	 * Make sure minimal I/O unit is power of 2. Note, there is no
	 * fundamental reason for this assumption. It is just an optimization
	 * which allows us to avoid costly division operations.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err("min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	/* Calculate default aligned sizes of EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_msg("min_io_size %d", ubi->min_io_size);
	dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
	dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
					~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
				     ubi->vid_hdr_aloffset;
	}

	/* Similar for the data offset */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

	dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
	dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift);
	dbg_msg("leb_start %d", ubi->leb_start);

	/* The shift must be aligned to 32-bit boundary */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err("unaligned VID header shift %d",
			ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Check sanity */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err("bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * Set the maximum number of erroneous physical eraseblocks to 10% of
	 * the total. Erroneous PEBs are those which have read errors.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;
	if (ubi->max_erroneous < 16)
		ubi->max_erroneous = 16;
	dbg_msg("max_erroneous %d", ubi->max_erroneous);

	/*
	 * It may happen that EC and VID headers are situated in one minimal
	 * I/O unit. In this case we can only accept this UBI image in
	 * read-only mode.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn("EC and VID headers are in the same minimal I/O unit, "
			 "switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg("MTD device %d is write-protected, attach in "
			"read-only mode", ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
		ubi->peb_size, ubi->peb_size >> 10);
	ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
	ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
	if (ubi->hdrs_min_io_size != ubi->min_io_size)
		ubi_msg("sub-page size: %d",
			ubi->hdrs_min_io_size);
	ubi_msg("VID header offset: %d (aligned %d)",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
	ubi_msg("data offset: %d", ubi->leb_start);
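
	/*
	 * As a worked example of the layout computed above (hypothetical
	 * numbers, not taken from any particular flash): for NAND with
	 * 2048-byte pages and 512-byte sub-pages, min_io_size is 2048 and
	 * hdrs_min_io_size is 512, so ec_hdr_alsize and vid_hdr_alsize are
	 * both 512, the default vid_hdr_offset and vid_hdr_aloffset are 512
	 * (vid_hdr_shift is 0), and leb_start is ALIGN(512 + 64, 2048), i.e.
	 * 2048 bytes, leaving leb_size = peb_size - 2048.
	 */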

	/*
	 * Note, ideally, we have to initialize ubi->bad_peb_count here. But
	 * unfortunately, MTD does not provide this information. We should loop
	 * over all physical eraseblocks and invoke mtd->block_is_bad() for
	 * each physical eraseblock. So, we leave ubi->bad_peb_count
	 * uninitialized here and initialize it after scanning.
	 */

	return 0;
}

/**
 * autoresize - re-size the volume which has the "auto-resize" flag set.
 * @ubi: UBI device description object
 * @vol_id: ID of the volume to re-size
 *
 * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
 * the volume table to the largest possible size. See comments in ubi-header.h
 * for more description of the flag. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int autoresize(struct ubi_device *ubi, int vol_id)
{
	struct ubi_volume_desc desc;
	struct ubi_volume *vol = ubi->volumes[vol_id];
	int err, old_reserved_pebs = vol->reserved_pebs;

	/*
	 * Clear the auto-resize flag in the volume in-memory copy of the
	 * volume table, and 'ubi_resize_volume()' will propagate this change
	 * to the flash.
	 */
	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;

	if (ubi->avail_pebs == 0) {
		struct ubi_vtbl_record vtbl_rec;

		/*
		 * No available PEBs to re-size the volume, clear the flag on
		 * flash and exit.
		 */
		memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
		       sizeof(struct ubi_vtbl_record));
		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
		if (err)
			ubi_err("cannot clean auto-resize flag for volume %d",
				vol_id);
	} else {
		desc.vol = vol;
		err = ubi_resize_volume(&desc,
					old_reserved_pebs + ubi->avail_pebs);
		if (err)
			ubi_err("cannot auto-resize volume %d", vol_id);
	}

	if (err)
		return err;

	ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
		vol->name, old_reserved_pebs, vol->reserved_pebs);
	return 0;
}

/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device
 * @vid_hdr_offset: VID header offset
 *
 * This function attaches MTD device @mtd to UBI and assigns @ubi_num to the
 * newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in which
 * case this function finds a vacant device number and assigns it
 * automatically. Returns the new UBI device number in case of success and a
 * negative error code in case of failure.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
{
	struct ubi_device *ubi;
	int i, err, ref = 0;

	/*
	 * Check if we already have the same MTD device attached.
	 *
	 * Note, this function assumes that UBI devices creations and deletions
	 * are serialized, so it does not take the &ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd->index == ubi->mtd->index) {
			dbg_err("mtd%d is already attached to ubi%d",
				mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Make sure this MTD device is not emulated on top of an UBI volume
	 * already. Well, generally this recursion works fine, but there are
	 * different problems like the UBI module takes a reference to itself
	 * by attaching (and thus, opening) the emulated MTD device. This
	 * results in inability to unload the module. And in general it makes
	 * no sense to attach emulated MTD devices, so we prohibit this.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		ubi_err("refuse attaching mtd%d - it is already emulated on "
			"top of UBI", mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			dbg_err("only %d UBI devices may be created",
				UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* Make sure ubi_num is not busy */
		if (ubi_devices[ubi_num]) {
			dbg_err("ubi%d already exists", ubi_num);
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;

	mutex_init(&ubi->buf_mutex);
	mutex_init(&ubi->ckvol_mutex);
	mutex_init(&ubi->device_mutex);
	spin_lock_init(&ubi->volumes_lock);

	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);

	err = io_init(ubi);
	if (err)
		goto out_free;

	err = -ENOMEM;
	ubi->peb_buf1 = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf1)
		goto out_free;

	ubi->peb_buf2 = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf2)
		goto out_free;

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
	mutex_init(&ubi->dbg_buf_mutex);
	ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
	if (!ubi->dbg_peb_buf)
		goto out_free;
#endif

	err = attach_by_scanning(ubi);
	if (err) {
		dbg_err("failed to attach by scanning, error %d", err);
		goto out_free;
	}

	if (ubi->autoresize_vol_id != -1) {
		err = autoresize(ubi, ubi->autoresize_vol_id);
		if (err)
			goto out_detach;
	}

	err = uif_init(ubi, &ref);
	if (err)
		goto out_detach;

	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
	if (IS_ERR(ubi->bgt_thread)) {
		err = PTR_ERR(ubi->bgt_thread);
		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
			err);
		goto out_uif;
	}

	ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
	ubi_msg("MTD device name: \"%s\"", mtd->name);
	ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
	ubi_msg("number of good PEBs: %d", ubi->good_peb_count);
	ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count);
	ubi_msg("number of corrupted PEBs: %d", ubi->corr_peb_count);
	ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots);
	ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD);
	ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
	ubi_msg("number of user volumes: %d",
		ubi->vol_count - UBI_INT_VOL_COUNT);
	ubi_msg("available PEBs: %d", ubi->avail_pebs);
	ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
	ubi_msg("number of PEBs reserved for bad PEB handling: %d",
		ubi->beb_rsvd_pebs);
	ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
	ubi_msg("image sequence number: %d", ubi->image_seq);

	/*
	 * The below lock makes sure we do not race with 'ubi_thread()' which
	 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
	 */
	spin_lock(&ubi->wl_lock);
	if (!DBG_DISABLE_BGT)
		ubi->thread_enabled = 1;
	wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);

	ubi_devices[ubi_num] = ubi;
	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
	return ubi_num;

out_uif:
	uif_close(ubi);
out_detach:
	ubi_wl_close(ubi);
	free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_free:
	vfree(ubi->peb_buf1);
	vfree(ubi->peb_buf2);
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
	vfree(ubi->dbg_peb_buf);
#endif
	if (ref)
		put_device(&ubi->dev);
	else
		kfree(ubi);
	return err;
}

/**
 * ubi_detach_mtd_dev - detach an MTD device.
 * @ubi_num: UBI device number to detach from
 * @anyway: detach MTD even if device reference count is not zero
 *
 * This function destroys an UBI device number @ubi_num and detaches the
 * underlying MTD device. Returns zero in case of success and %-EBUSY if the
 * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
 * exist.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
	struct ubi_device *ubi;

	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
		return -EINVAL;

	ubi = ubi_get_device(ubi_num);
	if (!ubi)
		return -EINVAL;

	spin_lock(&ubi_devices_lock);
	put_device(&ubi->dev);
	ubi->ref_count -= 1;
	if (ubi->ref_count) {
		if (!anyway) {
			spin_unlock(&ubi_devices_lock);
			return -EBUSY;
		}
		/* This may only happen if there is a bug */
		ubi_err("%s reference count %d, destroy anyway",
			ubi->ubi_name, ubi->ref_count);
	}
	ubi_devices[ubi_num] = NULL;
	spin_unlock(&ubi_devices_lock);

	ubi_assert(ubi_num == ubi->ubi_num);
	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
	dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);

	/*
	 * Before freeing anything, we have to stop the background thread to
	 * prevent it from doing anything on this device while we are freeing.
	 */
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);

	/*
	 * Get a reference to the device in order to prevent 'dev_release()'
	 * from freeing the @ubi object.
	 */
	get_device(&ubi->dev);

	uif_close(ubi);
	ubi_wl_close(ubi);
	free_internal_volumes(ubi);
	vfree(ubi->vtbl);
	put_mtd_device(ubi->mtd);
	vfree(ubi->peb_buf1);
	vfree(ubi->peb_buf2);
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
	vfree(ubi->dbg_peb_buf);
#endif
	ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
	put_device(&ubi->dev);
	return 0;
}

/**
 * open_mtd_by_chdev - open an MTD device by its character device node path.
 * @mtd_dev: MTD character device node path
 *
 * This helper function opens an MTD device by its character device node path.
 * Returns MTD device description object in case of success and a negative
 * error code in case of failure.
 */
static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
{
	int err, major, minor, mode;
	struct path path;

	/* Probably this is an MTD character device node path */
	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
	if (err)
		return ERR_PTR(err);

	/* MTD device number is defined by the major / minor numbers */
	major = imajor(path.dentry->d_inode);
	minor = iminor(path.dentry->d_inode);
	mode = path.dentry->d_inode->i_mode;
	path_put(&path);
	if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
		return ERR_PTR(-EINVAL);

	if (minor & 1)
		/*
		 * We do not think support for the read-only "/dev/mtdrX"
		 * devices is needed, so we do not support them, to avoid
		 * doing extra work.
		 */
		return ERR_PTR(-EINVAL);

	return get_mtd_device(NULL, minor / 2);
}
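
/*
 * A short illustration of the mapping implemented above (the device names
 * are just examples): "/dev/mtd3" is the character device (MTD_CHAR_MAJOR, 6),
 * so the function opens MTD device number 6 / 2 = 3, whereas "/dev/mtdr3"
 * has the odd minor number 7 and is rejected with -EINVAL.
 */
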
/**
 * open_mtd_device - open MTD device by name, character device path, or number.
 * @mtd_dev: name, character device node path, or MTD device number
 *
 * This function tries to open an MTD device described by the @mtd_dev string,
 * which is first treated as an ASCII MTD device number, and if that fails, it
 * is treated as an MTD device name, and if that also fails, it is treated as
 * an MTD character device node path. Returns MTD device description object in
 * case of success and a negative error code in case of failure.
 */
static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
{
	struct mtd_info *mtd;
	int mtd_num;
	char *endp;

	mtd_num = simple_strtoul(mtd_dev, &endp, 0);
	if (*endp != '\0' || mtd_dev == endp) {
		/*
		 * This does not look like an ASCII integer, probably this is
		 * an MTD device name.
		 */
		mtd = get_mtd_device_nm(mtd_dev);
		if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
			/* Probably this is an MTD character device node path */
			mtd = open_mtd_by_chdev(mtd_dev);
	} else
		mtd = get_mtd_device(NULL, mtd_num);

	return mtd;
}

static int __init ubi_init(void)
{
	int err, i, k;

	/* Ensure that EC and VID headers have correct size */
	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);

	if (mtd_devs > UBI_MAX_DEVICES) {
		ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
		return -EINVAL;
	}

	/* Create base sysfs directory and sysfs files */
	ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
	if (IS_ERR(ubi_class)) {
		err = PTR_ERR(ubi_class);
		ubi_err("cannot create UBI class");
		goto out;
	}

	err = class_create_file(ubi_class, &ubi_version);
	if (err) {
		ubi_err("cannot create sysfs file");
		goto out_class;
	}

	err = misc_register(&ubi_ctrl_cdev);
	if (err) {
		ubi_err("cannot register device");
		goto out_version;
	}

	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
					      sizeof(struct ubi_wl_entry),
					      0, 0, NULL);
	if (!ubi_wl_entry_slab) {
		err = -ENOMEM;
		goto out_dev_unreg;
	}

	/* Attach MTD devices */
	for (i = 0; i < mtd_devs; i++) {
		struct mtd_dev_param *p = &mtd_dev_param[i];
		struct mtd_info *mtd;

		cond_resched();

		mtd = open_mtd_device(p->name);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			goto out_detach;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
					 p->vid_hdr_offs);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0) {
			ubi_err("cannot attach mtd%d", mtd->index);
			put_mtd_device(mtd);

			/*
			 * Originally UBI stopped initializing on any error.
			 * However, later on it was found out that this
			 * behavior is not very good when UBI is compiled into
			 * the kernel and the MTD devices to attach are passed
			 * through the command line. Indeed, UBI failure
			 * stopped the whole boot sequence.
			 *
			 * To fix this, we changed the behavior for the
			 * non-module case, but preserved the old behavior for
			 * the module case, just for compatibility. This is a
			 * little inconsistent, though.
			 */
			if (ubi_is_module())
				goto out_detach;
		}
	}

	return 0;

out_detach:
	for (k = 0; k < i; k++)
		if (ubi_devices[k]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
	misc_deregister(&ubi_ctrl_cdev);
out_version:
	class_remove_file(ubi_class, &ubi_version);
out_class:
	class_destroy(ubi_class);
out:
	ubi_err("UBI error: cannot initialize UBI, error %d", err);
	return err;
}
module_init(ubi_init);

static void __exit ubi_exit(void)
{
	int i;

	for (i = 0; i < UBI_MAX_DEVICES; i++)
		if (ubi_devices[i]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	kmem_cache_destroy(ubi_wl_entry_slab);
	misc_deregister(&ubi_ctrl_cdev);
	class_remove_file(ubi_class, &ubi_version);
	class_destroy(ubi_class);
}
module_exit(ubi_exit);

/**
 * bytes_str_to_int - convert a number of bytes string into an integer.
 * @str: the string to convert
 *
 * This function returns the resulting positive integer in case of success and
 * a negative error code in case of failure.
 */
static int __init bytes_str_to_int(const char *str)
{
	char *endp;
	unsigned long result;

	result = simple_strtoul(str, &endp, 0);
	if (str == endp || result >= INT_MAX) {
		printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
		       str);
		return -EINVAL;
	}

	switch (*endp) {
	case 'G':
		result *= 1024;
		/* fall through */
	case 'M':
		result *= 1024;
		/* fall through */
	case 'K':
		result *= 1024;
		if (endp[1] == 'i' && endp[2] == 'B')
			endp += 2;
		/* fall through */
	case '\0':
		break;
	default:
		printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
		       str);
		return -EINVAL;
	}

	return result;
}
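
/*
 * A few worked examples of the suffix handling above (the input strings are
 * illustrative only): "2048" yields 2048, "2K" and "2KiB" both yield 2048,
 * and "1MiB" yields 1048576 because the 'M' case falls through to the 'K'
 * case.
 */
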
is %d\n", val, MTD_PARAM_LEN_MAX); 1346 return -EINVAL; 1347 } 1348 1349 if (len == 0) { 1350 printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - " 1351 "ignored\n"); 1352 return 0; 1353 } 1354 1355 strcpy(buf, val); 1356 1357 /* Get rid of the final newline */ 1358 if (buf[len - 1] == '\n') 1359 buf[len - 1] = '\0'; 1360 1361 for (i = 0; i < 2; i++) 1362 tokens[i] = strsep(&pbuf, ","); 1363 1364 if (pbuf) { 1365 printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n", 1366 val); 1367 return -EINVAL; 1368 } 1369 1370 p = &mtd_dev_param[mtd_devs]; 1371 strcpy(&p->name[0], tokens[0]); 1372 1373 if (tokens[1]) 1374 p->vid_hdr_offs = bytes_str_to_int(tokens[1]); 1375 1376 if (p->vid_hdr_offs < 0) 1377 return p->vid_hdr_offs; 1378 1379 mtd_devs += 1; 1380 return 0; 1381 } 1382 1383 module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000); 1384 MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: " 1385 "mtd=<name|num|path>[,<vid_hdr_offs>].\n" 1386 "Multiple \"mtd\" parameters may be specified.\n" 1387 "MTD devices may be specified by their number, name, or " 1388 "path to the MTD character device node.\n" 1389 "Optional \"vid_hdr_offs\" parameter specifies UBI VID " 1390 "header position to be used by UBI.\n" 1391 "Example 1: mtd=/dev/mtd0 - attach MTD device " 1392 "/dev/mtd0.\n" 1393 "Example 2: mtd=content,1984 mtd=4 - attach MTD device " 1394 "with name \"content\" using VID header offset 1984, and " 1395 "MTD device number 4 with default VID header offset."); 1396 1397 MODULE_VERSION(__stringify(UBI_VERSION)); 1398 MODULE_DESCRIPTION("UBI - Unsorted Block Images"); 1399 MODULE_AUTHOR("Artem Bityutskiy"); 1400 MODULE_LICENSE("GPL"); 1401