/*
 * Copyright (c) 2014 Ezequiel Garcia
 * Copyright (c) 2011 Free Electrons
 *
 * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2007
 * Authors: Artem Bityutskiy, Frank Haverkamp
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 */

/*
 * Read-only block devices on top of UBI volumes
 *
 * A simple implementation to allow a block device to be layered on top of a
 * UBI volume. The implementation is provided by creating a static 1-to-1
 * mapping between the block device and the UBI volume.
 *
 * The addressed byte is obtained from the addressed block sector, which is
 * mapped linearly into the corresponding LEB:
 *
 *   LEB number = addressed byte / LEB size
 *
 * This feature is compiled into the UBI core, and adds a 'block' parameter
 * to allow early creation of block devices on top of UBI volumes. Runtime
 * block creation/removal for UBI volumes is provided through two UBI ioctls:
 * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
 */
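/*
 * Illustrative example of the mapping above (editorial note; the numbers
 * assume a 128 KiB usable LEB size, real sizes depend on the flash):
 * sector 300 addresses byte 300 * 512 = 153600, which maps to
 * LEB 153600 / 131072 = 1 at offset 153600 % 131072 = 22528.
 */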
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mtd/ubi.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <asm/div64.h>

#include "ubi-media.h"
#include "ubi.h"

/* Maximum number of supported devices */
#define UBIBLOCK_MAX_DEVICES 32

/* Maximum length of the 'block=' parameter */
#define UBIBLOCK_PARAM_LEN 63

/* Maximum number of comma-separated items in the 'block=' parameter */
#define UBIBLOCK_PARAM_COUNT 2

struct ubiblock_param {
        int ubi_num;
        int vol_id;
        char name[UBIBLOCK_PARAM_LEN+1];
};

/* Number of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;

/* ubiblock devices specification parameters */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;

struct ubiblock {
        struct ubi_volume_desc *desc;
        int ubi_num;
        int vol_id;
        int refcnt;
        int leb_size;

        struct gendisk *gd;
        struct request_queue *rq;

        struct workqueue_struct *wq;
        struct work_struct work;

        struct mutex dev_mutex;
        spinlock_t queue_lock;
        struct list_head list;
};

/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;

static int __init ubiblock_set_param(const char *val,
                                     const struct kernel_param *kp)
{
        int i, ret;
        size_t len;
        struct ubiblock_param *param;
        char buf[UBIBLOCK_PARAM_LEN];
        char *pbuf = &buf[0];
        char *tokens[UBIBLOCK_PARAM_COUNT];

        if (!val)
                return -EINVAL;

        len = strnlen(val, UBIBLOCK_PARAM_LEN);
        if (len == 0) {
                pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
                return 0;
        }

        if (len == UBIBLOCK_PARAM_LEN) {
                pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
                       val, UBIBLOCK_PARAM_LEN);
                return -EINVAL;
        }

        strcpy(buf, val);

        /* Get rid of the final newline */
        if (buf[len - 1] == '\n')
                buf[len - 1] = '\0';

        for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
                tokens[i] = strsep(&pbuf, ",");

        /* Reject the parameter if the static parameter array is full */
        if (ubiblock_devs == UBIBLOCK_MAX_DEVICES) {
                pr_err("UBI: block: too many 'block=' parameters, max. is %d\n",
                       UBIBLOCK_MAX_DEVICES);
                return -ENOSPC;
        }

        param = &ubiblock_param[ubiblock_devs];
        if (tokens[1]) {
                /* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
                ret = kstrtoint(tokens[0], 10, &param->ubi_num);
                if (ret < 0)
                        return -EINVAL;

                /* Second param can be a number or a name */
                ret = kstrtoint(tokens[1], 10, &param->vol_id);
                if (ret < 0) {
                        param->vol_id = -1;
                        strcpy(param->name, tokens[1]);
                }

        } else {
                /* One parameter: must be device path */
                strcpy(param->name, tokens[0]);
                param->ubi_num = -1;
                param->vol_id = -1;
        }

        ubiblock_devs++;

        return 0;
}

static struct kernel_param_ops ubiblock_param_ops = {
        .set = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
                        "Multiple \"block\" parameters may be specified.\n"
                        "UBI volumes may be specified by their number, name, or path to the device node.\n"
                        "Examples:\n"
                        "Using the UBI volume path:\n"
                        "ubi.block=/dev/ubi0_0\n"
                        "Using the UBI device, and the volume name:\n"
                        "ubi.block=0,rootfs\n"
                        "Using both UBI device number and UBI volume number:\n"
                        "ubi.block=0,0\n");

static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
{
        struct ubiblock *dev;

        list_for_each_entry(dev, &ubiblock_devices, list)
                if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
                        return dev;
        return NULL;
}

static int ubiblock_read_to_buf(struct ubiblock *dev, char *buffer,
                                int leb, int offset, int len)
{
        int ret;

        ret = ubi_read(dev->desc, leb, buffer, offset, len);
        if (ret) {
                dev_err(disk_to_dev(dev->gd), "%d while reading from LEB %d (offset %d, length %d)",
                        ret, leb, offset, len);
                return ret;
        }
        return 0;
}

static int ubiblock_read(struct ubiblock *dev, char *buffer,
                         sector_t sec, int len)
{
        int ret, leb, offset;
        int bytes_left = len;
        int to_read = len;
        u64 pos = sec << 9;

        /* Get LEB:offset address to read from */
        offset = do_div(pos, dev->leb_size);
        leb = pos;

        while (bytes_left) {
                /*
                 * We can only read one LEB at a time. Therefore if the read
                 * length is larger than one LEB size, we split the operation.
                 */
                if (offset + to_read > dev->leb_size)
                        to_read = dev->leb_size - offset;

                ret = ubiblock_read_to_buf(dev, buffer, leb, offset, to_read);
                if (ret)
                        return ret;

                buffer += to_read;
                bytes_left -= to_read;
                to_read = bytes_left;
                leb += 1;
                offset = 0;
        }
        return 0;
}
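/*
 * Illustrative example of the split logic in ubiblock_read() above
 * (editorial note; again assuming a 128 KiB usable LEB size): a 4096-byte
 * read starting at volume byte 129024 takes 131072 - 129024 = 2048 bytes
 * from the end of LEB 0 and the remaining 2048 bytes from the start of
 * LEB 1.
 */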
static int do_ubiblock_request(struct ubiblock *dev, struct request *req)
{
        int len, ret;
        sector_t sec;

        if (req->cmd_type != REQ_TYPE_FS)
                return -EIO;

        if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
            get_capacity(req->rq_disk))
                return -EIO;

        if (rq_data_dir(req) != READ)
                return -ENOSYS; /* Write not implemented */

        sec = blk_rq_pos(req);
        len = blk_rq_cur_bytes(req);

        /*
         * Let's prevent the device from being removed while we're doing I/O
         * work. Notice that this means we serialize all the I/O operations,
         * but it's probably of no impact given the NAND core serializes
         * flash access anyway.
         */
        mutex_lock(&dev->dev_mutex);
        ret = ubiblock_read(dev, bio_data(req->bio), sec, len);
        mutex_unlock(&dev->dev_mutex);

        return ret;
}

static void ubiblock_do_work(struct work_struct *work)
{
        struct ubiblock *dev =
                container_of(work, struct ubiblock, work);
        struct request_queue *rq = dev->rq;
        struct request *req;
        int res;

        spin_lock_irq(rq->queue_lock);

        req = blk_fetch_request(rq);
        while (req) {

                spin_unlock_irq(rq->queue_lock);
                res = do_ubiblock_request(dev, req);
                spin_lock_irq(rq->queue_lock);

                /*
                 * If we're done with this request,
                 * we need to fetch a new one
                 */
                if (!__blk_end_request_cur(req, res))
                        req = blk_fetch_request(rq);
        }

        spin_unlock_irq(rq->queue_lock);
}

static void ubiblock_request(struct request_queue *rq)
{
        struct ubiblock *dev;
        struct request *req;

        dev = rq->queuedata;

        if (!dev)
                while ((req = blk_fetch_request(rq)) != NULL)
                        __blk_end_request_all(req, -ENODEV);
        else
                queue_work(dev->wq, &dev->work);
}

static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
        struct ubiblock *dev = bdev->bd_disk->private_data;
        int ret;

        mutex_lock(&dev->dev_mutex);
        if (dev->refcnt > 0) {
                /*
                 * The volume is already open, just increase the reference
                 * counter.
                 */
                goto out_done;
        }

        /*
         * We want users to be aware they should only mount us as read-only.
         * It's just a paranoid check, as write requests will get rejected
         * in any case.
         */
        if (mode & FMODE_WRITE) {
                ret = -EPERM;
                goto out_unlock;
        }

        dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
        if (IS_ERR(dev->desc)) {
                dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
                        dev->ubi_num, dev->vol_id);
                ret = PTR_ERR(dev->desc);
                dev->desc = NULL;
                goto out_unlock;
        }

out_done:
        dev->refcnt++;
        mutex_unlock(&dev->dev_mutex);
        return 0;

out_unlock:
        mutex_unlock(&dev->dev_mutex);
        return ret;
}

static void ubiblock_release(struct gendisk *gd, fmode_t mode)
{
        struct ubiblock *dev = gd->private_data;

        mutex_lock(&dev->dev_mutex);
        dev->refcnt--;
        if (dev->refcnt == 0) {
                ubi_close_volume(dev->desc);
                dev->desc = NULL;
        }
        mutex_unlock(&dev->dev_mutex);
}

static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        /* Some tools might require this information */
        geo->heads = 1;
        geo->cylinders = 1;
        geo->sectors = get_capacity(bdev->bd_disk);
        geo->start = 0;
        return 0;
}

static const struct block_device_operations ubiblock_ops = {
        .owner = THIS_MODULE,
        .open = ubiblock_open,
        .release = ubiblock_release,
        .getgeo = ubiblock_getgeo,
};
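/*
 * Example of the minor-number scheme used by ubiblock_create() below
 * (illustrative values; assumes UBI_MAX_VOLUMES is 128, as defined in
 * ubi-media.h): volume 2 on ubi1 gets first_minor = 1 * 128 + 2 = 130.
 */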
int ubiblock_create(struct ubi_volume_info *vi)
{
        struct ubiblock *dev;
        struct gendisk *gd;
        u64 disk_capacity = vi->used_bytes >> 9;
        int ret;

        if ((sector_t)disk_capacity != disk_capacity)
                return -EFBIG;
        /* Check that the volume isn't already handled */
        mutex_lock(&devices_mutex);
        if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
                mutex_unlock(&devices_mutex);
                return -EEXIST;
        }
        mutex_unlock(&devices_mutex);

        dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        mutex_init(&dev->dev_mutex);

        dev->ubi_num = vi->ubi_num;
        dev->vol_id = vi->vol_id;
        dev->leb_size = vi->usable_leb_size;

        /* Initialize the gendisk of this ubiblock device */
        gd = alloc_disk(1);
        if (!gd) {
                pr_err("UBI: block: alloc_disk failed");
                ret = -ENODEV;
                goto out_free_dev;
        }

        gd->fops = &ubiblock_ops;
        gd->major = ubiblock_major;
        gd->first_minor = dev->ubi_num * UBI_MAX_VOLUMES + dev->vol_id;
        gd->private_data = dev;
        sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
        set_capacity(gd, disk_capacity);
        dev->gd = gd;

        spin_lock_init(&dev->queue_lock);
        dev->rq = blk_init_queue(ubiblock_request, &dev->queue_lock);
        if (!dev->rq) {
                dev_err(disk_to_dev(gd), "blk_init_queue failed");
                ret = -ENODEV;
                goto out_put_disk;
        }

        dev->rq->queuedata = dev;
        dev->gd->queue = dev->rq;

        /*
         * Create one workqueue per volume (per registered block device).
         * Remember workqueues are cheap, they're not threads.
         */
        dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
        if (!dev->wq) {
                ret = -ENOMEM;
                goto out_free_queue;
        }
        INIT_WORK(&dev->work, ubiblock_do_work);

        mutex_lock(&devices_mutex);
        list_add_tail(&dev->list, &ubiblock_devices);
        mutex_unlock(&devices_mutex);

        /* Must be the last step: anyone can call file ops from now on */
        add_disk(dev->gd);
        dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
                 dev->ubi_num, dev->vol_id, vi->name);
        return 0;

out_free_queue:
        blk_cleanup_queue(dev->rq);
out_put_disk:
        put_disk(dev->gd);
out_free_dev:
        kfree(dev);

        return ret;
}

static void ubiblock_cleanup(struct ubiblock *dev)
{
        del_gendisk(dev->gd);
        blk_cleanup_queue(dev->rq);
        dev_info(disk_to_dev(dev->gd), "released");
        put_disk(dev->gd);
}

int ubiblock_remove(struct ubi_volume_info *vi)
{
        struct ubiblock *dev;

        mutex_lock(&devices_mutex);
        dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
        if (!dev) {
                mutex_unlock(&devices_mutex);
                return -ENODEV;
        }

        /* Found a device, let's lock it so we can check if it's busy */
        mutex_lock(&dev->dev_mutex);
        if (dev->refcnt > 0) {
                mutex_unlock(&dev->dev_mutex);
                mutex_unlock(&devices_mutex);
                return -EBUSY;
        }

        /* Remove from device list */
        list_del(&dev->list);
        mutex_unlock(&devices_mutex);

        /* Flush pending work and stop this workqueue */
        destroy_workqueue(dev->wq);

        ubiblock_cleanup(dev);
        mutex_unlock(&dev->dev_mutex);
        kfree(dev);
        return 0;
}
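/*
 * Capacity arithmetic for ubiblock_resize() below (illustrative numbers,
 * not taken from this file): capacities are expressed in 512-byte sectors,
 * so a volume with used_bytes == 15728640 (15 MiB) is exposed as
 * 15728640 >> 9 = 30720 sectors.
 */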
static int ubiblock_resize(struct ubi_volume_info *vi)
{
        struct ubiblock *dev;
        u64 disk_capacity = vi->used_bytes >> 9;

        /*
         * Need to lock the device list until we stop using the device,
         * otherwise the device struct might get released in
         * 'ubiblock_remove()'.
         */
        mutex_lock(&devices_mutex);
        dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
        if (!dev) {
                mutex_unlock(&devices_mutex);
                return -ENODEV;
        }
        if ((sector_t)disk_capacity != disk_capacity) {
                mutex_unlock(&devices_mutex);
                dev_warn(disk_to_dev(dev->gd), "the volume is too big (%d LEBs), cannot resize",
                         vi->size);
                return -EFBIG;
        }

        mutex_lock(&dev->dev_mutex);

        if (get_capacity(dev->gd) != disk_capacity) {
                set_capacity(dev->gd, disk_capacity);
                dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
                         vi->used_bytes);
        }
        mutex_unlock(&dev->dev_mutex);
        mutex_unlock(&devices_mutex);
        return 0;
}

static int ubiblock_notify(struct notifier_block *nb,
                           unsigned long notification_type, void *ns_ptr)
{
        struct ubi_notification *nt = ns_ptr;

        switch (notification_type) {
        case UBI_VOLUME_ADDED:
                /*
                 * We want to enforce explicit block device creation for
                 * volumes, so when a volume is added we do nothing.
                 */
                break;
        case UBI_VOLUME_REMOVED:
                ubiblock_remove(&nt->vi);
                break;
        case UBI_VOLUME_RESIZED:
                ubiblock_resize(&nt->vi);
                break;
        case UBI_VOLUME_UPDATED:
                /*
                 * If the volume is static, a content update might mean the
                 * size (i.e. used_bytes) was also changed.
                 */
                if (nt->vi.vol_type == UBI_STATIC_VOLUME)
                        ubiblock_resize(&nt->vi);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block ubiblock_notifier = {
        .notifier_call = ubiblock_notify,
};

static struct ubi_volume_desc * __init
open_volume_desc(const char *name, int ubi_num, int vol_id)
{
        if (ubi_num == -1)
                /* No ubi num, name must be a vol device path */
                return ubi_open_volume_path(name, UBI_READONLY);
        else if (vol_id == -1)
                /* No vol_id, must be vol_name */
                return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
        else
                return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
}

static int __init ubiblock_create_from_param(void)
{
        int i, ret = 0;
        struct ubiblock_param *p;
        struct ubi_volume_desc *desc;
        struct ubi_volume_info vi;

        for (i = 0; i < ubiblock_devs; i++) {
                p = &ubiblock_param[i];

                desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
                if (IS_ERR(desc)) {
                        pr_err("UBI: block: can't open volume, err=%ld\n",
                               PTR_ERR(desc));
                        ret = PTR_ERR(desc);
                        break;
                }

                ubi_get_volume_info(desc, &vi);
                ubi_close_volume(desc);

                ret = ubiblock_create(&vi);
                if (ret) {
                        pr_err("UBI: block: can't add '%s' volume, err=%d\n",
                               vi.name, ret);
                        break;
                }
        }
        return ret;
}

static void ubiblock_remove_all(void)
{
        struct ubiblock *next;
        struct ubiblock *dev;

        list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
                /* Flush pending work and stop workqueue */
                destroy_workqueue(dev->wq);
                /* The module is being forcefully removed */
                WARN_ON(dev->desc);
                /* Remove from device list */
                list_del(&dev->list);
                ubiblock_cleanup(dev);
                kfree(dev);
        }
}
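/*
 * Note on the registration in ubiblock_init() below: passing 0 as the
 * major number to register_blkdev() requests a dynamically allocated
 * major, which is returned on success and shared by all ubiblock disks.
 */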
int __init ubiblock_init(void)
{
        int ret;

        ubiblock_major = register_blkdev(0, "ubiblock");
        if (ubiblock_major < 0)
                return ubiblock_major;

        /* Attach block devices from 'block=' module param */
        ret = ubiblock_create_from_param();
        if (ret)
                goto err_remove;

        /*
         * Block devices are only created upon user requests, so we ignore
         * existing volumes.
         */
        ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
        if (ret)
                goto err_unreg;
        return 0;

err_unreg:
        unregister_blkdev(ubiblock_major, "ubiblock");
err_remove:
        ubiblock_remove_all();
        return ret;
}

void __exit ubiblock_exit(void)
{
        ubi_unregister_volume_notifier(&ubiblock_notifier);
        ubiblock_remove_all();
        unregister_blkdev(ubiblock_major, "ubiblock");
}