/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);

void blktrans_dev_release(struct kref *kref)
{
	struct mtd_blktrans_dev *dev =
		container_of(kref, struct mtd_blktrans_dev, ref);

	dev->disk->private_data = NULL;
	blk_cleanup_queue(dev->rq);
	put_disk(dev->disk);
	list_del(&dev->list);
	kfree(dev);
}

static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
	struct mtd_blktrans_dev *dev;

	mutex_lock(&blktrans_ref_mutex);
	dev = disk->private_data;

	if (!dev)
		goto unlock;
	kref_get(&dev->ref);
unlock:
	mutex_unlock(&blktrans_ref_mutex);
	return dev;
}

void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
	mutex_lock(&blktrans_ref_mutex);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&blktrans_ref_mutex);
}

static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

	buf = req->buffer;

	if (req->cmd_type != REQ_TYPE_FS)
		return -EIO;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return -EIO;

	if (req->cmd_flags & REQ_DISCARD)
		return tr->discard(dev, block, nsect);

	switch (rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return -EIO;
		rq_flush_dcache_pages(req);
		return 0;
	case WRITE:
		if (!tr->writesect)
			return -EIO;

		rq_flush_dcache_pages(req);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return -EIO;
		return 0;
	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return -EIO;
	}
}
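
/*
 * Worked example of the unit conversion above (an illustration, using
 * values typical of translation layers such as mtdblock): with
 * tr->blksize == 512, register_mtd_blktrans() computes tr->blkshift ==
 * ffs(512) - 1 == 9.  A request starting at 512-byte sector 16 gives
 * blk_rq_pos() == 16, so block == (16 << 9) >> 9 == 16; with
 * blk_rq_cur_bytes() == 4096, nsect == 4096 >> 9 == 8 translation-layer
 * blocks.  For a 1024-byte blksize, blkshift == 10 and the same request
 * maps to block 8, nsect 4.
 */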

static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_dev *dev = arg;
	struct request_queue *rq = dev->rq;
	struct request *req = NULL;

	spin_lock_irq(rq->queue_lock);

	while (!kthread_should_stop()) {
		int res;

		if (!req && !(req = blk_fetch_request(rq))) {
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				set_current_state(TASK_RUNNING);

			spin_unlock_irq(rq->queue_lock);
			schedule();
			spin_lock_irq(rq->queue_lock);
			continue;
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}

	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}

static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_dev *dev;
	struct request *req = NULL;

	dev = rq->queuedata;

	if (!dev)
		while ((req = blk_fetch_request(rq)) != NULL)
			__blk_end_request_all(req, -ENODEV);
	else
		wake_up_process(dev->thread);
}
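
/*
 * Lifetime rules, as implemented below: blktrans_dev_get() takes a kref
 * reference for the duration of each block_device_operations call, and
 * blktrans_dev_put() drops it on the way out.  The first open takes an
 * extra, long-lived reference (kref_get) and pins both the translation
 * layer module and the underlying MTD device; the last release drops
 * them again.  dev->open counts nested opens under dev->lock.
 */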

static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = 0;

	if (!dev)
		return -ERESTARTSYS; /* FIXME: busy loop! -arnd */

	mutex_lock(&dev->lock);

	if (dev->open++)
		goto unlock;

	kref_get(&dev->ref);
	__module_get(dev->tr->owner);

	if (dev->mtd) {
		ret = dev->tr->open ? dev->tr->open(dev) : 0;
		__get_mtd_device(dev->mtd);
	}

unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
	int ret = 0;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (--dev->open)
		goto unlock;

	kref_put(&dev->ref, blktrans_dev_release);
	module_put(dev->tr->owner);

	if (dev->mtd) {
		ret = dev->tr->release ? dev->tr->release(dev) : 0;
		__put_mtd_device(dev->mtd);
	}
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
			  unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	switch (cmd) {
	case BLKFLSBUF:
		ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
		break;
	default:
		ret = -ENOTTY;
	}
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static const struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};
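
/*
 * Device-number and naming notes for add_mtd_blktrans_dev() below: a
 * devnum of -1 asks for the first free slot.  With part_bits == 0, disks
 * are named "<name><devnum>" (e.g. "mtdblock0"); with part_bits set they
 * are named like floppy/IDE disks: "<name>a" for devnum 0 up to "<name>z"
 * for 25, then "<name>aa" for 26, "<name>ab" for 27, and so on.  That
 * two-letter scheme tops out at 26 + 26*26 == 27 * 26 names, which is
 * exactly the bound checked against devnum below.
 */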

int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;
	int ret;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	mutex_lock(&blktrans_ref_mutex);
	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			mutex_unlock(&blktrans_ref_mutex);
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}

	ret = -EBUSY;
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Check that the device and any partitions will get valid
	 * minor numbers and that the disk naming code below can cope
	 * with this number. */
	if (new->devnum > (MINORMASK >> tr->part_bits) ||
	    (tr->part_bits && new->devnum >= 27 * 26)) {
		mutex_unlock(&blktrans_ref_mutex);
		goto error1;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_unlock(&blktrans_ref_mutex);

	mutex_init(&new->lock);
	kref_init(&new->ref);
	if (!tr->writesect)
		new->readonly = 1;

	/* Create gendisk */
	ret = -ENOMEM;
	gd = alloc_disk(1 << tr->part_bits);

	if (!gd)
		goto error2;

	new->disk = gd;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	set_capacity(gd, (new->size * tr->blksize) >> 9);

	/* Create the request queue */
	spin_lock_init(&new->queue_lock);
	new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

	if (!new->rq)
		goto error3;

	new->rq->queuedata = new;
	blk_queue_logical_block_size(new->rq, tr->blksize);

	if (tr->discard)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);

	gd->queue = new->rq;

	/* Create processing thread */
	/* TODO: workqueue ? */
	new->thread = kthread_run(mtd_blktrans_thread, new,
			"%s%d", tr->name, new->mtd->index);
	if (IS_ERR(new->thread)) {
		ret = PTR_ERR(new->thread);
		goto error4;
	}
	gd->driverfs_dev = &new->mtd->dev;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	if (new->disk_attributes) {
		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
					 new->disk_attributes);
		WARN_ON(ret);
	}
	return 0;
error4:
	blk_cleanup_queue(new->rq);
error3:
	put_disk(new->disk);
error2:
	list_del(&new->list);
error1:
	return ret;
}

int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	unsigned long flags;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	if (old->disk_attributes)
		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
				   old->disk_attributes);

	/* Stop new requests from arriving */
	del_gendisk(old->disk);

	/* Stop the thread */
	kthread_stop(old->thread);

	/* Kill current requests */
	spin_lock_irqsave(&old->queue_lock, flags);
	old->rq->queuedata = NULL;
	blk_start_queue(old->rq);
	spin_unlock_irqrestore(&old->queue_lock, flags);

	/* If the device is currently open, tell the translation driver to
	   close it, then put the MTD device, and don't touch it again. */
	mutex_lock(&old->lock);
	if (old->open) {
		if (old->tr->release)
			old->tr->release(old);
		__put_mtd_device(old->mtd);
	}

	old->mtd = NULL;

	mutex_unlock(&old->lock);
	blktrans_dev_put(old);
	return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};
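
/*
 * The notifier above is how translation layers learn about MTD devices
 * coming and going after registration: mtdcore invokes ->add and ->remove
 * as devices appear and disappear, and the callbacks fan each event out
 * to every translation layer on blktrans_majors (skipping MTD_ABSENT
 * placeholders on add).  Devices that already exist when a translation
 * layer registers are picked up by the explicit mtd_for_each_device()
 * scan in register_mtd_blktrans() below instead.
 */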

int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_info *mtd;
	int ret;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from biting
	   us. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret < 0) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

	/* A zero tr->major asked register_blkdev() for a dynamic major;
	   record the one it allocated. */
	if (ret)
		tr->major = ret;

	tr->blkshift = ffs(tr->blksize) - 1;

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	mtd_for_each_device(mtd)
		if (mtd->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd);

	mutex_unlock(&mtd_table_mutex);
	return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	unregister_blkdev(tr->major, tr->name);
	mutex_unlock(&mtd_table_mutex);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
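
/*
 * Example: what a client of this API looks like.  This is an illustrative
 * sketch only (the myflash_* names are hypothetical), modeled on the
 * simplest in-tree user, mtdblock_ro: a read-only translation layer that
 * maps each 512-byte block straight onto the underlying MTD device and
 * requests a dynamically allocated major by passing .major = 0.
 */
#if 0
static int myflash_readsect(struct mtd_blktrans_dev *dev,
			    unsigned long block, char *buf)
{
	size_t retlen;

	/* One translation-layer block is 512 bytes at the same offset */
	if (dev->mtd->read(dev->mtd, block * 512, 512, &retlen, buf))
		return 1;
	return 0;
}

static void myflash_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return;

	dev->mtd = mtd;
	dev->devnum = mtd->index;
	dev->size = mtd->size >> 9;	/* capacity in 512-byte blocks */
	dev->tr = tr;
	dev->readonly = 1;

	/* Ownership passes to add_mtd_blktrans_dev() on success; on
	   failure we still own the memory and must free it. */
	if (add_mtd_blktrans_dev(dev))
		kfree(dev);
}

static void myflash_remove_dev(struct mtd_blktrans_dev *dev)
{
	/* del_mtd_blktrans_dev() drops the last kref, which frees dev */
	del_mtd_blktrans_dev(dev);
}

static struct mtd_blktrans_ops myflash_tr = {
	.name		= "myflash",
	.major		= 0,		/* 0 => dynamic major */
	.part_bits	= 0,
	.blksize	= 512,
	.readsect	= myflash_readsect,
	/* no .writesect, so add_mtd_blktrans_dev() marks the disk RO */
	.add_mtd	= myflash_add_mtd,
	.remove_dev	= myflash_remove_dev,
	.owner		= THIS_MODULE,
};

static int __init myflash_init(void)
{
	return register_mtd_blktrans(&myflash_tr);
}

static void __exit myflash_exit(void)
{
	deregister_mtd_blktrans(&myflash_tr);
}

module_init(myflash_init);
module_exit(myflash_exit);
#endif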