// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);

static void blktrans_dev_release(struct kref *kref)
{
	struct mtd_blktrans_dev *dev =
		container_of(kref, struct mtd_blktrans_dev, ref);

	blk_cleanup_disk(dev->disk);
	blk_mq_free_tag_set(dev->tag_set);
	kfree(dev->tag_set);
	list_del(&dev->list);
	kfree(dev);
}

static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
	struct mtd_blktrans_dev *dev;

	mutex_lock(&blktrans_ref_mutex);
	dev = disk->private_data;

	if (!dev)
		goto unlock;
	kref_get(&dev->ref);
unlock:
	mutex_unlock(&blktrans_ref_mutex);
	return dev;
}

static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
	mutex_lock(&blktrans_ref_mutex);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&blktrans_ref_mutex);
}

static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
					struct mtd_blktrans_dev *dev,
					struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

	if (req_op(req) == REQ_OP_FLUSH) {
		if (tr->flush(dev))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	}

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return BLK_STS_IOERR;

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		if (tr->discard(dev, block, nsect))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	case REQ_OP_READ:
		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
			if (tr->readsect(dev, block, buf)) {
				kunmap(bio_page(req->bio));
				return BLK_STS_IOERR;
			}
		}
		kunmap(bio_page(req->bio));
		rq_flush_dcache_pages(req);
		return BLK_STS_OK;
	case REQ_OP_WRITE:
		if (!tr->writesect)
			return BLK_STS_IOERR;

		rq_flush_dcache_pages(req);
		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
			if (tr->writesect(dev, block, buf)) {
				kunmap(bio_page(req->bio));
				return BLK_STS_IOERR;
			}
		}
		kunmap(bio_page(req->bio));
		return BLK_STS_OK;
	default:
		return BLK_STS_IOERR;
	}
}

int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
	return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);

static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
{
	struct request *rq;

	rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
	if (rq) {
		list_del_init(&rq->queuelist);
		blk_mq_start_request(rq);
		return rq;
	}

	return NULL;
}
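
/*
 * Worker loop: drain dev->rq_list under dev->queue_lock, dropping that lock
 * (and taking dev->lock) around each do_blktrans_request() call.  When the
 * list runs dry, give the translation layer at most one round of background
 * work per idle period before returning.
 */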
static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
	__releases(&dev->queue_lock)
	__acquires(&dev->queue_lock)
{
	struct mtd_blktrans_ops *tr = dev->tr;
	struct request *req = NULL;
	int background_done = 0;

	while (1) {
		blk_status_t res;

		dev->bg_stop = false;
		if (!req && !(req = mtd_next_request(dev))) {
			if (tr->background && !background_done) {
				spin_unlock_irq(&dev->queue_lock);
				mutex_lock(&dev->lock);
				tr->background(dev);
				mutex_unlock(&dev->lock);
				spin_lock_irq(&dev->queue_lock);
				/*
				 * Do background processing just once per
				 * idle period.
				 */
				background_done = !dev->bg_stop;
				continue;
			}
			break;
		}

		spin_unlock_irq(&dev->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
			__blk_mq_end_request(req, res);
			req = NULL;
		}

		background_done = 0;
		spin_lock_irq(&dev->queue_lock);
	}
}

static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct mtd_blktrans_dev *dev;

	dev = hctx->queue->queuedata;
	if (!dev) {
		blk_mq_start_request(bd->rq);
		return BLK_STS_IOERR;
	}

	spin_lock_irq(&dev->queue_lock);
	list_add_tail(&bd->rq->queuelist, &dev->rq_list);
	mtd_blktrans_work(dev);
	spin_unlock_irq(&dev->queue_lock);

	return BLK_STS_OK;
}
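
/*
 * Open/release for the block device node.  Both paths take mtd_table_mutex
 * before dev->lock so the underlying MTD device cannot go away while the
 * translation layer's own open()/release() hooks run.  dev->open counts
 * nested openers; the kref and module references are taken on first open
 * and dropped on last release only.
 */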
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = 0;

	if (!dev)
		return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/

	mutex_lock(&mtd_table_mutex);
	mutex_lock(&dev->lock);

	if (dev->open)
		goto unlock;

	kref_get(&dev->ref);
	__module_get(dev->tr->owner);

	if (!dev->mtd)
		goto unlock;

	if (dev->tr->open) {
		ret = dev->tr->open(dev);
		if (ret)
			goto error_put;
	}

	ret = __get_mtd_device(dev->mtd);
	if (ret)
		goto error_release;
	dev->file_mode = mode;

unlock:
	dev->open++;
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
	return ret;

error_release:
	if (dev->tr->release)
		dev->tr->release(dev);
error_put:
	module_put(dev->tr->owner);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
	return ret;
}

static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);

	if (!dev)
		return;

	mutex_lock(&mtd_table_mutex);
	mutex_lock(&dev->lock);

	if (--dev->open)
		goto unlock;

	kref_put(&dev->ref, blktrans_dev_release);
	module_put(dev->tr->owner);

	if (dev->mtd) {
		if (dev->tr->release)
			dev->tr->release(dev);
		__put_mtd_device(dev->mtd);
	}
unlock:
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static const struct block_device_operations mtd_block_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.getgeo		= blktrans_getgeo,
};

static const struct blk_mq_ops mtd_mq_ops = {
	.queue_rq	= mtd_queue_rq,
};
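
/*
 * Create the block device for one translation-layer instance: pick (or
 * validate) a device number, allocate a single-queue blk-mq tag set and
 * gendisk, name the disk, and publish it.  The caller must hold
 * mtd_table_mutex and have filled in new->mtd, new->tr, new->size and
 * new->devnum (-1 means "use the first free number").
 */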
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;
	int ret;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	mutex_lock(&blktrans_ref_mutex);
	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			mutex_unlock(&blktrans_ref_mutex);
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}

	ret = -EBUSY;
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Check that the device and any partitions will get valid
	 * minor numbers and that the disk naming code below can cope
	 * with this number. */
	if (new->devnum > (MINORMASK >> tr->part_bits) ||
	    (tr->part_bits && new->devnum >= 27 * 26)) {
		mutex_unlock(&blktrans_ref_mutex);
		return ret;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_unlock(&blktrans_ref_mutex);

	mutex_init(&new->lock);
	kref_init(&new->ref);
	if (!tr->writesect)
		new->readonly = 1;

	ret = -ENOMEM;
	new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
	if (!new->tag_set)
		goto out_list_del;

	ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
			BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
	if (ret)
		goto out_kfree_tag_set;

	/* Create gendisk */
	gd = blk_mq_alloc_disk(new->tag_set, new);
	if (IS_ERR(gd)) {
		ret = PTR_ERR(gd);
		goto out_free_tag_set;
	}

	new->disk = gd;
	new->rq = new->disk->queue;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->minors = 1 << tr->part_bits;
	gd->fops = &mtd_block_ops;

	if (tr->part_bits) {
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	} else {
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);
	}

	set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);

	/* Create the request queue */
	spin_lock_init(&new->queue_lock);
	INIT_LIST_HEAD(&new->rq_list);

	if (tr->flush)
		blk_queue_write_cache(new->rq, true, false);

	blk_queue_logical_block_size(new->rq, tr->blksize);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);

	if (tr->discard) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
		blk_queue_max_discard_sectors(new->rq, UINT_MAX);
	}

	gd->queue = new->rq;

	if (new->readonly)
		set_disk_ro(gd, 1);

	device_add_disk(&new->mtd->dev, gd, NULL);

	if (new->disk_attributes) {
		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
					new->disk_attributes);
		WARN_ON(ret);
	}
	return 0;

out_free_tag_set:
	blk_mq_free_tag_set(new->tag_set);
out_kfree_tag_set:
	kfree(new->tag_set);
out_list_del:
	list_del(&new->list);
	return ret;
}

int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	unsigned long flags;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	if (old->disk_attributes)
		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
						old->disk_attributes);

	/* Stop new requests from arriving */
	del_gendisk(old->disk);

	/* Kill current requests */
	spin_lock_irqsave(&old->queue_lock, flags);
	old->rq->queuedata = NULL;
	spin_unlock_irqrestore(&old->queue_lock, flags);

	/* freeze+quiesce queue to ensure all requests are flushed */
	blk_mq_freeze_queue(old->rq);
	blk_mq_quiesce_queue(old->rq);
	blk_mq_unquiesce_queue(old->rq);
	blk_mq_unfreeze_queue(old->rq);

	/* If the device is currently open, tell the trans driver to close it,
	   then put the MTD device, and don't touch it again. */
	mutex_lock(&old->lock);
	if (old->open) {
		if (old->tr->release)
			old->tr->release(old);
		__put_mtd_device(old->mtd);
	}

	old->mtd = NULL;

	mutex_unlock(&old->lock);
	blktrans_dev_put(old);
	return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};
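
/*
 * Registration API for translation layers.  As a rough, illustrative sketch
 * (the "foo" names below are hypothetical, not part of this file), a driver
 * fills in a struct mtd_blktrans_ops and registers it once at module init:
 *
 *	static struct mtd_blktrans_ops foo_tr = {
 *		.name		= "foo",
 *		.major		= 0,
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= foo_readsect,
 *		.writesect	= foo_writesect,
 *		.add_mtd	= foo_add_mtd,
 *		.remove_dev	= foo_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	return register_mtd_blktrans(&foo_tr);
 *
 * Passing .major = 0 asks register_blkdev() for a dynamic major, which
 * register_mtd_blktrans() writes back into tr->major.  The core then calls
 * foo_add_mtd() for every existing and future MTD device; that callback
 * allocates a struct mtd_blktrans_dev, sets ->mtd, ->tr, ->size and
 * ->devnum, and hands it to add_mtd_blktrans_dev().
 */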
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_info *mtd;
	int ret;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from messing
	   us up. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret < 0) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

	if (ret)
		tr->major = ret;

	tr->blkshift = ffs(tr->blksize) - 1;

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	mtd_for_each_device(mtd)
		if (mtd->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd);

	mutex_unlock(&mtd_table_mutex);
	return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	unregister_blkdev(tr->major, tr->name);
	mutex_unlock(&mtd_table_mutex);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");