/*
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);

struct mtd_blkcore_priv {
        struct task_struct *thread;
        struct request_queue *rq;
        spinlock_t queue_lock;
};

static int blktrans_discard_request(struct request_queue *q,
                                    struct request *req)
{
        req->cmd_type = REQ_TYPE_LINUX_BLOCK;
        req->cmd[0] = REQ_LB_OP_DISCARD;
        return 0;
}

static int do_blktrans_request(struct mtd_blktrans_ops *tr,
                               struct mtd_blktrans_dev *dev,
                               struct request *req)
{
        unsigned long block, nsect;
        char *buf;

        /* blk_rq_pos() is in 512-byte sectors; shift up to bytes, then
           down to units of the translation layer's blocksize. Despite
           its name, nsect counts tr->blksize blocks, not sectors. */
        block = blk_rq_pos(req) << 9 >> tr->blkshift;
        nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

        buf = req->buffer;

        if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
            req->cmd[0] == REQ_LB_OP_DISCARD)
                return tr->discard(dev, block, nsect);

        if (!blk_fs_request(req))
                return -EIO;

        if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
            get_capacity(req->rq_disk))
                return -EIO;

        switch(rq_data_dir(req)) {
        case READ:
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->readsect(dev, block, buf))
                                return -EIO;
                return 0;

        case WRITE:
                if (!tr->writesect)
                        return -EIO;

                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->writesect(dev, block, buf))
                                return -EIO;
                return 0;

        default:
                printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
                return -EIO;
        }
}

static int mtd_blktrans_thread(void *arg)
{
        struct mtd_blktrans_ops *tr = arg;
        struct request_queue *rq = tr->blkcore_priv->rq;
        struct request *req = NULL;

        /* we might get involved when memory gets low, so use PF_MEMALLOC */
        current->flags |= PF_MEMALLOC;

        spin_lock_irq(rq->queue_lock);

        while (!kthread_should_stop()) {
                struct mtd_blktrans_dev *dev;
                int res;

                if (!req && !(req = blk_fetch_request(rq))) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(rq->queue_lock);
                        schedule();
                        spin_lock_irq(rq->queue_lock);
                        continue;
                }

                dev = req->rq_disk->private_data;
                tr = dev->tr;

                spin_unlock_irq(rq->queue_lock);

                mutex_lock(&dev->lock);
                res = do_blktrans_request(tr, dev, req);
                mutex_unlock(&dev->lock);

                spin_lock_irq(rq->queue_lock);

                if (!__blk_end_request_cur(req, res))
                        req = NULL;
        }

        if (req)
                __blk_end_request_all(req, -EIO);

        spin_unlock_irq(rq->queue_lock);

        return 0;
}

/* The request_fn runs with the queue lock held; all real work is
   deferred to the per-major worker thread above. */
static void mtd_blktrans_request(struct request_queue *rq)
{
        struct mtd_blktrans_ops *tr = rq->queuedata;
        wake_up_process(tr->blkcore_priv->thread);
}
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
        struct mtd_blktrans_ops *tr = dev->tr;
        int ret = -ENODEV;

        if (!try_module_get(dev->mtd->owner))
                goto out;

        if (!try_module_get(tr->owner))
                goto out_tr;

        /* FIXME: Locking. A hot pluggable device can go away
           (del_mtd_device can be called for it) without its module
           being unloaded. */
        dev->mtd->usecount++;

        ret = 0;
        if (tr->open && (ret = tr->open(dev))) {
                dev->mtd->usecount--;
                module_put(dev->mtd->owner);
        out_tr:
                module_put(tr->owner);
        }
 out:
        return ret;
}

static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = disk->private_data;
        struct mtd_blktrans_ops *tr = dev->tr;
        int ret = 0;

        if (tr->release)
                ret = tr->release(dev);

        if (!ret) {
                dev->mtd->usecount--;
                module_put(dev->mtd->owner);
                module_put(tr->owner);
        }

        return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;

        if (dev->tr->getgeo)
                return dev->tr->getgeo(dev, geo);
        return -ENOTTY;
}

static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
                          unsigned int cmd, unsigned long arg)
{
        struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
        struct mtd_blktrans_ops *tr = dev->tr;

        switch (cmd) {
        case BLKFLSBUF:
                if (tr->flush)
                        return tr->flush(dev);
                /* The core code did the work, we had nothing to do. */
                return 0;
        default:
                return -ENOTTY;
        }
}

static struct block_device_operations mtd_blktrans_ops = {
        .owner          = THIS_MODULE,
        .open           = blktrans_open,
        .release        = blktrans_release,
        .locked_ioctl   = blktrans_ioctl,
        .getgeo         = blktrans_getgeo,
};
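/*
 * For illustration only: a hedged sketch (not part of the original file)
 * of the getgeo() callback that blktrans_getgeo() above dispatches to.
 * Flash has no real CHS geometry, so translation layers that provide
 * this callback invent one; 'example_getgeo' and the head/sector counts
 * below are hypothetical example values, not an existing driver.
 */
#if 0	/* example sketch, not built */
static int example_getgeo(struct mtd_blktrans_dev *dev,
                          struct hd_geometry *geo)
{
        /* dev->size is in units of tr->blksize; convert to 512-byte
           sectors, then split into a fake C/H/S triple. geo->start is
           filled in by the block core, so we leave it alone. */
        unsigned long sect = dev->size * (dev->tr->blksize >> 9);

        geo->heads = 4;
        geo->sectors = 16;
        geo->cylinders = sect / (4 * 16);
        return 0;
}
#endif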
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
        struct mtd_blktrans_ops *tr = new->tr;
        struct mtd_blktrans_dev *d;
        int last_devnum = -1;
        struct gendisk *gd;

        /* The caller must already hold mtd_table_mutex; if we can take
           it here, that invariant has been violated. */
        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        list_for_each_entry(d, &tr->devs, list) {
                if (new->devnum == -1) {
                        /* Use first free number */
                        if (d->devnum != last_devnum+1) {
                                /* Found a free devnum. Plug it in here */
                                new->devnum = last_devnum+1;
                                list_add_tail(&new->list, &d->list);
                                goto added;
                        }
                } else if (d->devnum == new->devnum) {
                        /* Required number taken */
                        return -EBUSY;
                } else if (d->devnum > new->devnum) {
                        /* Required number was free */
                        list_add_tail(&new->list, &d->list);
                        goto added;
                }
                last_devnum = d->devnum;
        }
        if (new->devnum == -1)
                new->devnum = last_devnum+1;

        /* The device's first minor (devnum shifted by the partition
           bits) must fit within the 256 minors available. */
        if ((new->devnum << tr->part_bits) > 256) {
                return -EBUSY;
        }

        list_add_tail(&new->list, &tr->devs);
 added:
        mutex_init(&new->lock);
        if (!tr->writesect)
                new->readonly = 1;

        gd = alloc_disk(1 << tr->part_bits);
        if (!gd) {
                list_del(&new->list);
                return -ENOMEM;
        }
        gd->major = tr->major;
        gd->first_minor = (new->devnum) << tr->part_bits;
        gd->fops = &mtd_blktrans_ops;

        if (tr->part_bits)
                if (new->devnum < 26)
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c", tr->name, 'a' + new->devnum);
                else
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c%c", tr->name,
                                 'a' - 1 + new->devnum / 26,
                                 'a' + new->devnum % 26);
        else
                snprintf(gd->disk_name, sizeof(gd->disk_name),
                         "%s%d", tr->name, new->devnum);

        /* 2.5 has capacity in units of 512 bytes while still
           having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
        set_capacity(gd, (new->size * tr->blksize) >> 9);

        gd->private_data = new;
        new->blkcore_priv = gd;
        gd->queue = tr->blkcore_priv->rq;
        gd->driverfs_dev = new->mtd->dev.parent;

        if (new->readonly)
                set_disk_ro(gd, 1);

        add_disk(gd);

        return 0;
}

int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        list_del(&old->list);

        del_gendisk(old->blkcore_priv);
        put_disk(old->blkcore_priv);

        return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;
        struct mtd_blktrans_dev *dev, *next;

        list_for_each_entry(tr, &blktrans_majors, list)
                list_for_each_entry_safe(dev, next, &tr->devs, list)
                        if (dev->mtd == mtd)
                                tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;

        if (mtd->type == MTD_ABSENT)
                return;

        list_for_each_entry(tr, &blktrans_majors, list)
                tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
        .add = blktrans_notify_add,
        .remove = blktrans_notify_remove,
};

int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        int ret, i;

        /* Register the notifier if/when the first device type is
           registered, to prevent the link/init ordering from fucking
           us over. */
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);

        tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
        if (!tr->blkcore_priv)
                return -ENOMEM;

        mutex_lock(&mtd_table_mutex);

        ret = register_blkdev(tr->major, tr->name);
        if (ret) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
                kfree(tr->blkcore_priv);
                mutex_unlock(&mtd_table_mutex);
                return ret;
        }
        spin_lock_init(&tr->blkcore_priv->queue_lock);

        tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
        if (!tr->blkcore_priv->rq) {
                unregister_blkdev(tr->major, tr->name);
                kfree(tr->blkcore_priv);
                mutex_unlock(&mtd_table_mutex);
                return -ENOMEM;
        }

        tr->blkcore_priv->rq->queuedata = tr;
        blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
        if (tr->discard)
                blk_queue_set_discard(tr->blkcore_priv->rq,
                                      blktrans_discard_request);

        tr->blkshift = ffs(tr->blksize) - 1;

        tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
                                               "%sd", tr->name);
        if (IS_ERR(tr->blkcore_priv->thread)) {
                int ret = PTR_ERR(tr->blkcore_priv->thread);
                blk_cleanup_queue(tr->blkcore_priv->rq);
                unregister_blkdev(tr->major, tr->name);
                kfree(tr->blkcore_priv);
                mutex_unlock(&mtd_table_mutex);
                return ret;
        }

        INIT_LIST_HEAD(&tr->devs);
        list_add(&tr->list, &blktrans_majors);

        for (i=0; i<MAX_MTD_DEVICES; i++) {
                if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd_table[i]);
        }

        mutex_unlock(&mtd_table_mutex);

        return 0;
}
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_blktrans_dev *dev, *next;

        mutex_lock(&mtd_table_mutex);

        /* Clean up the kernel thread */
        kthread_stop(tr->blkcore_priv->thread);

        /* Remove it from the list of active majors */
        list_del(&tr->list);

        list_for_each_entry_safe(dev, next, &tr->devs, list)
                tr->remove_dev(dev);

        blk_cleanup_queue(tr->blkcore_priv->rq);
        unregister_blkdev(tr->major, tr->name);

        mutex_unlock(&mtd_table_mutex);

        kfree(tr->blkcore_priv);

        BUG_ON(!list_empty(&tr->devs));
        return 0;
}

static void __exit mtd_blktrans_exit(void)
{
        /* No race here -- if someone's currently in register_mtd_blktrans
           we're screwed anyway. */
        if (blktrans_notifier.list.next)
                unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
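/*
 * For illustration only: a hedged sketch (not part of the original file)
 * of a minimal read-only client of the interface exported above, loosely
 * modelled on the in-tree mtdblock_ro driver. All names here
 * ('simple_blktrans', simple_readsect(), etc.), the major number and the
 * blksize are hypothetical example values, not an existing driver.
 */
#if 0	/* example sketch, not built */
static int simple_readsect(struct mtd_blktrans_dev *dev,
                           unsigned long block, char *buf)
{
        size_t retlen;

        /* A trivial 1:1 translation: read one 512-byte block straight
           from the underlying MTD device. */
        if (dev->mtd->read(dev->mtd, block * 512, 512, &retlen, buf))
                return 1;
        return 0;
}

static void simple_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
        /* Called (with mtd_table_mutex held) for every MTD device that
           appears; publish a block device on top of it. */
        struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (!dev)
                return;

        dev->mtd = mtd;
        dev->devnum = -1;               /* take the first free number */
        dev->size = mtd->size >> 9;     /* capacity in 512-byte blocks */
        dev->tr = tr;
        dev->readonly = 1;

        if (add_mtd_blktrans_dev(dev))
                kfree(dev);
}

static void simple_remove_dev(struct mtd_blktrans_dev *dev)
{
        del_mtd_blktrans_dev(dev);
        kfree(dev);
}

static struct mtd_blktrans_ops simple_blktrans = {
        .name           = "simple",
        .major          = 240,          /* hypothetical: experimental range */
        .part_bits      = 0,
        .blksize        = 512,
        .readsect       = simple_readsect,
        /* no .writesect, so the core marks the disks read-only */
        .add_mtd        = simple_add_mtd,
        .remove_dev     = simple_remove_dev,
        .owner          = THIS_MODULE,
};

static int __init simple_blktrans_init(void)
{
        return register_mtd_blktrans(&simple_blktrans);
}

static void __exit simple_blktrans_exit(void)
{
        deregister_mtd_blktrans(&simple_blktrans);
}
#endif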