/*
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);

struct mtd_blkcore_priv {
	struct task_struct *thread;
	struct request_queue *rq;
	spinlock_t queue_lock;
};

static int blktrans_discard_request(struct request_queue *q,
				    struct request *req)
{
	req->cmd_type = REQ_TYPE_LINUX_BLOCK;
	req->cmd[0] = REQ_LB_OP_DISCARD;
	return 0;
}

static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = req->sector << 9 >> tr->blkshift;
	nsect = req->current_nr_sectors << 9 >> tr->blkshift;

	buf = req->buffer;

	if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
	    req->cmd[0] == REQ_LB_OP_DISCARD)
		return !tr->discard(dev, block, nsect);

	if (!blk_fs_request(req))
		return 0;

	if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
		return 0;

	switch (rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return 0;
		return 1;

	case WRITE:
		if (!tr->writesect)
			return 0;

		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return 0;
		return 1;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return 0;
	}
}

static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res = 0;

		req = elv_next_request(rq);

		if (!req) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(rq->queue_lock);
			schedule();
			spin_lock_irq(rq->queue_lock);
			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	return 0;
}

static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_ops *tr = rq->queuedata;
	wake_up_process(tr->blkcore_priv->thread);
}

static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;
	int ret = -ENODEV;
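	/* Pin both the underlying MTD driver module and the translation
	   layer module so neither can be unloaded while the block device
	   is held open; the references are dropped in blktrans_release()
	   or below on a failed open. */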
	if (!try_module_get(dev->mtd->owner))
		goto out;

	if (!try_module_get(tr->owner))
		goto out_tr;

	/* FIXME: Locking. A hot pluggable device can go away
	   (del_mtd_device can be called for it) without its module
	   being unloaded. */
	dev->mtd->usecount++;

	ret = 0;
	if (tr->open && (ret = tr->open(dev))) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
	out_tr:
		module_put(tr->owner);
	}
 out:
	return ret;
}

static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;
	int ret = 0;

	if (tr->release)
		ret = tr->release(dev);

	if (!ret) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
		module_put(tr->owner);
	}

	return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;

	if (dev->tr->getgeo)
		return dev->tr->getgeo(dev, geo);
	return -ENOTTY;
}

static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
			  unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;

	switch (cmd) {
	case BLKFLSBUF:
		if (tr->flush)
			return tr->flush(dev);
		/* The core code did the work, we had nothing to do. */
		return 0;
	default:
		return -ENOTTY;
	}
}

static struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.locked_ioctl	= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};
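/*
 * Register one translated block device with the block layer. The
 * caller (normally the translation layer's add_mtd hook) must hold
 * mtd_table_mutex. A devnum of -1 requests the first free slot.
 */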
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	if ((new->devnum << tr->part_bits) > 256) {
		return -EBUSY;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_init(&new->lock);
	if (!tr->writesect)
		new->readonly = 1;

	gd = alloc_disk(1 << tr->part_bits);
	if (!gd) {
		list_del(&new->list);
		return -ENOMEM;
	}
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	/* 2.5 has capacity in units of 512 bytes while still
	   having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
	set_capacity(gd, (new->size * tr->blksize) >> 9);

	gd->private_data = new;
	new->blkcore_priv = gd;
	gd->queue = tr->blkcore_priv->rq;
	gd->driverfs_dev = new->mtd->dev.parent;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	return 0;
}

int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_del(&old->list);

	del_gendisk(old->blkcore_priv);
	put_disk(old->blkcore_priv);

	return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	int ret, i;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from fucking
	   us over. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
	if (!tr->blkcore_priv)
		return -ENOMEM;

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}
	spin_lock_init(&tr->blkcore_priv->queue_lock);

	tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request,
					      &tr->blkcore_priv->queue_lock);
	if (!tr->blkcore_priv->rq) {
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return -ENOMEM;
	}

	tr->blkcore_priv->rq->queuedata = tr;
	blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
	if (tr->discard)
		blk_queue_set_discard(tr->blkcore_priv->rq,
				      blktrans_discard_request);

	tr->blkshift = ffs(tr->blksize) - 1;

	tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
					       "%sd", tr->name);
	if (IS_ERR(tr->blkcore_priv->thread)) {
		int ret = PTR_ERR(tr->blkcore_priv->thread);
		blk_cleanup_queue(tr->blkcore_priv->rq);
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	for (i=0; i<MAX_MTD_DEVICES; i++) {
		if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd_table[i]);
	}

	mutex_unlock(&mtd_table_mutex);

	return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Clean up the kernel thread */
	kthread_stop(tr->blkcore_priv->thread);

	/* Remove it from the list of active majors */
	list_del(&tr->list);
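	/* Tear down every device still registered under this layer;
	   remove_dev() is expected to end up in del_mtd_blktrans_dev(),
	   which is what the BUG_ON() below verifies. */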
	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	blk_cleanup_queue(tr->blkcore_priv->rq);
	unregister_blkdev(tr->major, tr->name);

	mutex_unlock(&mtd_table_mutex);

	kfree(tr->blkcore_priv);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
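
/*
 * Illustrative sketch (hypothetical, kept as a comment so the file
 * still compiles): roughly what a minimal read-only translation layer
 * built on the interface above could look like. All "example_*" names
 * and the major number are invented for illustration; only the four
 * exported functions and the structures from <linux/mtd/blktrans.h>
 * are real. Note that this version of the core treats any non-zero
 * return from register_blkdev() as an error, so a layer must supply a
 * fixed major rather than requesting a dynamic one.
 *
 *	static int example_readsect(struct mtd_blktrans_dev *dev,
 *				    unsigned long block, char *buf)
 *	{
 *		size_t retlen;
 *
 *		return dev->mtd->read(dev->mtd, (loff_t)block << 9, 512,
 *				      &retlen, (u_char *)buf) ? 1 : 0;
 *	}
 *
 *	static void example_add_mtd(struct mtd_blktrans_ops *tr,
 *				    struct mtd_info *mtd)
 *	{
 *		struct mtd_blktrans_dev *dev;
 *
 *		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *		if (!dev)
 *			return;
 *		dev->mtd = mtd;
 *		dev->devnum = mtd->index;
 *		dev->size = mtd->size >> 9;	(in units of blksize)
 *		dev->tr = tr;
 *		if (add_mtd_blktrans_dev(dev))
 *			kfree(dev);
 *	}
 *
 *	static void example_remove_dev(struct mtd_blktrans_dev *dev)
 *	{
 *		del_mtd_blktrans_dev(dev);
 *		kfree(dev);
 *	}
 *
 *	static struct mtd_blktrans_ops example_tr = {
 *		.name		= "example",
 *		.major		= 240,		(local/experimental range)
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= example_readsect,
 *		.add_mtd	= example_add_mtd,
 *		.remove_dev	= example_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * Module init/exit then reduce to register_mtd_blktrans(&example_tr)
 * and deregister_mtd_blktrans(&example_tr); the notifier machinery
 * above takes care of calling add_mtd for every present MTD device.
 */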