/*
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

/* List of all registered translation layers (struct mtd_blktrans_ops),
 * linked via tr->list.  Walked by the MTD add/remove notifiers below. */
static LIST_HEAD(blktrans_majors);

/* Per-translation-layer block core state: one request queue, the lock
 * protecting it, and the kernel thread that services it. */
struct mtd_blkcore_priv {
	struct task_struct *thread;
	struct request_queue *rq;
	spinlock_t queue_lock;
};

/*
 * Service a single block-layer request by calling the translation
 * layer's readsect()/writesect() hooks once per device block.
 *
 * Returns 1 if the whole transfer succeeded, 0 on any error or for
 * requests we don't handle; the caller passes this straight to
 * end_request().  Called with dev->lock held, queue lock NOT held.
 */
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	/* Convert 512-byte block-layer sectors into translation-layer
	 * blocks (tr->blkshift == log2(tr->blksize), set at register time). */
	block = req->sector << 9 >> tr->blkshift;
	nsect = req->current_nr_sectors << 9 >> tr->blkshift;

	buf = req->buffer;

	/* Only handle filesystem (read/write) requests. */
	if (!blk_fs_request(req))
		return 0;

	/* Reject transfers that would run past the end of the device. */
	if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
		return 0;

	switch(rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return 0;
		return 1;

	case WRITE:
		/* Translation layers without writesect() are read-only
		 * (see add_mtd_blktrans_dev, which also sets readonly). */
		if (!tr->writesect)
			return 0;

		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return 0;
		return 1;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return 0;
	}
}

/*
 * Kernel thread servicing the translation layer's request queue.
 *
 * Locking protocol: the queue lock (rq->queue_lock) is held while
 * manipulating the queue (elv_next_request / end_request) and dropped
 * around both sleeping and the actual MTD I/O, which is serialised per
 * device by dev->lock instead.  Woken by mtd_blktrans_request(); exits
 * when kthread_stop() is called from deregister_mtd_blktrans().
 */
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res = 0;

		req = elv_next_request(rq);

		if (!req) {
			/* Queue empty: sleep until mtd_blktrans_request()
			 * wakes us.  Must drop the queue lock to schedule. */
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(rq->queue_lock);
			schedule();
			spin_lock_irq(rq->queue_lock);
			continue;
		}

		/* Re-derive the translation layer from the request's disk:
		 * one thread may serve several devices of this type. */
		dev = req->rq_disk->private_data;
		tr = dev->tr;

		spin_unlock_irq(rq->queue_lock);

		/* Perform the transfer outside the queue lock, serialised
		 * against open/release by the per-device mutex. */
		mutex_lock(&dev->lock);
		res = do_blktrans_request(tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	return 0;
}

/* Block-layer request callback: just kick the service thread.
 * Called with the queue lock held. */
static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_ops *tr = rq->queuedata;
	wake_up_process(tr->blkcore_priv->thread);
}


/*
 * block_device_operations.open: pin the MTD device's module, the
 * translation layer's module and the MTD usecount, then call the
 * optional per-layer open() hook.  Returns 0 or a negative errno.
 *
 * Note the cleanup labels live INSIDE the failure branch of the final
 * `if`, so the earlier gotos fall through the remaining module_put()s
 * in reverse acquisition order.
 */
static int blktrans_open(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = -ENODEV;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (!try_module_get(dev->mtd->owner))
		goto out;

	if (!try_module_get(tr->owner))
		goto out_tr;

	/* FIXME: Locking. A hot pluggable device can go away
	   (del_mtd_device can be called for it) without its module
	   being unloaded. */
	dev->mtd->usecount++;

	ret = 0;
	if (tr->open && (ret = tr->open(dev))) {
		/* tr->open() failed: undo everything acquired above. */
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
	out_tr:
		module_put(tr->owner);
	}
 out:
	return ret;
}

/*
 * block_device_operations.release: mirror of blktrans_open().  The
 * usecount and module references are only dropped if the per-layer
 * release() hook succeeds (or is absent).
 */
static int blktrans_release(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = 0;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (tr->release)
		ret = tr->release(dev);

	if (!ret) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
		module_put(tr->owner);
	}

	return ret;
}

/* HDIO_GETGEO: delegate to the translation layer if it provides
 * geometry, otherwise report "not a typewriter" like other ioctls. */
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;

	if (dev->tr->getgeo)
		return dev->tr->getgeo(dev, geo);
	return -ENOTTY;
}

/* Device ioctl: only BLKFLSBUF (flush) is handled here; everything
 * else the block core either handles itself or gets -ENOTTY. */
static int blktrans_ioctl(struct inode *inode, struct file *file,
			      unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;

	switch (cmd) {
	case BLKFLSBUF:
		if (tr->flush)
			return tr->flush(dev);
		/* The core code did the work, we had nothing to do. */
		return 0;
	default:
		return -ENOTTY;
	}
}

static struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};

/*
 * Register one device with a translation layer: allocate a device
 * number (or honour the one requested in new->devnum), create and
 * populate the gendisk, and add it to the system.
 *
 * Must be called with mtd_table_mutex held (enforced by the
 * trylock/BUG check below).  Returns 0 or a negative errno.
 */
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;

	/* Caller must already hold mtd_table_mutex; if we could take it
	 * here, they didn't. */
	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	/* tr->devs is kept sorted by devnum; scan for either the first
	 * gap (devnum == -1 means "auto-allocate") or the insertion
	 * point for the requested number. */
	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Only 256 minors per major are available; part_bits minor bits
	 * are reserved for partitions on each device. */
	if ((new->devnum << tr->part_bits) > 256) {
		return -EBUSY;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_init(&new->lock);
	if (!tr->writesect)
		new->readonly = 1;

	gd = alloc_disk(1 << tr->part_bits);
	if (!gd) {
		list_del(&new->list);
		return -ENOMEM;
	}
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

	/* Partitionable devices get drive-letter names ("mtda", ...,
	 * "mtdz", "mtdaa", ...); others get numeric names ("mtd0"). */
	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	/* 2.5 has capacity in units of 512 bytes while still
	   having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
	set_capacity(gd, (new->size * tr->blksize) >> 9);

	gd->private_data = new;
	new->blkcore_priv = gd;
	gd->queue = tr->blkcore_priv->rq;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	return 0;
}

/*
 * Remove a device previously added by add_mtd_blktrans_dev().
 * Must be called with mtd_table_mutex held.  Always returns 0.
 */
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_del(&old->list);

	/* old->blkcore_priv holds the gendisk here (set in
	 * add_mtd_blktrans_dev), not a struct mtd_blkcore_priv. */
	del_gendisk(old->blkcore_priv);
	put_disk(old->blkcore_priv);

	return 0;
}

/* MTD notifier: an MTD device went away; tear down every translated
 * block device that was built on top of it, in every layer. */
static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}

/* MTD notifier: a new MTD device appeared; offer it to every
 * registered translation layer. */
static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

/*
 * Register a translation layer: allocate its block core state, claim
 * its major number, create the request queue and service thread, then
 * offer every existing MTD device to it.  Returns 0 or a negative
 * errno; on failure everything allocated so far is torn down again.
 */
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	int ret, i;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from fucking
	   us over. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
	if (!tr->blkcore_priv)
		return -ENOMEM;

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}
	spin_lock_init(&tr->blkcore_priv->queue_lock);

	tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
	if (!tr->blkcore_priv->rq) {
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return -ENOMEM;
	}

	tr->blkcore_priv->rq->queuedata = tr;
	blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
	/* blksize must be a power of two for this shift to be exact. */
	tr->blkshift = ffs(tr->blksize) - 1;

	tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
			"%sd", tr->name);
	if (IS_ERR(tr->blkcore_priv->thread)) {
		blk_cleanup_queue(tr->blkcore_priv->rq);
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return PTR_ERR(tr->blkcore_priv->thread);
	}

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	/* Offer every already-present MTD device to the new layer. */
	for (i=0; i<MAX_MTD_DEVICES; i++) {
		if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd_table[i]);
	}

	mutex_unlock(&mtd_table_mutex);

	return 0;
}

/*
 * Unregister a translation layer: stop the service thread, detach it
 * from the notifier list, remove all of its devices, then release the
 * queue, major number and core state.  Always returns 0.
 */
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Clean up the kernel thread */
	kthread_stop(tr->blkcore_priv->thread);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	blk_cleanup_queue(tr->blkcore_priv->rq);
	unregister_blkdev(tr->major, tr->name);

	mutex_unlock(&mtd_table_mutex);

	kfree(tr->blkcore_priv);

	/* remove_dev() must have emptied the list; anything left would
	 * now be dangling. */
	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");