xref: /openbmc/linux/drivers/mtd/mtd_blkdevs.c (revision 99b7e93c)
/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);

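/*
 * kref release callback, invoked when the last reference to a translation
 * layer device is dropped: detach it from its gendisk, tear down the request
 * queue, drop the gendisk and unlink the device from its list before freeing.
 */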
static void blktrans_dev_release(struct kref *kref)
{
	struct mtd_blktrans_dev *dev =
		container_of(kref, struct mtd_blktrans_dev, ref);

	dev->disk->private_data = NULL;
	blk_cleanup_queue(dev->rq);
	put_disk(dev->disk);
	list_del(&dev->list);
	kfree(dev);
}

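/*
 * Map a gendisk back to its translation layer device and take a reference.
 * blktrans_ref_mutex keeps the private_data lookup and the kref_get from
 * racing with a concurrent blktrans_dev_put(); a NULL return means the
 * device has already been released.
 */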
static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
	struct mtd_blktrans_dev *dev;

	mutex_lock(&blktrans_ref_mutex);
	dev = disk->private_data;

	if (!dev)
		goto unlock;
	kref_get(&dev->ref);
unlock:
	mutex_unlock(&blktrans_ref_mutex);
	return dev;
}

static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
	mutex_lock(&blktrans_ref_mutex);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&blktrans_ref_mutex);
}


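/*
 * Handle a single request synchronously.  Sector offsets from the block
 * layer (512-byte units) are converted into the translation layer's block
 * size via tr->blkshift and then dispatched to the flush, discard, readsect
 * or writesect hooks one block at a time.  Called with dev->lock held from
 * the workqueue below.
 */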
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
	buf = bio_data(req->bio);

	if (req->cmd_type != REQ_TYPE_FS)
		return -EIO;

	if (req->cmd_flags & REQ_FLUSH)
		return tr->flush(dev);

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return -EIO;

	if (req->cmd_flags & REQ_DISCARD)
		return tr->discard(dev, block, nsect);

	switch (rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return -EIO;
		rq_flush_dcache_pages(req);
		return 0;
	case WRITE:
		if (!tr->writesect)
			return -EIO;

		rq_flush_dcache_pages(req);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return -EIO;
		return 0;
	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return -EIO;
	}
}

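/*
 * Exported helper for translation layers: poll this from a ->background()
 * implementation to learn whether background work should stop so that
 * queued I/O can be serviced.
 */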
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
	return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);

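/*
 * Workqueue handler that drains the request queue.  The queue lock is held
 * only while fetching and completing requests; it is dropped (and dev->lock
 * taken instead) around the translation layer callbacks, which may sleep.
 * When the queue goes idle, tr->background(), if implemented, gets one
 * chance to run per idle period.
 */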
static void mtd_blktrans_work(struct work_struct *work)
{
	struct mtd_blktrans_dev *dev =
		container_of(work, struct mtd_blktrans_dev, work);
	struct mtd_blktrans_ops *tr = dev->tr;
	struct request_queue *rq = dev->rq;
	struct request *req = NULL;
	int background_done = 0;

	spin_lock_irq(rq->queue_lock);

	while (1) {
		int res;

		dev->bg_stop = false;
		if (!req && !(req = blk_fetch_request(rq))) {
			if (tr->background && !background_done) {
				spin_unlock_irq(rq->queue_lock);
				mutex_lock(&dev->lock);
				tr->background(dev);
				mutex_unlock(&dev->lock);
				spin_lock_irq(rq->queue_lock);
				/*
				 * Do background processing just once per idle
				 * period.
				 */
				background_done = !dev->bg_stop;
				continue;
			}
			break;
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;

		background_done = 0;
	}

	spin_unlock_irq(rq->queue_lock);
}

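/*
 * request_fn, called by the block layer with the queue lock held.  The real
 * work is deferred to the workqueue; if the device has already been removed
 * (queuedata cleared by del_mtd_blktrans_dev()), pending requests are failed
 * with -ENODEV instead.
 */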
static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_dev *dev;
	struct request *req = NULL;

	dev = rq->queuedata;

	if (!dev)
		while ((req = blk_fetch_request(rq)) != NULL)
			__blk_end_request_all(req, -ENODEV);
	else {
		/*
		 * Let a running ->background() know that new I/O is waiting
		 * (see mtd_blktrans_cease_background()), then kick the worker.
		 */
		dev->bg_stop = true;
		queue_work(dev->wq, &dev->work);
	}
}

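/*
 * The first open pins the device, its translation layer module and the
 * underlying MTD device, and lets the translation layer's open() veto the
 * operation; subsequent opens just bump dev->open under dev->lock.
 */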
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = 0;

	if (!dev)
		return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/

	mutex_lock(&dev->lock);

	if (dev->open)
		goto unlock;

	kref_get(&dev->ref);
	__module_get(dev->tr->owner);

	if (!dev->mtd)
		goto unlock;

	if (dev->tr->open) {
		ret = dev->tr->open(dev);
		if (ret)
			goto error_put;
	}

	ret = __get_mtd_device(dev->mtd);
	if (ret)
		goto error_release;
	dev->file_mode = mode;

unlock:
	dev->open++;
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;

error_release:
	if (dev->tr->release)
		dev->tr->release(dev);
error_put:
	module_put(dev->tr->owner);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

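/*
 * Drop one open reference; only the last close releases the translation
 * layer, the MTD device and the extra kref taken in blktrans_open().
 */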
static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);

	if (!dev)
		return;

	mutex_lock(&dev->lock);

	if (--dev->open)
		goto unlock;

	kref_put(&dev->ref, blktrans_dev_release);
	module_put(dev->tr->owner);

	if (dev->mtd) {
		if (dev->tr->release)
			dev->tr->release(dev);
		__put_mtd_device(dev->mtd);
	}
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	switch (cmd) {
	case BLKFLSBUF:
		ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
		break;
	default:
		ret = -ENOTTY;
	}
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static const struct block_device_operations mtd_block_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};

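/*
 * Called from a translation layer's add_mtd() hook, with mtd_table_mutex
 * held (the trylock/BUG() below asserts this).  The caller must have set
 * new->tr, new->mtd, new->size and new->devnum (-1 means "first free").
 * This allocates the device number, gendisk, request queue and per-device
 * workqueue, then registers the disk with the block layer.
 */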
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;
	int ret;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	mutex_lock(&blktrans_ref_mutex);
	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			mutex_unlock(&blktrans_ref_mutex);
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}

	ret = -EBUSY;
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Check that the device and any partitions will get valid
	 * minor numbers and that the disk naming code below can cope
	 * with this number. */
	if (new->devnum > (MINORMASK >> tr->part_bits) ||
	    (tr->part_bits && new->devnum >= 27 * 26)) {
		mutex_unlock(&blktrans_ref_mutex);
		goto error1;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_unlock(&blktrans_ref_mutex);

	mutex_init(&new->lock);
	kref_init(&new->ref);
	if (!tr->writesect)
		new->readonly = 1;

	/* Create gendisk */
	ret = -ENOMEM;
	gd = alloc_disk(1 << tr->part_bits);

	if (!gd)
		goto error2;

	new->disk = gd;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_block_ops;

	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	set_capacity(gd, (new->size * tr->blksize) >> 9);

	/* Create the request queue */
	spin_lock_init(&new->queue_lock);
	new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

	if (!new->rq)
		goto error3;

	if (tr->flush)
		blk_queue_flush(new->rq, REQ_FLUSH);

	new->rq->queuedata = new;
	blk_queue_logical_block_size(new->rq, tr->blksize);

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq);

	if (tr->discard) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
		new->rq->limits.max_discard_sectors = UINT_MAX;
	}

	gd->queue = new->rq;

	/* Create processing workqueue */
	new->wq = alloc_workqueue("%s%d", 0, 0,
				  tr->name, new->mtd->index);
	if (!new->wq)
		goto error4;
	INIT_WORK(&new->work, mtd_blktrans_work);

	gd->driverfs_dev = &new->mtd->dev;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	if (new->disk_attributes) {
		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
					new->disk_attributes);
		WARN_ON(ret);
	}
	return 0;
error4:
	blk_cleanup_queue(new->rq);
error3:
	put_disk(new->disk);
error2:
	list_del(&new->list);
error1:
	return ret;
}

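/*
 * Tear-down counterpart of add_mtd_blktrans_dev(), likewise called with
 * mtd_table_mutex held.  Ordering matters: remove the disk so no new
 * requests arrive, flush and destroy the workqueue, fail anything still
 * queued, then detach the MTD device and drop the initial reference.
 */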
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	unsigned long flags;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	if (old->disk_attributes)
		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
						old->disk_attributes);

	/* Stop new requests from arriving */
	del_gendisk(old->disk);

	/* Stop the workqueue. This will run any pending request. */
	destroy_workqueue(old->wq);

	/* Kill current requests */
	spin_lock_irqsave(&old->queue_lock, flags);
	old->rq->queuedata = NULL;
	blk_start_queue(old->rq);
	spin_unlock_irqrestore(&old->queue_lock, flags);

	/* If the device is currently open, tell the trans driver to close it,
	   then put the mtd device, and don't touch it again. */
	mutex_lock(&old->lock);
	if (old->open) {
		if (old->tr->release)
			old->tr->release(old);
		__put_mtd_device(old->mtd);
	}

	old->mtd = NULL;

	mutex_unlock(&old->lock);
	blktrans_dev_put(old);
	return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

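/*
 * Usage sketch (illustrative only, loosely modelled on in-tree translation
 * layers such as mtdblock; the my_* names are placeholders, not part of this
 * API): a driver fills in a struct mtd_blktrans_ops and registers it once
 * from module init.  Its add_mtd() hook then creates one mtd_blktrans_dev
 * per suitable MTD device and hands it to add_mtd_blktrans_dev(); the
 * remove_dev() hook usually just calls del_mtd_blktrans_dev(), and the kref
 * release frees the device.
 *
 *	static void my_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
 *	{
 *		struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *		if (!dev)
 *			return;
 *		dev->mtd = mtd;
 *		dev->devnum = -1;		-- let the core pick the first free number
 *		dev->size = mtd->size >> 9;	-- capacity in tr->blksize (512-byte) units
 *		dev->tr = tr;
 *		if (add_mtd_blktrans_dev(dev))
 *			kfree(dev);
 *	}
 *
 *	static struct mtd_blktrans_ops my_tr = {
 *		.name		= "myblk",
 *		.major		= 0,		-- 0 lets register_blkdev() pick a major
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= my_readsect,
 *		.writesect	= my_writesect,
 *		.add_mtd	= my_add_mtd,
 *		.remove_dev	= my_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * Module init/exit would then call register_mtd_blktrans(&my_tr) and
 * deregister_mtd_blktrans(&my_tr) respectively.
 */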
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_info *mtd;
	int ret;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from messing
	   us up. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);


	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret < 0) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

	if (ret)
		tr->major = ret;

	tr->blkshift = ffs(tr->blksize) - 1;

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	mtd_for_each_device(mtd)
		if (mtd->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd);

	mutex_unlock(&mtd_table_mutex);
	return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	unregister_blkdev(tr->major, tr->name);
	mutex_unlock(&mtd_table_mutex);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");