xref: /openbmc/linux/drivers/mtd/ubi/block.c (revision ca460cc2)
1 /*
2  * Copyright (c) 2014 Ezequiel Garcia
3  * Copyright (c) 2011 Free Electrons
4  *
5  * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
6  *   Copyright (c) International Business Machines Corp., 2006
7  *   Copyright (c) Nokia Corporation, 2007
8  *   Authors: Artem Bityutskiy, Frank Haverkamp
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation, version 2.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17  * the GNU General Public License for more details.
18  */
19 
20 /*
21  * Read-only block devices on top of UBI volumes
22  *
23  * A simple implementation to allow a block device to be layered on top of a
24  * UBI volume. The implementation is provided by creating a static 1-to-1
25  * mapping between the block device and the UBI volume.
26  *
27  * The addressed byte is obtained from the addressed block sector, which is
28  * mapped linearly into the corresponding LEB:
29  *
30  *   LEB number = addressed byte / LEB size
31  *
32  * This feature is compiled in the UBI core, and adds a 'block' parameter
33  * to allow early creation of block devices on top of UBI volumes. Runtime
34  * block creation/removal for UBI volumes is provided through two UBI ioctls:
35  * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
36  */
37 
38 #include <linux/module.h>
39 #include <linux/init.h>
40 #include <linux/err.h>
41 #include <linux/kernel.h>
42 #include <linux/list.h>
43 #include <linux/mutex.h>
44 #include <linux/slab.h>
45 #include <linux/vmalloc.h>
46 #include <linux/mtd/ubi.h>
47 #include <linux/workqueue.h>
48 #include <linux/blkdev.h>
49 #include <linux/hdreg.h>
50 #include <asm/div64.h>
51 
52 #include "ubi-media.h"
53 #include "ubi.h"
54 
55 /* Maximum number of supported devices */
56 #define UBIBLOCK_MAX_DEVICES 32
57 
58 /* Maximum length of the 'block=' parameter */
59 #define UBIBLOCK_PARAM_LEN 63
60 
61 /* Maximum number of comma-separated items in the 'block=' parameter */
62 #define UBIBLOCK_PARAM_COUNT 2
63 
/* Parsed form of one 'block=' module parameter */
struct ubiblock_param {
	int ubi_num;				/* UBI device number, or -1 if not given */
	int vol_id;				/* UBI volume ID, or -1 if given by name/path */
	char name[UBIBLOCK_PARAM_LEN+1];	/* volume name or device node path */
};
69 
70 /* Numbers of elements set in the @ubiblock_param array */
71 static int ubiblock_devs __initdata;
72 
73 /* MTD devices specification parameters */
74 static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
75 
/* Per-volume state of one ubiblock device */
struct ubiblock {
	struct ubi_volume_desc *desc;	/* open UBI volume; NULL while refcnt == 0 */
	int ubi_num;			/* backing UBI device number */
	int vol_id;			/* backing UBI volume ID */
	int refcnt;			/* open count, protected by dev_mutex */
	int leb_size;			/* usable LEB size in bytes */

	struct gendisk *gd;		/* block-layer disk object */
	struct request_queue *rq;	/* request queue drained by 'work' */

	struct workqueue_struct *wq;	/* per-device workqueue running 'work' */
	struct work_struct work;	/* request-processing work item */

	struct mutex dev_mutex;		/* serializes open/release/remove vs. I/O */
	spinlock_t queue_lock;		/* block-layer queue lock for 'rq' */
	struct list_head list;		/* entry in ubiblock_devices */
};
93 
94 /* Linked list of all ubiblock instances */
95 static LIST_HEAD(ubiblock_devices);
96 static DEFINE_MUTEX(devices_mutex);
97 static int ubiblock_major;
98 
99 static int __init ubiblock_set_param(const char *val,
100 				     const struct kernel_param *kp)
101 {
102 	int i, ret;
103 	size_t len;
104 	struct ubiblock_param *param;
105 	char buf[UBIBLOCK_PARAM_LEN];
106 	char *pbuf = &buf[0];
107 	char *tokens[UBIBLOCK_PARAM_COUNT];
108 
109 	if (!val)
110 		return -EINVAL;
111 
112 	len = strnlen(val, UBIBLOCK_PARAM_LEN);
113 	if (len == 0) {
114 		ubi_warn("block: empty 'block=' parameter - ignored\n");
115 		return 0;
116 	}
117 
118 	if (len == UBIBLOCK_PARAM_LEN) {
119 		ubi_err("block: parameter \"%s\" is too long, max. is %d\n",
120 			val, UBIBLOCK_PARAM_LEN);
121 		return -EINVAL;
122 	}
123 
124 	strcpy(buf, val);
125 
126 	/* Get rid of the final newline */
127 	if (buf[len - 1] == '\n')
128 		buf[len - 1] = '\0';
129 
130 	for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
131 		tokens[i] = strsep(&pbuf, ",");
132 
133 	param = &ubiblock_param[ubiblock_devs];
134 	if (tokens[1]) {
135 		/* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
136 		ret = kstrtoint(tokens[0], 10, &param->ubi_num);
137 		if (ret < 0)
138 			return -EINVAL;
139 
140 		/* Second param can be a number or a name */
141 		ret = kstrtoint(tokens[1], 10, &param->vol_id);
142 		if (ret < 0) {
143 			param->vol_id = -1;
144 			strcpy(param->name, tokens[1]);
145 		}
146 
147 	} else {
148 		/* One parameter: must be device path */
149 		strcpy(param->name, tokens[0]);
150 		param->ubi_num = -1;
151 		param->vol_id = -1;
152 	}
153 
154 	ubiblock_devs++;
155 
156 	return 0;
157 }
158 
159 static struct kernel_param_ops ubiblock_param_ops = {
160 	.set    = ubiblock_set_param,
161 };
162 module_param_cb(block, &ubiblock_param_ops, NULL, 0);
163 MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
164 			"Multiple \"block\" parameters may be specified.\n"
165 			"UBI volumes may be specified by their number, name, or path to the device node.\n"
166 			"Examples\n"
167 			"Using the UBI volume path:\n"
168 			"ubi.block=/dev/ubi0_0\n"
169 			"Using the UBI device, and the volume name:\n"
170 			"ubi.block=0,rootfs\n"
171 			"Using both UBI device number and UBI volume number:\n"
172 			"ubi.block=0,0\n");
173 
174 static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
175 {
176 	struct ubiblock *dev;
177 
178 	list_for_each_entry(dev, &ubiblock_devices, list)
179 		if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
180 			return dev;
181 	return NULL;
182 }
183 
184 static int ubiblock_read_to_buf(struct ubiblock *dev, char *buffer,
185 				int leb, int offset, int len)
186 {
187 	int ret;
188 
189 	ret = ubi_read(dev->desc, leb, buffer, offset, len);
190 	if (ret) {
191 		ubi_err("%s: error %d while reading from LEB %d (offset %d, "
192 		        "length %d)", dev->gd->disk_name, ret, leb, offset,
193 			len);
194 		return ret;
195 	}
196 	return 0;
197 }
198 
199 static int ubiblock_read(struct ubiblock *dev, char *buffer,
200 			 sector_t sec, int len)
201 {
202 	int ret, leb, offset;
203 	int bytes_left = len;
204 	int to_read = len;
205 	u64 pos = sec << 9;
206 
207 	/* Get LEB:offset address to read from */
208 	offset = do_div(pos, dev->leb_size);
209 	leb = pos;
210 
211 	while (bytes_left) {
212 		/*
213 		 * We can only read one LEB at a time. Therefore if the read
214 		 * length is larger than one LEB size, we split the operation.
215 		 */
216 		if (offset + to_read > dev->leb_size)
217 			to_read = dev->leb_size - offset;
218 
219 		ret = ubiblock_read_to_buf(dev, buffer, leb, offset, to_read);
220 		if (ret)
221 			return ret;
222 
223 		buffer += to_read;
224 		bytes_left -= to_read;
225 		to_read = bytes_left;
226 		leb += 1;
227 		offset = 0;
228 	}
229 	return 0;
230 }
231 
232 static int do_ubiblock_request(struct ubiblock *dev, struct request *req)
233 {
234 	int len, ret;
235 	sector_t sec;
236 
237 	if (req->cmd_type != REQ_TYPE_FS)
238 		return -EIO;
239 
240 	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
241 	    get_capacity(req->rq_disk))
242 		return -EIO;
243 
244 	if (rq_data_dir(req) != READ)
245 		return -ENOSYS; /* Write not implemented */
246 
247 	sec = blk_rq_pos(req);
248 	len = blk_rq_cur_bytes(req);
249 
250 	/*
251 	 * Let's prevent the device from being removed while we're doing I/O
252 	 * work. Notice that this means we serialize all the I/O operations,
253 	 * but it's probably of no impact given the NAND core serializes
254 	 * flash access anyway.
255 	 */
256 	mutex_lock(&dev->dev_mutex);
257 	ret = ubiblock_read(dev, bio_data(req->bio), sec, len);
258 	mutex_unlock(&dev->dev_mutex);
259 
260 	return ret;
261 }
262 
/*
 * Workqueue handler: drain the request queue, servicing each request
 * through do_ubiblock_request(). Runs in process context on dev->wq.
 */
static void ubiblock_do_work(struct work_struct *work)
{
	struct ubiblock *dev =
		container_of(work, struct ubiblock, work);
	struct request_queue *rq = dev->rq;
	struct request *req;
	int res;

	/* blk_fetch_request()/__blk_end_request_cur() need the queue lock */
	spin_lock_irq(rq->queue_lock);

	req = blk_fetch_request(rq);
	while (req) {

		/* Drop the lock around the actual I/O, which may sleep */
		spin_unlock_irq(rq->queue_lock);
		res = do_ubiblock_request(dev, req);
		spin_lock_irq(rq->queue_lock);

		/*
		 * If we're done with this request,
		 * we need to fetch a new one
		 */
		if (!__blk_end_request_cur(req, res))
			req = blk_fetch_request(rq);
	}

	spin_unlock_irq(rq->queue_lock);
}
290 
291 static void ubiblock_request(struct request_queue *rq)
292 {
293 	struct ubiblock *dev;
294 	struct request *req;
295 
296 	dev = rq->queuedata;
297 
298 	if (!dev)
299 		while ((req = blk_fetch_request(rq)) != NULL)
300 			__blk_end_request_all(req, -ENODEV);
301 	else
302 		queue_work(dev->wq, &dev->work);
303 }
304 
305 static int ubiblock_open(struct block_device *bdev, fmode_t mode)
306 {
307 	struct ubiblock *dev = bdev->bd_disk->private_data;
308 	int ret;
309 
310 	mutex_lock(&dev->dev_mutex);
311 	if (dev->refcnt > 0) {
312 		/*
313 		 * The volume is already open, just increase the reference
314 		 * counter.
315 		 */
316 		goto out_done;
317 	}
318 
319 	/*
320 	 * We want users to be aware they should only mount us as read-only.
321 	 * It's just a paranoid check, as write requests will get rejected
322 	 * in any case.
323 	 */
324 	if (mode & FMODE_WRITE) {
325 		ret = -EPERM;
326 		goto out_unlock;
327 	}
328 
329 	dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
330 	if (IS_ERR(dev->desc)) {
331 		ubi_err("%s failed to open ubi volume %d_%d",
332 			dev->gd->disk_name, dev->ubi_num, dev->vol_id);
333 		ret = PTR_ERR(dev->desc);
334 		dev->desc = NULL;
335 		goto out_unlock;
336 	}
337 
338 out_done:
339 	dev->refcnt++;
340 	mutex_unlock(&dev->dev_mutex);
341 	return 0;
342 
343 out_unlock:
344 	mutex_unlock(&dev->dev_mutex);
345 	return ret;
346 }
347 
348 static void ubiblock_release(struct gendisk *gd, fmode_t mode)
349 {
350 	struct ubiblock *dev = gd->private_data;
351 
352 	mutex_lock(&dev->dev_mutex);
353 	dev->refcnt--;
354 	if (dev->refcnt == 0) {
355 		ubi_close_volume(dev->desc);
356 		dev->desc = NULL;
357 	}
358 	mutex_unlock(&dev->dev_mutex);
359 }
360 
/*
 * HDIO_GETGEO handler. Flash has no real geometry; report a trivial
 * one-head, one-cylinder layout.
 */
static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* Some tools might require this information */
	geo->heads = 1;
	geo->cylinders = 1;
	geo->sectors = get_capacity(bdev->bd_disk);
	geo->start = 0;
	return 0;
}
370 
/* Block device operations; this device is strictly read-only */
static const struct block_device_operations ubiblock_ops = {
	.owner = THIS_MODULE,
	.open = ubiblock_open,
	.release = ubiblock_release,
	.getgeo	= ubiblock_getgeo,
};
377 
378 int ubiblock_create(struct ubi_volume_info *vi)
379 {
380 	struct ubiblock *dev;
381 	struct gendisk *gd;
382 	u64 disk_capacity = vi->used_bytes >> 9;
383 	int ret;
384 
385 	if ((sector_t)disk_capacity != disk_capacity)
386 		return -EFBIG;
387 	/* Check that the volume isn't already handled */
388 	mutex_lock(&devices_mutex);
389 	if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
390 		mutex_unlock(&devices_mutex);
391 		return -EEXIST;
392 	}
393 	mutex_unlock(&devices_mutex);
394 
395 	dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
396 	if (!dev)
397 		return -ENOMEM;
398 
399 	mutex_init(&dev->dev_mutex);
400 
401 	dev->ubi_num = vi->ubi_num;
402 	dev->vol_id = vi->vol_id;
403 	dev->leb_size = vi->usable_leb_size;
404 
405 	/* Initialize the gendisk of this ubiblock device */
406 	gd = alloc_disk(1);
407 	if (!gd) {
408 		ubi_err("block: alloc_disk failed");
409 		ret = -ENODEV;
410 		goto out_free_dev;
411 	}
412 
413 	gd->fops = &ubiblock_ops;
414 	gd->major = ubiblock_major;
415 	gd->first_minor = dev->ubi_num * UBI_MAX_VOLUMES + dev->vol_id;
416 	gd->private_data = dev;
417 	sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
418 	set_capacity(gd, disk_capacity);
419 	dev->gd = gd;
420 
421 	spin_lock_init(&dev->queue_lock);
422 	dev->rq = blk_init_queue(ubiblock_request, &dev->queue_lock);
423 	if (!dev->rq) {
424 		ubi_err("block: blk_init_queue failed");
425 		ret = -ENODEV;
426 		goto out_put_disk;
427 	}
428 
429 	dev->rq->queuedata = dev;
430 	dev->gd->queue = dev->rq;
431 
432 	/*
433 	 * Create one workqueue per volume (per registered block device).
434 	 * Rembember workqueues are cheap, they're not threads.
435 	 */
436 	dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
437 	if (!dev->wq) {
438 		ret = -ENOMEM;
439 		goto out_free_queue;
440 	}
441 	INIT_WORK(&dev->work, ubiblock_do_work);
442 
443 	mutex_lock(&devices_mutex);
444 	list_add_tail(&dev->list, &ubiblock_devices);
445 	mutex_unlock(&devices_mutex);
446 
447 	/* Must be the last step: anyone can call file ops from now on */
448 	add_disk(dev->gd);
449 	ubi_msg("%s created from ubi%d:%d(%s)",
450 		dev->gd->disk_name, dev->ubi_num, dev->vol_id, vi->name);
451 	return 0;
452 
453 out_free_queue:
454 	blk_cleanup_queue(dev->rq);
455 out_put_disk:
456 	put_disk(dev->gd);
457 out_free_dev:
458 	kfree(dev);
459 
460 	return ret;
461 }
462 
/*
 * Tear down the block-layer side of @dev: unregister the gendisk,
 * release its request queue, then drop the final gendisk reference.
 * Freeing the ubiblock structure itself is left to the caller.
 */
static void ubiblock_cleanup(struct ubiblock *dev)
{
	del_gendisk(dev->gd);
	blk_cleanup_queue(dev->rq);
	ubi_msg("%s released", dev->gd->disk_name);
	put_disk(dev->gd);
}
470 
/*
 * Remove the ubiblock device backed by the volume described by @vi.
 *
 * Returns 0 on success, -ENODEV if the volume has no block device, or
 * -EBUSY if the device is still open.
 */
int ubiblock_remove(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;

	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}

	/* Found a device, let's lock it so we can check if it's busy */
	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		mutex_unlock(&dev->dev_mutex);
		mutex_unlock(&devices_mutex);
		return -EBUSY;
	}

	/* Remove from device list */
	list_del(&dev->list);
	mutex_unlock(&devices_mutex);

	/*
	 * Flush pending work and stop this workqueue.
	 *
	 * NOTE(review): this waits for ubiblock_do_work() while dev_mutex is
	 * held, and do_ubiblock_request() also takes dev_mutex. With
	 * refcnt == 0 no requests should be queued, but confirm this cannot
	 * deadlock if a stale work item is still pending.
	 */
	destroy_workqueue(dev->wq);

	ubiblock_cleanup(dev);
	mutex_unlock(&dev->dev_mutex);
	kfree(dev);
	return 0;
}
502 
/*
 * Propagate a volume size change to the block device's capacity.
 *
 * Returns 0 on success, -ENODEV if the volume has no block device, or
 * -EFBIG if the new capacity does not fit in sector_t.
 */
static int ubiblock_resize(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	u64 disk_capacity = vi->used_bytes >> 9;

	/*
	 * Need to lock the device list until we stop using the device,
	 * otherwise the device struct might get released in
	 * 'ubiblock_remove()'.
	 */
	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}
	/* Capacity must be representable in sector_t (32-bit w/o LBDAF) */
	if ((sector_t)disk_capacity != disk_capacity) {
		mutex_unlock(&devices_mutex);
		ubi_warn("%s: the volume is too big (%d LEBs), cannot resize",
			 dev->gd->disk_name, vi->size);
		return -EFBIG;
	}

	mutex_lock(&dev->dev_mutex);

	/* Only touch the gendisk if the capacity actually changed */
	if (get_capacity(dev->gd) != disk_capacity) {
		set_capacity(dev->gd, disk_capacity);
		ubi_msg("%s resized to %lld bytes", dev->gd->disk_name,
			vi->used_bytes);
	}
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);
	return 0;
}
537 
538 static int ubiblock_notify(struct notifier_block *nb,
539 			 unsigned long notification_type, void *ns_ptr)
540 {
541 	struct ubi_notification *nt = ns_ptr;
542 
543 	switch (notification_type) {
544 	case UBI_VOLUME_ADDED:
545 		/*
546 		 * We want to enforce explicit block device creation for
547 		 * volumes, so when a volume is added we do nothing.
548 		 */
549 		break;
550 	case UBI_VOLUME_REMOVED:
551 		ubiblock_remove(&nt->vi);
552 		break;
553 	case UBI_VOLUME_RESIZED:
554 		ubiblock_resize(&nt->vi);
555 		break;
556 	case UBI_VOLUME_UPDATED:
557 		/*
558 		 * If the volume is static, a content update might mean the
559 		 * size (i.e. used_bytes) was also changed.
560 		 */
561 		if (nt->vi.vol_type == UBI_STATIC_VOLUME)
562 			ubiblock_resize(&nt->vi);
563 		break;
564 	default:
565 		break;
566 	}
567 	return NOTIFY_OK;
568 }
569 
/* Notifier registered with the UBI core in ubiblock_init() */
static struct notifier_block ubiblock_notifier = {
	.notifier_call = ubiblock_notify,
};
573 
574 static struct ubi_volume_desc * __init
575 open_volume_desc(const char *name, int ubi_num, int vol_id)
576 {
577 	if (ubi_num == -1)
578 		/* No ubi num, name must be a vol device path */
579 		return ubi_open_volume_path(name, UBI_READONLY);
580 	else if (vol_id == -1)
581 		/* No vol_id, must be vol_name */
582 		return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
583 	else
584 		return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
585 }
586 
587 static int __init ubiblock_create_from_param(void)
588 {
589 	int i, ret;
590 	struct ubiblock_param *p;
591 	struct ubi_volume_desc *desc;
592 	struct ubi_volume_info vi;
593 
594 	for (i = 0; i < ubiblock_devs; i++) {
595 		p = &ubiblock_param[i];
596 
597 		desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
598 		if (IS_ERR(desc)) {
599 			ubi_err("block: can't open volume, err=%ld\n",
600 				PTR_ERR(desc));
601 			ret = PTR_ERR(desc);
602 			break;
603 		}
604 
605 		ubi_get_volume_info(desc, &vi);
606 		ubi_close_volume(desc);
607 
608 		ret = ubiblock_create(&vi);
609 		if (ret) {
610 			ubi_err("block: can't add '%s' volume, err=%d\n",
611 				vi.name, ret);
612 			break;
613 		}
614 	}
615 	return ret;
616 }
617 
/*
 * Destroy every remaining ubiblock device. Called on init failure and
 * from module exit; no list locking is taken here — presumably no new
 * devices can be created at these points (TODO confirm for the exit
 * path vs. a late notifier callback).
 */
static void ubiblock_remove_all(void)
{
	struct ubiblock *next;
	struct ubiblock *dev;

	list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
		/* Flush pending work and stop workqueue */
		destroy_workqueue(dev->wq);
		/* The module is being forcefully removed */
		WARN_ON(dev->desc);
		/* Remove from device list */
		list_del(&dev->list);
		ubiblock_cleanup(dev);
		kfree(dev);
	}
}
634 
635 int __init ubiblock_init(void)
636 {
637 	int ret;
638 
639 	ubiblock_major = register_blkdev(0, "ubiblock");
640 	if (ubiblock_major < 0)
641 		return ubiblock_major;
642 
643 	/* Attach block devices from 'block=' module param */
644 	ret = ubiblock_create_from_param();
645 	if (ret)
646 		goto err_remove;
647 
648 	/*
649 	 * Block devices are only created upon user requests, so we ignore
650 	 * existing volumes.
651 	 */
652 	ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
653 	if (ret)
654 		goto err_unreg;
655 	return 0;
656 
657 err_unreg:
658 	unregister_blkdev(ubiblock_major, "ubiblock");
659 err_remove:
660 	ubiblock_remove_all();
661 	return ret;
662 }
663 
/*
 * Module exit: stop listening for volume events, destroy all remaining
 * block devices and release the block major.
 */
void __exit ubiblock_exit(void)
{
	ubi_unregister_volume_notifier(&ubiblock_notifier);
	ubiblock_remove_all();
	unregister_blkdev(ubiblock_major, "ubiblock");
}
670