xref: /openbmc/linux/drivers/mtd/ubi/block.c (revision d2999e1b)
1 /*
2  * Copyright (c) 2014 Ezequiel Garcia
3  * Copyright (c) 2011 Free Electrons
4  *
5  * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
6  *   Copyright (c) International Business Machines Corp., 2006
7  *   Copyright (c) Nokia Corporation, 2007
8  *   Authors: Artem Bityutskiy, Frank Haverkamp
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation, version 2.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17  * the GNU General Public License for more details.
18  */
19 
20 /*
21  * Read-only block devices on top of UBI volumes
22  *
23  * A simple implementation to allow a block device to be layered on top of a
24  * UBI volume. The implementation is provided by creating a static 1-to-1
25  * mapping between the block device and the UBI volume.
26  *
27  * The addressed byte is obtained from the addressed block sector, which is
28  * mapped linearly into the corresponding LEB:
29  *
30  *   LEB number = addressed byte / LEB size
31  *
32  * This feature is compiled in the UBI core, and adds a 'block' parameter
33  * to allow early creation of block devices on top of UBI volumes. Runtime
34  * block creation/removal for UBI volumes is provided through two UBI ioctls:
35  * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
36  */
37 
38 #include <linux/module.h>
39 #include <linux/init.h>
40 #include <linux/err.h>
41 #include <linux/kernel.h>
42 #include <linux/list.h>
43 #include <linux/mutex.h>
44 #include <linux/slab.h>
45 #include <linux/vmalloc.h>
46 #include <linux/mtd/ubi.h>
47 #include <linux/workqueue.h>
48 #include <linux/blkdev.h>
49 #include <linux/hdreg.h>
50 #include <asm/div64.h>
51 
52 #include "ubi-media.h"
53 #include "ubi.h"
54 
55 /* Maximum number of supported devices */
56 #define UBIBLOCK_MAX_DEVICES 32
57 
58 /* Maximum length of the 'block=' parameter */
59 #define UBIBLOCK_PARAM_LEN 63
60 
61 /* Maximum number of comma-separated items in the 'block=' parameter */
62 #define UBIBLOCK_PARAM_COUNT 2
63 
/* One parsed 'block=' parameter: a volume addressed by number, name, or path */
struct ubiblock_param {
	int ubi_num;	/* UBI device number, or -1 when addressed by path */
	int vol_id;	/* volume ID, or -1 when addressed by name or path */
	char name[UBIBLOCK_PARAM_LEN+1];	/* volume name or device node path */
};
69 
/* Number of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;

/* Specifications collected from the 'block=' module parameter */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
75 
/* Per-volume block device state; one instance per attached UBI volume */
struct ubiblock {
	struct ubi_volume_desc *desc;	/* open volume handle; NULL while not open */
	int ubi_num;			/* backing UBI device number */
	int vol_id;			/* backing UBI volume ID */
	int refcnt;			/* open count, protected by dev_mutex */
	int leb_size;			/* usable LEB size in bytes */

	struct gendisk *gd;
	struct request_queue *rq;

	struct workqueue_struct *wq;	/* per-device queue that services requests */
	struct work_struct work;

	struct mutex dev_mutex;		/* serializes open/release/remove and I/O */
	spinlock_t queue_lock;		/* request-queue lock used by the block core */
	struct list_head list;		/* link in ubiblock_devices */
};
93 
/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
/* Protects ubiblock_devices against concurrent create/remove/resize */
static DEFINE_MUTEX(devices_mutex);
/* Dynamically allocated block major number, set in ubiblock_init() */
static int ubiblock_major;
98 
99 static int __init ubiblock_set_param(const char *val,
100 				     const struct kernel_param *kp)
101 {
102 	int i, ret;
103 	size_t len;
104 	struct ubiblock_param *param;
105 	char buf[UBIBLOCK_PARAM_LEN];
106 	char *pbuf = &buf[0];
107 	char *tokens[UBIBLOCK_PARAM_COUNT];
108 
109 	if (!val)
110 		return -EINVAL;
111 
112 	len = strnlen(val, UBIBLOCK_PARAM_LEN);
113 	if (len == 0) {
114 		ubi_warn("block: empty 'block=' parameter - ignored\n");
115 		return 0;
116 	}
117 
118 	if (len == UBIBLOCK_PARAM_LEN) {
119 		ubi_err("block: parameter \"%s\" is too long, max. is %d\n",
120 			val, UBIBLOCK_PARAM_LEN);
121 		return -EINVAL;
122 	}
123 
124 	strcpy(buf, val);
125 
126 	/* Get rid of the final newline */
127 	if (buf[len - 1] == '\n')
128 		buf[len - 1] = '\0';
129 
130 	for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
131 		tokens[i] = strsep(&pbuf, ",");
132 
133 	param = &ubiblock_param[ubiblock_devs];
134 	if (tokens[1]) {
135 		/* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
136 		ret = kstrtoint(tokens[0], 10, &param->ubi_num);
137 		if (ret < 0)
138 			return -EINVAL;
139 
140 		/* Second param can be a number or a name */
141 		ret = kstrtoint(tokens[1], 10, &param->vol_id);
142 		if (ret < 0) {
143 			param->vol_id = -1;
144 			strcpy(param->name, tokens[1]);
145 		}
146 
147 	} else {
148 		/* One parameter: must be device path */
149 		strcpy(param->name, tokens[0]);
150 		param->ubi_num = -1;
151 		param->vol_id = -1;
152 	}
153 
154 	ubiblock_devs++;
155 
156 	return 0;
157 }
158 
159 static struct kernel_param_ops ubiblock_param_ops = {
160 	.set    = ubiblock_set_param,
161 };
162 module_param_cb(block, &ubiblock_param_ops, NULL, 0);
163 MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
164 			"Multiple \"block\" parameters may be specified.\n"
165 			"UBI volumes may be specified by their number, name, or path to the device node.\n"
166 			"Examples\n"
167 			"Using the UBI volume path:\n"
168 			"ubi.block=/dev/ubi0_0\n"
169 			"Using the UBI device, and the volume name:\n"
170 			"ubi.block=0,rootfs\n"
171 			"Using both UBI device number and UBI volume number:\n"
172 			"ubi.block=0,0\n");
173 
174 static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
175 {
176 	struct ubiblock *dev;
177 
178 	list_for_each_entry(dev, &ubiblock_devices, list)
179 		if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
180 			return dev;
181 	return NULL;
182 }
183 
184 static int ubiblock_read_to_buf(struct ubiblock *dev, char *buffer,
185 				int leb, int offset, int len)
186 {
187 	int ret;
188 
189 	ret = ubi_read(dev->desc, leb, buffer, offset, len);
190 	if (ret) {
191 		ubi_err("%s ubi_read error %d",
192 			dev->gd->disk_name, ret);
193 		return ret;
194 	}
195 	return 0;
196 }
197 
198 static int ubiblock_read(struct ubiblock *dev, char *buffer,
199 			 sector_t sec, int len)
200 {
201 	int ret, leb, offset;
202 	int bytes_left = len;
203 	int to_read = len;
204 	u64 pos = sec << 9;
205 
206 	/* Get LEB:offset address to read from */
207 	offset = do_div(pos, dev->leb_size);
208 	leb = pos;
209 
210 	while (bytes_left) {
211 		/*
212 		 * We can only read one LEB at a time. Therefore if the read
213 		 * length is larger than one LEB size, we split the operation.
214 		 */
215 		if (offset + to_read > dev->leb_size)
216 			to_read = dev->leb_size - offset;
217 
218 		ret = ubiblock_read_to_buf(dev, buffer, leb, offset, to_read);
219 		if (ret)
220 			return ret;
221 
222 		buffer += to_read;
223 		bytes_left -= to_read;
224 		to_read = bytes_left;
225 		leb += 1;
226 		offset = 0;
227 	}
228 	return 0;
229 }
230 
/*
 * Service one block-layer request against the backing UBI volume.
 *
 * Returns 0 on success or a negative errno: -EIO for non-filesystem
 * requests or reads past the device capacity, -ENOSYS for writes
 * (this driver is read-only). Runs from the per-device workqueue,
 * so sleeping under dev_mutex is fine.
 */
static int do_ubiblock_request(struct ubiblock *dev, struct request *req)
{
	int len, ret;
	sector_t sec;

	/* Only regular filesystem requests are handled */
	if (req->cmd_type != REQ_TYPE_FS)
		return -EIO;

	/* Reject requests that run past the end of the device */
	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return -EIO;

	if (rq_data_dir(req) != READ)
		return -ENOSYS; /* Write not implemented */

	sec = blk_rq_pos(req);
	len = blk_rq_cur_bytes(req);

	/*
	 * Let's prevent the device from being removed while we're doing I/O
	 * work. Notice that this means we serialize all the I/O operations,
	 * but it's probably of no impact given the NAND core serializes
	 * flash access anyway.
	 */
	mutex_lock(&dev->dev_mutex);
	ret = ubiblock_read(dev, bio_data(req->bio), sec, len);
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
261 
/*
 * Workqueue handler: drain the request queue, servicing each request.
 *
 * The queue lock must be held to fetch/end requests, but must be
 * dropped around do_ubiblock_request(), which sleeps in UBI I/O.
 */
static void ubiblock_do_work(struct work_struct *work)
{
	struct ubiblock *dev =
		container_of(work, struct ubiblock, work);
	struct request_queue *rq = dev->rq;
	struct request *req;
	int res;

	spin_lock_irq(rq->queue_lock);

	req = blk_fetch_request(rq);
	while (req) {

		/* Drop the queue lock: the actual I/O sleeps */
		spin_unlock_irq(rq->queue_lock);
		res = do_ubiblock_request(dev, req);
		spin_lock_irq(rq->queue_lock);

		/*
		 * If we're done with this request,
		 * we need to fetch a new one
		 */
		if (!__blk_end_request_cur(req, res))
			req = blk_fetch_request(rq);
	}

	spin_unlock_irq(rq->queue_lock);
}
289 
290 static void ubiblock_request(struct request_queue *rq)
291 {
292 	struct ubiblock *dev;
293 	struct request *req;
294 
295 	dev = rq->queuedata;
296 
297 	if (!dev)
298 		while ((req = blk_fetch_request(rq)) != NULL)
299 			__blk_end_request_all(req, -ENODEV);
300 	else
301 		queue_work(dev->wq, &dev->work);
302 }
303 
/*
 * Open the block device. The backing UBI volume is opened read-only on
 * the first open; later opens only bump the reference count.
 *
 * Returns 0 on success, -EPERM for write opens, or the error from
 * ubi_open_volume().
 */
static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
	struct ubiblock *dev = bdev->bd_disk->private_data;
	int ret;

	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		/*
		 * The volume is already open, just increase the reference
		 * counter.
		 */
		goto out_done;
	}

	/*
	 * We want users to be aware they should only mount us as read-only.
	 * It's just a paranoid check, as write requests will get rejected
	 * in any case.
	 */
	if (mode & FMODE_WRITE) {
		ret = -EPERM;
		goto out_unlock;
	}

	dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
	if (IS_ERR(dev->desc)) {
		ubi_err("%s failed to open ubi volume %d_%d",
			dev->gd->disk_name, dev->ubi_num, dev->vol_id);
		ret = PTR_ERR(dev->desc);
		/* Clear the error pointer so release/open never sees it */
		dev->desc = NULL;
		goto out_unlock;
	}

out_done:
	dev->refcnt++;
	mutex_unlock(&dev->dev_mutex);
	return 0;

out_unlock:
	mutex_unlock(&dev->dev_mutex);
	return ret;
}
346 
347 static void ubiblock_release(struct gendisk *gd, fmode_t mode)
348 {
349 	struct ubiblock *dev = gd->private_data;
350 
351 	mutex_lock(&dev->dev_mutex);
352 	dev->refcnt--;
353 	if (dev->refcnt == 0) {
354 		ubi_close_volume(dev->desc);
355 		dev->desc = NULL;
356 	}
357 	mutex_unlock(&dev->dev_mutex);
358 }
359 
360 static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
361 {
362 	/* Some tools might require this information */
363 	geo->heads = 1;
364 	geo->cylinders = 1;
365 	geo->sectors = get_capacity(bdev->bd_disk);
366 	geo->start = 0;
367 	return 0;
368 }
369 
/* Block device file operations; read-only, so no ioctl or media change */
static const struct block_device_operations ubiblock_ops = {
	.owner = THIS_MODULE,
	.open = ubiblock_open,
	.release = ubiblock_release,
	.getgeo	= ubiblock_getgeo,
};
376 
377 int ubiblock_create(struct ubi_volume_info *vi)
378 {
379 	struct ubiblock *dev;
380 	struct gendisk *gd;
381 	int disk_capacity;
382 	int ret;
383 
384 	/* Check that the volume isn't already handled */
385 	mutex_lock(&devices_mutex);
386 	if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
387 		mutex_unlock(&devices_mutex);
388 		return -EEXIST;
389 	}
390 	mutex_unlock(&devices_mutex);
391 
392 	dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
393 	if (!dev)
394 		return -ENOMEM;
395 
396 	mutex_init(&dev->dev_mutex);
397 
398 	dev->ubi_num = vi->ubi_num;
399 	dev->vol_id = vi->vol_id;
400 	dev->leb_size = vi->usable_leb_size;
401 
402 	/* Initialize the gendisk of this ubiblock device */
403 	gd = alloc_disk(1);
404 	if (!gd) {
405 		ubi_err("block: alloc_disk failed");
406 		ret = -ENODEV;
407 		goto out_free_dev;
408 	}
409 
410 	gd->fops = &ubiblock_ops;
411 	gd->major = ubiblock_major;
412 	gd->first_minor = dev->ubi_num * UBI_MAX_VOLUMES + dev->vol_id;
413 	gd->private_data = dev;
414 	sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
415 	disk_capacity = (vi->size * vi->usable_leb_size) >> 9;
416 	set_capacity(gd, disk_capacity);
417 	dev->gd = gd;
418 
419 	spin_lock_init(&dev->queue_lock);
420 	dev->rq = blk_init_queue(ubiblock_request, &dev->queue_lock);
421 	if (!dev->rq) {
422 		ubi_err("block: blk_init_queue failed");
423 		ret = -ENODEV;
424 		goto out_put_disk;
425 	}
426 
427 	dev->rq->queuedata = dev;
428 	dev->gd->queue = dev->rq;
429 
430 	/*
431 	 * Create one workqueue per volume (per registered block device).
432 	 * Rembember workqueues are cheap, they're not threads.
433 	 */
434 	dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
435 	if (!dev->wq) {
436 		ret = -ENOMEM;
437 		goto out_free_queue;
438 	}
439 	INIT_WORK(&dev->work, ubiblock_do_work);
440 
441 	mutex_lock(&devices_mutex);
442 	list_add_tail(&dev->list, &ubiblock_devices);
443 	mutex_unlock(&devices_mutex);
444 
445 	/* Must be the last step: anyone can call file ops from now on */
446 	add_disk(dev->gd);
447 	ubi_msg("%s created from ubi%d:%d(%s)",
448 		dev->gd->disk_name, dev->ubi_num, dev->vol_id, vi->name);
449 	return 0;
450 
451 out_free_queue:
452 	blk_cleanup_queue(dev->rq);
453 out_put_disk:
454 	put_disk(dev->gd);
455 out_free_dev:
456 	kfree(dev);
457 
458 	return ret;
459 }
460 
461 static void ubiblock_cleanup(struct ubiblock *dev)
462 {
463 	del_gendisk(dev->gd);
464 	blk_cleanup_queue(dev->rq);
465 	ubi_msg("%s released", dev->gd->disk_name);
466 	put_disk(dev->gd);
467 }
468 
/*
 * Detach and destroy the block device sitting on top of the given
 * UBI volume.
 *
 * Returns 0 on success, -ENODEV when the volume has no block device
 * attached, -EBUSY when the device is still open.
 */
int ubiblock_remove(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;

	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}

	/* Found a device, let's lock it so we can check if it's busy */
	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		mutex_unlock(&dev->dev_mutex);
		mutex_unlock(&devices_mutex);
		return -EBUSY;
	}

	/* Remove from device list */
	list_del(&dev->list);
	mutex_unlock(&devices_mutex);

	/* Flush pending work and stop this workqueue */
	destroy_workqueue(dev->wq);

	ubiblock_cleanup(dev);
	mutex_unlock(&dev->dev_mutex);
	kfree(dev);
	return 0;
}
500 
501 static void ubiblock_resize(struct ubi_volume_info *vi)
502 {
503 	struct ubiblock *dev;
504 	int disk_capacity;
505 
506 	/*
507 	 * Need to lock the device list until we stop using the device,
508 	 * otherwise the device struct might get released in
509 	 * 'ubiblock_remove()'.
510 	 */
511 	mutex_lock(&devices_mutex);
512 	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
513 	if (!dev) {
514 		mutex_unlock(&devices_mutex);
515 		return;
516 	}
517 
518 	mutex_lock(&dev->dev_mutex);
519 	disk_capacity = (vi->size * vi->usable_leb_size) >> 9;
520 	set_capacity(dev->gd, disk_capacity);
521 	ubi_msg("%s resized to %d LEBs", dev->gd->disk_name, vi->size);
522 	mutex_unlock(&dev->dev_mutex);
523 	mutex_unlock(&devices_mutex);
524 }
525 
526 static int ubiblock_notify(struct notifier_block *nb,
527 			 unsigned long notification_type, void *ns_ptr)
528 {
529 	struct ubi_notification *nt = ns_ptr;
530 
531 	switch (notification_type) {
532 	case UBI_VOLUME_ADDED:
533 		/*
534 		 * We want to enforce explicit block device creation for
535 		 * volumes, so when a volume is added we do nothing.
536 		 */
537 		break;
538 	case UBI_VOLUME_REMOVED:
539 		ubiblock_remove(&nt->vi);
540 		break;
541 	case UBI_VOLUME_RESIZED:
542 		ubiblock_resize(&nt->vi);
543 		break;
544 	default:
545 		break;
546 	}
547 	return NOTIFY_OK;
548 }
549 
/* Receives UBI volume removal/resize notifications for attached devices */
static struct notifier_block ubiblock_notifier = {
	.notifier_call = ubiblock_notify,
};
553 
554 static struct ubi_volume_desc * __init
555 open_volume_desc(const char *name, int ubi_num, int vol_id)
556 {
557 	if (ubi_num == -1)
558 		/* No ubi num, name must be a vol device path */
559 		return ubi_open_volume_path(name, UBI_READONLY);
560 	else if (vol_id == -1)
561 		/* No vol_id, must be vol_name */
562 		return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
563 	else
564 		return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
565 }
566 
567 static int __init ubiblock_create_from_param(void)
568 {
569 	int i, ret;
570 	struct ubiblock_param *p;
571 	struct ubi_volume_desc *desc;
572 	struct ubi_volume_info vi;
573 
574 	for (i = 0; i < ubiblock_devs; i++) {
575 		p = &ubiblock_param[i];
576 
577 		desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
578 		if (IS_ERR(desc)) {
579 			ubi_err("block: can't open volume, err=%ld\n",
580 				PTR_ERR(desc));
581 			ret = PTR_ERR(desc);
582 			break;
583 		}
584 
585 		ubi_get_volume_info(desc, &vi);
586 		ubi_close_volume(desc);
587 
588 		ret = ubiblock_create(&vi);
589 		if (ret) {
590 			ubi_err("block: can't add '%s' volume, err=%d\n",
591 				vi.name, ret);
592 			break;
593 		}
594 	}
595 	return ret;
596 }
597 
/*
 * Tear down every remaining ubiblock device. Used on init failure and
 * module exit.
 *
 * NOTE(review): the list is walked without devices_mutex - presumably
 * safe because callers run single-threaded init/exit paths; confirm
 * before reusing elsewhere.
 */
static void ubiblock_remove_all(void)
{
	struct ubiblock *next;
	struct ubiblock *dev;

	list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
		/* Flush pending work and stop workqueue */
		destroy_workqueue(dev->wq);
		/* The module is being forcefully removed */
		WARN_ON(dev->desc);
		/* Remove from device list */
		list_del(&dev->list);
		ubiblock_cleanup(dev);
		kfree(dev);
	}
}
614 
/*
 * Initialize the ubiblock subsystem: register a dynamic block major,
 * create the devices requested via the 'block=' module parameter and
 * subscribe to UBI volume notifications.
 *
 * Returns 0 on success or a negative error code; on failure everything
 * registered so far is rolled back.
 */
int __init ubiblock_init(void)
{
	int ret;

	/* 0 asks the block layer for a dynamically allocated major */
	ubiblock_major = register_blkdev(0, "ubiblock");
	if (ubiblock_major < 0)
		return ubiblock_major;

	/* Attach block devices from 'block=' module param */
	ret = ubiblock_create_from_param();
	if (ret)
		goto err_remove;

	/*
	 * Block devices are only created upon user requests, so we ignore
	 * existing volumes.
	 */
	ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
	if (ret)
		goto err_unreg;
	return 0;

err_unreg:
	unregister_blkdev(ubiblock_major, "ubiblock");
err_remove:
	ubiblock_remove_all();
	return ret;
}
643 
/*
 * Shut down the ubiblock subsystem: stop receiving volume events,
 * destroy all remaining devices, then release the block major.
 */
void __exit ubiblock_exit(void)
{
	ubi_unregister_volume_notifier(&ubiblock_notifier);
	ubiblock_remove_all();
	unregister_blkdev(ubiblock_major, "ubiblock");
}
650