/*
 * Copyright (c) 2014 Ezequiel Garcia
 * Copyright (c) 2011 Free Electrons
 *
 * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
 *   Copyright (c) International Business Machines Corp., 2006
 *   Copyright (c) Nokia Corporation, 2007
 *   Authors: Artem Bityutskiy, Frank Haverkamp
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 */

/*
 * Read-only block devices on top of UBI volumes
 *
 * A simple implementation to allow a block device to be layered on top of a
 * UBI volume. The implementation is provided by creating a static 1-to-1
 * mapping between the block device and the UBI volume.
 *
 * The addressed byte is obtained from the addressed block sector, which is
 * mapped linearly into the corresponding LEB:
 *
 *   LEB number = addressed byte / LEB size
 *
 * This feature is compiled in the UBI core, and adds a 'block' parameter
 * to allow early creation of block devices on top of UBI volumes. Runtime
 * block creation/removal for UBI volumes is provided through two UBI ioctls:
 * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
 */
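
/*
 * As a worked example (assuming a 128 KiB LEB size): a read of 512-byte
 * sector 520 addresses byte 520 * 512 = 266240, which maps to LEB 2
 * (266240 / 131072 = 2) at offset 4096 (266240 mod 131072).
 */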

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <asm/div64.h>

#include "ubi-media.h"
#include "ubi.h"

/* Maximum number of supported devices */
#define UBIBLOCK_MAX_DEVICES 32

/* Maximum length of the 'block=' parameter */
#define UBIBLOCK_PARAM_LEN 63

/* Maximum number of comma-separated items in the 'block=' parameter */
#define UBIBLOCK_PARAM_COUNT 2

struct ubiblock_param {
	int ubi_num;
	int vol_id;
	char name[UBIBLOCK_PARAM_LEN+1];
};

struct ubiblock_pdu {
	struct work_struct work;
	struct ubi_sgl usgl;
};
/* Number of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;

/* ubiblock devices specification parameters */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;

struct ubiblock {
	struct ubi_volume_desc *desc;
	int ubi_num;
	int vol_id;
	int refcnt;
	int leb_size;

	struct gendisk *gd;
	struct request_queue *rq;

	struct workqueue_struct *wq;

	struct mutex dev_mutex;
	struct list_head list;
	struct blk_mq_tag_set tag_set;
};

/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;

static int __init ubiblock_set_param(const char *val,
				     const struct kernel_param *kp)
{
	int i, ret;
	size_t len;
	struct ubiblock_param *param;
	char buf[UBIBLOCK_PARAM_LEN];
	char *pbuf = &buf[0];
	char *tokens[UBIBLOCK_PARAM_COUNT];

	if (!val)
		return -EINVAL;

	len = strnlen(val, UBIBLOCK_PARAM_LEN);
	if (len == 0) {
		pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
		return 0;
	}

	if (len == UBIBLOCK_PARAM_LEN) {
		pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
		       val, UBIBLOCK_PARAM_LEN);
		return -EINVAL;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
		tokens[i] = strsep(&pbuf, ",");

	/* Don't overflow the ubiblock_param array */
	if (ubiblock_devs == UBIBLOCK_MAX_DEVICES) {
		pr_err("UBI: block: block parameter ignored, only %i devices supported\n",
		       UBIBLOCK_MAX_DEVICES);
		return -EINVAL;
	}

	param = &ubiblock_param[ubiblock_devs];
	if (tokens[1]) {
		/* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
		ret = kstrtoint(tokens[0], 10, &param->ubi_num);
		if (ret < 0)
			return -EINVAL;

		/* Second param can be a number or a name */
		ret = kstrtoint(tokens[1], 10, &param->vol_id);
		if (ret < 0) {
			param->vol_id = -1;
			strcpy(param->name, tokens[1]);
		}

	} else {
		/* One parameter: must be a volume device path */
		strcpy(param->name, tokens[0]);
		param->ubi_num = -1;
		param->vol_id = -1;
	}

	ubiblock_devs++;

	return 0;
}

static const struct kernel_param_ops ubiblock_param_ops = {
	.set    = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
			"Multiple \"block\" parameters may be specified.\n"
			"UBI volumes may be specified by their number, name, or path to the device node.\n"
			"Examples:\n"
			"Using the UBI volume path:\n"
			"ubi.block=/dev/ubi0_0\n"
			"Using the UBI device, and the volume name:\n"
			"ubi.block=0,rootfs\n"
			"Using both UBI device number and UBI volume number:\n"
			"ubi.block=0,0\n");

static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
{
	struct ubiblock *dev;

	list_for_each_entry(dev, &ubiblock_devices, list)
		if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
			return dev;
	return NULL;
}

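/*
 * Read the request's data into its scatter-gather list, splitting the
 * transfer at LEB boundaries since UBI reads at most one LEB at a time.
 *
 * Worked example (assuming a 128 KiB LEB size): a 16 KiB read at byte
 * position 253952 starts in LEB 1 at offset 122880. Only 8 KiB remain in
 * that LEB, so the request is served by two ubi_read_sg() calls: 8 KiB
 * from LEB 1 at offset 122880, then 8 KiB from LEB 2 at offset 0.
 */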
static int ubiblock_read(struct ubiblock_pdu *pdu)
{
	int ret, leb, offset, bytes_left, to_read;
	u64 pos;
	struct request *req = blk_mq_rq_from_pdu(pdu);
	struct ubiblock *dev = req->q->queuedata;

	to_read = blk_rq_bytes(req);
	pos = blk_rq_pos(req) << 9;

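	/*
	 * Get the LEB:offset address to read from. do_div() divides the
	 * 64-bit dividend in place: @pos becomes the quotient (the LEB
	 * number) and the return value is the remainder (the byte offset
	 * inside that LEB).
	 */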
	offset = do_div(pos, dev->leb_size);
	leb = pos;
	bytes_left = to_read;

	while (bytes_left) {
		/*
		 * We can only read one LEB at a time. Therefore if the read
		 * length is larger than one LEB size, we split the operation.
		 */
		if (offset + to_read > dev->leb_size)
			to_read = dev->leb_size - offset;

		ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
		if (ret < 0)
			return ret;

		bytes_left -= to_read;
		to_read = bytes_left;
		leb += 1;
		offset = 0;
	}
	return 0;
}

static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
	struct ubiblock *dev = bdev->bd_disk->private_data;
	int ret;

	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		/*
		 * The volume is already open, just increase the reference
		 * counter.
		 */
		goto out_done;
	}

	/*
	 * We want users to be aware they should only mount us as read-only.
	 * It's just a paranoid check, as write requests will get rejected
	 * in any case.
	 */
	if (mode & FMODE_WRITE) {
		ret = -EPERM;
		goto out_unlock;
	}

	dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
	if (IS_ERR(dev->desc)) {
		dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
			dev->ubi_num, dev->vol_id);
		ret = PTR_ERR(dev->desc);
		dev->desc = NULL;
		goto out_unlock;
	}

out_done:
	dev->refcnt++;
	mutex_unlock(&dev->dev_mutex);
	return 0;

out_unlock:
	mutex_unlock(&dev->dev_mutex);
	return ret;
}

static void ubiblock_release(struct gendisk *gd, fmode_t mode)
{
	struct ubiblock *dev = gd->private_data;

	mutex_lock(&dev->dev_mutex);
	dev->refcnt--;
	if (dev->refcnt == 0) {
		ubi_close_volume(dev->desc);
		dev->desc = NULL;
	}
	mutex_unlock(&dev->dev_mutex);
}

static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* Some tools might require this information */
	geo->heads = 1;
	geo->cylinders = 1;
	geo->sectors = get_capacity(bdev->bd_disk);
	geo->start = 0;
	return 0;
}

static const struct block_device_operations ubiblock_ops = {
	.owner = THIS_MODULE,
	.open = ubiblock_open,
	.release = ubiblock_release,
	.getgeo	= ubiblock_getgeo,
};

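/*
 * Requests are completed from a per-volume workqueue rather than from
 * ->queue_rq() itself, since ubi_read_sg() may sleep while UBI performs
 * the underlying MTD I/O.
 */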
static void ubiblock_do_work(struct work_struct *work)
{
	int ret;
	struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
	struct request *req = blk_mq_rq_from_pdu(pdu);

	blk_mq_start_request(req);

	/*
	 * It is safe to ignore the return value of blk_rq_map_sg() because
	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
	 * and ubi_read_sg() will check that limit.
	 */
	blk_rq_map_sg(req->q, req, pdu->usgl.sg);

	ret = ubiblock_read(pdu);
	blk_mq_end_request(req, ret);
}

static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
			     const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct ubiblock *dev = hctx->queue->queuedata;
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	if (req->cmd_type != REQ_TYPE_FS)
		return BLK_MQ_RQ_QUEUE_ERROR;

	if (rq_data_dir(req) != READ)
		return BLK_MQ_RQ_QUEUE_ERROR; /* Write not implemented */

	ubi_sgl_init(&pdu->usgl);
	queue_work(dev->wq, &pdu->work);

	return BLK_MQ_RQ_QUEUE_OK;
}

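/*
 * blk-mq allocates tag_set.cmd_size extra bytes with every request; they
 * back the struct ubiblock_pdu returned by blk_mq_rq_to_pdu(). Since
 * ->init_request() runs once per preallocated request, the sg table and
 * work item only need to be set up here, not on every I/O.
 */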
static int ubiblock_init_request(void *data, struct request *req,
				 unsigned int hctx_idx,
				 unsigned int request_idx,
				 unsigned int numa_node)
{
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
	INIT_WORK(&pdu->work, ubiblock_do_work);

	return 0;
}

static struct blk_mq_ops ubiblock_mq_ops = {
	.queue_rq       = ubiblock_queue_rq,
	.init_request	= ubiblock_init_request,
	.map_queue      = blk_mq_map_queue,
};

int ubiblock_create(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	struct gendisk *gd;
	u64 disk_capacity = vi->used_bytes >> 9;
	int ret;

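	/*
	 * On 32-bit systems sector_t may be only 32 bits wide; refuse
	 * volumes whose 512-byte sector count would not fit.
	 */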
	if ((sector_t)disk_capacity != disk_capacity)
		return -EFBIG;
	/* Check that the volume isn't already handled */
	mutex_lock(&devices_mutex);
	if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
		mutex_unlock(&devices_mutex);
		return -EEXIST;
	}
	mutex_unlock(&devices_mutex);

	dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	mutex_init(&dev->dev_mutex);

	dev->ubi_num = vi->ubi_num;
	dev->vol_id = vi->vol_id;
	dev->leb_size = vi->usable_leb_size;

	/* Initialize the gendisk of this ubiblock device */
	gd = alloc_disk(1);
	if (!gd) {
		pr_err("UBI: block: alloc_disk failed");
		ret = -ENODEV;
		goto out_free_dev;
	}

	gd->fops = &ubiblock_ops;
	gd->major = ubiblock_major;
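	/*
	 * Give each (ubi_num, vol_id) pair its own minor number, so up to
	 * UBI_MAX_VOLUMES volumes per UBI device can coexist.
	 */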
	gd->first_minor = dev->ubi_num * UBI_MAX_VOLUMES + dev->vol_id;
	gd->private_data = dev;
	sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
	set_capacity(gd, disk_capacity);
	dev->gd = gd;

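	/*
	 * A single hardware queue suffices here: reads are serialized
	 * through the per-volume workqueue anyway. The queue depth of 64
	 * is an arbitrary but ample bound on in-flight requests.
	 */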
	dev->tag_set.ops = &ubiblock_mq_ops;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
	dev->tag_set.driver_data = dev;
	dev->tag_set.nr_hw_queues = 1;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret) {
		dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
		goto out_put_disk;
	}

	dev->rq = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(dev->rq)) {
		dev_err(disk_to_dev(gd), "blk_mq_init_queue failed");
		ret = PTR_ERR(dev->rq);
		goto out_free_tags;
	}
	blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);

	dev->rq->queuedata = dev;
	dev->gd->queue = dev->rq;

	/*
	 * Create one workqueue per volume (per registered block device).
	 * Remember, workqueues are cheap; they're not threads.
	 */
	dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
	if (!dev->wq) {
		ret = -ENOMEM;
		goto out_free_queue;
	}

	mutex_lock(&devices_mutex);
	list_add_tail(&dev->list, &ubiblock_devices);
	mutex_unlock(&devices_mutex);

	/* Must be the last step: anyone can call file ops from now on */
	add_disk(dev->gd);
	dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
		 dev->ubi_num, dev->vol_id, vi->name);
	return 0;

out_free_queue:
	blk_cleanup_queue(dev->rq);
out_free_tags:
	blk_mq_free_tag_set(&dev->tag_set);
out_put_disk:
	put_disk(dev->gd);
out_free_dev:
	kfree(dev);

	return ret;
}

static void ubiblock_cleanup(struct ubiblock *dev)
{
	/* Stop new requests from arriving */
	del_gendisk(dev->gd);
	/* Flush pending work */
	destroy_workqueue(dev->wq);
	/* Finally destroy the blk queue */
	blk_cleanup_queue(dev->rq);
	blk_mq_free_tag_set(&dev->tag_set);
	dev_info(disk_to_dev(dev->gd), "released");
	put_disk(dev->gd);
}

int ubiblock_remove(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;

	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}

	/* Found a device, let's lock it so we can check if it's busy */
	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		mutex_unlock(&dev->dev_mutex);
		mutex_unlock(&devices_mutex);
		return -EBUSY;
	}

	/* Remove from device list */
	list_del(&dev->list);
	mutex_unlock(&devices_mutex);

	ubiblock_cleanup(dev);
	mutex_unlock(&dev->dev_mutex);
	kfree(dev);
	return 0;
}

static int ubiblock_resize(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	u64 disk_capacity = vi->used_bytes >> 9;

	/*
	 * Need to lock the device list until we stop using the device,
	 * otherwise the device struct might get released in
	 * 'ubiblock_remove()'.
	 */
	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}
	if ((sector_t)disk_capacity != disk_capacity) {
		mutex_unlock(&devices_mutex);
		dev_warn(disk_to_dev(dev->gd), "the volume is too big (%d LEBs), cannot resize",
			 vi->size);
		return -EFBIG;
	}

	mutex_lock(&dev->dev_mutex);

	if (get_capacity(dev->gd) != disk_capacity) {
		set_capacity(dev->gd, disk_capacity);
		dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
			 vi->used_bytes);
	}
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);
	return 0;
}

static int ubiblock_notify(struct notifier_block *nb,
			   unsigned long notification_type, void *ns_ptr)
{
	struct ubi_notification *nt = ns_ptr;

	switch (notification_type) {
	case UBI_VOLUME_ADDED:
		/*
		 * We want to enforce explicit block device creation for
		 * volumes, so when a volume is added we do nothing.
		 */
		break;
	case UBI_VOLUME_REMOVED:
		ubiblock_remove(&nt->vi);
		break;
	case UBI_VOLUME_RESIZED:
		ubiblock_resize(&nt->vi);
		break;
	case UBI_VOLUME_UPDATED:
		/*
		 * If the volume is static, a content update might mean the
		 * size (i.e. used_bytes) was also changed.
		 */
		if (nt->vi.vol_type == UBI_STATIC_VOLUME)
			ubiblock_resize(&nt->vi);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block ubiblock_notifier = {
	.notifier_call = ubiblock_notify,
};

static struct ubi_volume_desc * __init
open_volume_desc(const char *name, int ubi_num, int vol_id)
{
	if (ubi_num == -1)
		/* No ubi_num given; name must be a volume device path */
		return ubi_open_volume_path(name, UBI_READONLY);
	else if (vol_id == -1)
		/* No vol_id given; name must be the volume name */
		return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
	else
		return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
}

static void __init ubiblock_create_from_param(void)
{
	int i, ret = 0;
	struct ubiblock_param *p;
	struct ubi_volume_desc *desc;
	struct ubi_volume_info vi;

	/*
	 * If there is an error creating one of the ubiblocks, continue on to
	 * create the following ubiblocks. This helps in a circumstance where
	 * the kernel command-line specifies multiple block devices and some
	 * may be broken, but we still want the working ones to come up.
	 */
	for (i = 0; i < ubiblock_devs; i++) {
		p = &ubiblock_param[i];

		desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
		if (IS_ERR(desc)) {
			pr_err("UBI: block: can't open volume on ubi%d_%d, err=%ld",
			       p->ubi_num, p->vol_id, PTR_ERR(desc));
			continue;
		}

		ubi_get_volume_info(desc, &vi);
		ubi_close_volume(desc);

		ret = ubiblock_create(&vi);
		if (ret) {
			pr_err("UBI: block: can't add '%s' volume on ubi%d_%d, err=%d",
			       vi.name, p->ubi_num, p->vol_id, ret);
			continue;
		}
	}
}

static void ubiblock_remove_all(void)
{
	struct ubiblock *next;
	struct ubiblock *dev;

	list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
		/* The module is being forcefully removed */
		WARN_ON(dev->desc);
		/* Remove from device list */
		list_del(&dev->list);
		ubiblock_cleanup(dev);
		kfree(dev);
	}
}

int __init ubiblock_init(void)
{
	int ret;

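	/*
	 * Passing 0 asks register_blkdev() to allocate a free dynamic major
	 * number for us and return it.
	 */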
	ubiblock_major = register_blkdev(0, "ubiblock");
	if (ubiblock_major < 0)
		return ubiblock_major;

	/*
	 * Attach block devices from 'block=' module param.
	 * Even if one block device in the param list fails to come up,
	 * still allow the module to load and leave any others up.
	 */
	ubiblock_create_from_param();

	/*
	 * Block devices are only created upon user requests, so we ignore
	 * existing volumes.
	 */
	ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
	if (ret)
		goto err_unreg;
	return 0;

err_unreg:
	unregister_blkdev(ubiblock_major, "ubiblock");
	ubiblock_remove_all();
	return ret;
}

void __exit ubiblock_exit(void)
{
	ubi_unregister_volume_notifier(&ubiblock_notifier);
	ubiblock_remove_all();
	unregister_blkdev(ubiblock_major, "ubiblock");
}