xref: /openbmc/linux/drivers/mtd/ubi/block.c (revision c9dc580c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2014 Ezequiel Garcia
4  * Copyright (c) 2011 Free Electrons
5  *
6  * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
7  *   Copyright (c) International Business Machines Corp., 2006
8  *   Copyright (c) Nokia Corporation, 2007
9  *   Authors: Artem Bityutskiy, Frank Haverkamp
10  */
11 
12 /*
13  * Read-only block devices on top of UBI volumes
14  *
15  * A simple implementation to allow a block device to be layered on top of a
16  * UBI volume. The implementation is provided by creating a static 1-to-1
17  * mapping between the block device and the UBI volume.
18  *
19  * The addressed byte is obtained from the addressed block sector, which is
20  * mapped linearly into the corresponding LEB:
21  *
22  *   LEB number = addressed byte / LEB size
23  *
24  * This feature is compiled in the UBI core, and adds a 'block' parameter
25  * to allow early creation of block devices on top of UBI volumes. Runtime
26  * block creation/removal for UBI volumes is provided through two UBI ioctls:
27  * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
28  */
29 
30 #include <linux/module.h>
31 #include <linux/init.h>
32 #include <linux/err.h>
33 #include <linux/kernel.h>
34 #include <linux/list.h>
35 #include <linux/mutex.h>
36 #include <linux/slab.h>
37 #include <linux/mtd/ubi.h>
38 #include <linux/blkdev.h>
39 #include <linux/blk-mq.h>
40 #include <linux/hdreg.h>
41 #include <linux/scatterlist.h>
42 #include <linux/idr.h>
43 #include <asm/div64.h>
44 
45 #include "ubi-media.h"
46 #include "ubi.h"
47 
/* Maximum number of supported devices */
#define UBIBLOCK_MAX_DEVICES 32

/* Maximum length of the 'block=' parameter */
#define UBIBLOCK_PARAM_LEN 63

/* Maximum number of comma-separated items in the 'block=' parameter */
#define UBIBLOCK_PARAM_COUNT 2

/* One parsed 'block=' boot/module parameter entry. */
struct ubiblock_param {
	int ubi_num;	/* UBI device number, or -1 when only a path/name is given */
	int vol_id;	/* UBI volume id, or -1 when the volume is given by name */
	char name[UBIBLOCK_PARAM_LEN+1];	/* volume name or device-node path */
};

/* Per-request driver data, allocated by blk-mq via tag_set.cmd_size. */
struct ubiblock_pdu {
	struct ubi_sgl usgl;	/* scatter-gather list handed to ubi_read_sg() */
};

/* Numbers of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;

/* MTD devices specification parameters */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;

/* One block device instance layered on top of one UBI volume. */
struct ubiblock {
	struct ubi_volume_desc *desc;	/* open volume handle; NULL while refcnt == 0 */
	int ubi_num;			/* backing UBI device number */
	int vol_id;			/* backing UBI volume id */
	int refcnt;			/* open count, protected by dev_mutex */
	int leb_size;			/* usable LEB size, for sector->LEB mapping */

	struct gendisk *gd;
	struct request_queue *rq;

	struct mutex dev_mutex;		/* serializes open/release/resize/remove */
	struct list_head list;		/* link in ubiblock_devices */
	struct blk_mq_tag_set tag_set;
};

/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_IDR(ubiblock_minor_idr);
/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;
94 
95 static int __init ubiblock_set_param(const char *val,
96 				     const struct kernel_param *kp)
97 {
98 	int i, ret;
99 	size_t len;
100 	struct ubiblock_param *param;
101 	char buf[UBIBLOCK_PARAM_LEN];
102 	char *pbuf = &buf[0];
103 	char *tokens[UBIBLOCK_PARAM_COUNT];
104 
105 	if (!val)
106 		return -EINVAL;
107 
108 	len = strnlen(val, UBIBLOCK_PARAM_LEN);
109 	if (len == 0) {
110 		pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
111 		return 0;
112 	}
113 
114 	if (len == UBIBLOCK_PARAM_LEN) {
115 		pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
116 		       val, UBIBLOCK_PARAM_LEN);
117 		return -EINVAL;
118 	}
119 
120 	strcpy(buf, val);
121 
122 	/* Get rid of the final newline */
123 	if (buf[len - 1] == '\n')
124 		buf[len - 1] = '\0';
125 
126 	for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
127 		tokens[i] = strsep(&pbuf, ",");
128 
129 	param = &ubiblock_param[ubiblock_devs];
130 	if (tokens[1]) {
131 		/* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
132 		ret = kstrtoint(tokens[0], 10, &param->ubi_num);
133 		if (ret < 0)
134 			return -EINVAL;
135 
136 		/* Second param can be a number or a name */
137 		ret = kstrtoint(tokens[1], 10, &param->vol_id);
138 		if (ret < 0) {
139 			param->vol_id = -1;
140 			strcpy(param->name, tokens[1]);
141 		}
142 
143 	} else {
144 		/* One parameter: must be device path */
145 		strcpy(param->name, tokens[0]);
146 		param->ubi_num = -1;
147 		param->vol_id = -1;
148 	}
149 
150 	ubiblock_devs++;
151 
152 	return 0;
153 }
154 
/* Custom setter so each 'block=' occurrence is parsed as it appears. */
static const struct kernel_param_ops ubiblock_param_ops = {
	.set    = ubiblock_set_param,
};
/* Perms 0: the accumulated parameters are not readable via sysfs. */
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
			"Multiple \"block\" parameters may be specified.\n"
			"UBI volumes may be specified by their number, name, or path to the device node.\n"
			"Examples\n"
			"Using the UBI volume path:\n"
			"ubi.block=/dev/ubi0_0\n"
			"Using the UBI device, and the volume name:\n"
			"ubi.block=0,rootfs\n"
			"Using both UBI device number and UBI volume number:\n"
			"ubi.block=0,0\n");
169 
170 static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
171 {
172 	struct ubiblock *dev;
173 
174 	list_for_each_entry(dev, &ubiblock_devices, list)
175 		if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
176 			return dev;
177 	return NULL;
178 }
179 
180 static blk_status_t ubiblock_read(struct request *req)
181 {
182 	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
183 	struct ubiblock *dev = req->q->queuedata;
184 	u64 pos = blk_rq_pos(req) << 9;
185 	int to_read = blk_rq_bytes(req);
186 	int bytes_left = to_read;
187 	/* Get LEB:offset address to read from */
188 	int offset = do_div(pos, dev->leb_size);
189 	int leb = pos;
190 	struct req_iterator iter;
191 	struct bio_vec bvec;
192 	int ret;
193 
194 	blk_mq_start_request(req);
195 
196 	/*
197 	 * It is safe to ignore the return value of blk_rq_map_sg() because
198 	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
199 	 * and ubi_read_sg() will check that limit.
200 	 */
201 	ubi_sgl_init(&pdu->usgl);
202 	blk_rq_map_sg(req->q, req, pdu->usgl.sg);
203 
204 	while (bytes_left) {
205 		/*
206 		 * We can only read one LEB at a time. Therefore if the read
207 		 * length is larger than one LEB size, we split the operation.
208 		 */
209 		if (offset + to_read > dev->leb_size)
210 			to_read = dev->leb_size - offset;
211 
212 		ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
213 		if (ret < 0)
214 			break;
215 
216 		bytes_left -= to_read;
217 		to_read = bytes_left;
218 		leb += 1;
219 		offset = 0;
220 	}
221 
222 	rq_for_each_segment(bvec, req, iter)
223 		flush_dcache_page(bvec.bv_page);
224 	return errno_to_blk_status(ret);
225 }
226 
/*
 * Block device ->open().  The backing UBI volume is opened lazily on the
 * first open and kept open while refcnt > 0.  Returns 0 or a negative errno.
 */
static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
	struct ubiblock *dev = bdev->bd_disk->private_data;
	int ret;

	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		/*
		 * The volume is already open, just increase the reference
		 * counter.
		 */
		goto out_done;
	}

	/*
	 * We want users to be aware they should only mount us as read-only.
	 * It's just a paranoid check, as write requests will get rejected
	 * in any case.
	 */
	if (mode & FMODE_WRITE) {
		ret = -EROFS;
		goto out_unlock;
	}

	/* First opener: take a read-only reference on the UBI volume. */
	dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
	if (IS_ERR(dev->desc)) {
		dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
			dev->ubi_num, dev->vol_id);
		ret = PTR_ERR(dev->desc);
		/* Clear the error pointer so release paths see "not open". */
		dev->desc = NULL;
		goto out_unlock;
	}

out_done:
	dev->refcnt++;
	mutex_unlock(&dev->dev_mutex);
	return 0;

out_unlock:
	mutex_unlock(&dev->dev_mutex);
	return ret;
}
269 
270 static void ubiblock_release(struct gendisk *gd, fmode_t mode)
271 {
272 	struct ubiblock *dev = gd->private_data;
273 
274 	mutex_lock(&dev->dev_mutex);
275 	dev->refcnt--;
276 	if (dev->refcnt == 0) {
277 		ubi_close_volume(dev->desc);
278 		dev->desc = NULL;
279 	}
280 	mutex_unlock(&dev->dev_mutex);
281 }
282 
283 static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
284 {
285 	/* Some tools might require this information */
286 	geo->heads = 1;
287 	geo->cylinders = 1;
288 	geo->sectors = get_capacity(bdev->bd_disk);
289 	geo->start = 0;
290 	return 0;
291 }
292 
/* Block device operations; the device is read-only by design. */
static const struct block_device_operations ubiblock_ops = {
	.owner = THIS_MODULE,
	.open = ubiblock_open,
	.release = ubiblock_release,
	.getgeo	= ubiblock_getgeo,
};
299 
300 static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
301 			     const struct blk_mq_queue_data *bd)
302 {
303 	switch (req_op(bd->rq)) {
304 	case REQ_OP_READ:
305 		return ubiblock_read(bd->rq);
306 	default:
307 		return BLK_STS_IOERR;
308 	}
309 }
310 
/*
 * blk-mq ->init_request().  Prepare the per-request scatter-gather table
 * once at tag-set setup time so ubiblock_read() can reuse it per request.
 */
static int ubiblock_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
	return 0;
}
320 
/* blk-mq callbacks for the ubiblock request queue. */
static const struct blk_mq_ops ubiblock_mq_ops = {
	.queue_rq       = ubiblock_queue_rq,
	.init_request	= ubiblock_init_request,
};
325 
326 static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
327 {
328 	u64 size = vi->used_bytes >> 9;
329 
330 	if (vi->used_bytes % 512) {
331 		if (vi->vol_type == UBI_DYNAMIC_VOLUME)
332 			pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
333 				vi->used_bytes - (size << 9));
334 		else
335 			pr_info("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
336 				vi->used_bytes - (size << 9));
337 	}
338 
339 	if ((sector_t)size != size)
340 		return -EFBIG;
341 
342 	*disk_capacity = size;
343 
344 	return 0;
345 }
346 
347 int ubiblock_create(struct ubi_volume_info *vi)
348 {
349 	struct ubiblock *dev;
350 	struct gendisk *gd;
351 	u64 disk_capacity;
352 	int ret;
353 
354 	ret = calc_disk_capacity(vi, &disk_capacity);
355 	if (ret) {
356 		return ret;
357 	}
358 
359 	/* Check that the volume isn't already handled */
360 	mutex_lock(&devices_mutex);
361 	if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
362 		ret = -EEXIST;
363 		goto out_unlock;
364 	}
365 
366 	dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
367 	if (!dev) {
368 		ret = -ENOMEM;
369 		goto out_unlock;
370 	}
371 
372 	mutex_init(&dev->dev_mutex);
373 
374 	dev->ubi_num = vi->ubi_num;
375 	dev->vol_id = vi->vol_id;
376 	dev->leb_size = vi->usable_leb_size;
377 
378 	dev->tag_set.ops = &ubiblock_mq_ops;
379 	dev->tag_set.queue_depth = 64;
380 	dev->tag_set.numa_node = NUMA_NO_NODE;
381 	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
382 	dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
383 	dev->tag_set.driver_data = dev;
384 	dev->tag_set.nr_hw_queues = 1;
385 
386 	ret = blk_mq_alloc_tag_set(&dev->tag_set);
387 	if (ret) {
388 		dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
389 		goto out_free_dev;
390 	}
391 
392 
393 	/* Initialize the gendisk of this ubiblock device */
394 	gd = blk_mq_alloc_disk(&dev->tag_set, dev);
395 	if (IS_ERR(gd)) {
396 		ret = PTR_ERR(gd);
397 		goto out_free_tags;
398 	}
399 
400 	gd->fops = &ubiblock_ops;
401 	gd->major = ubiblock_major;
402 	gd->minors = 1;
403 	gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
404 	if (gd->first_minor < 0) {
405 		dev_err(disk_to_dev(gd),
406 			"block: dynamic minor allocation failed");
407 		ret = -ENODEV;
408 		goto out_cleanup_disk;
409 	}
410 	gd->flags |= GENHD_FL_NO_PART;
411 	gd->private_data = dev;
412 	sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
413 	set_capacity(gd, disk_capacity);
414 	dev->gd = gd;
415 
416 	dev->rq = gd->queue;
417 	blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
418 
419 	list_add_tail(&dev->list, &ubiblock_devices);
420 
421 	/* Must be the last step: anyone can call file ops from now on */
422 	ret = device_add_disk(vi->dev, dev->gd, NULL);
423 	if (ret)
424 		goto out_remove_minor;
425 
426 	dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
427 		 dev->ubi_num, dev->vol_id, vi->name);
428 	mutex_unlock(&devices_mutex);
429 	return 0;
430 
431 out_remove_minor:
432 	list_del(&dev->list);
433 	idr_remove(&ubiblock_minor_idr, gd->first_minor);
434 out_cleanup_disk:
435 	put_disk(dev->gd);
436 out_free_tags:
437 	blk_mq_free_tag_set(&dev->tag_set);
438 out_free_dev:
439 	kfree(dev);
440 out_unlock:
441 	mutex_unlock(&devices_mutex);
442 
443 	return ret;
444 }
445 
/*
 * Tear down a ubiblock device: unpublish the disk, drop the gendisk
 * reference, free the tag set and return the minor to the IDR.  The
 * struct itself is freed by the caller.  Callers hold devices_mutex
 * (and, in ubiblock_remove(), dev_mutex) — NOTE(review): assumed from
 * the two call sites visible here.
 */
static void ubiblock_cleanup(struct ubiblock *dev)
{
	/* Stop new requests to arrive */
	del_gendisk(dev->gd);
	/* Finally destroy the blk queue */
	dev_info(disk_to_dev(dev->gd), "released");
	put_disk(dev->gd);
	blk_mq_free_tag_set(&dev->tag_set);
	idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
}
456 
/*
 * Remove the ubiblock device backed by the volume described in @vi.
 * Fails with -ENODEV if no such device exists and -EBUSY while it is
 * still open.  Returns 0 on success.
 */
int ubiblock_remove(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	int ret;

	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		ret = -ENODEV;
		goto out_unlock;
	}

	/* Found a device, let's lock it so we can check if it's busy */
	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		ret = -EBUSY;
		goto out_unlock_dev;
	}

	/* Remove from device list */
	list_del(&dev->list);
	ubiblock_cleanup(dev);
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);

	/* Safe to free: no list/IDR references remain and refcnt was 0. */
	kfree(dev);
	return 0;

out_unlock_dev:
	mutex_unlock(&dev->dev_mutex);
out_unlock:
	mutex_unlock(&devices_mutex);
	return ret;
}
491 
/*
 * Propagate a UBI volume size change to the block device capacity.
 * Returns 0 on success, -ENODEV when no ubiblock covers the volume, or
 * the error from calc_disk_capacity() (e.g. -EFBIG).
 */
static int ubiblock_resize(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	u64 disk_capacity;
	int ret;

	/*
	 * Need to lock the device list until we stop using the device,
	 * otherwise the device struct might get released in
	 * 'ubiblock_remove()'.
	 */
	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}

	ret = calc_disk_capacity(vi, &disk_capacity);
	if (ret) {
		mutex_unlock(&devices_mutex);
		if (ret == -EFBIG) {
			dev_warn(disk_to_dev(dev->gd),
				 "the volume is too big (%d LEBs), cannot resize",
				 vi->size);
		}
		return ret;
	}

	mutex_lock(&dev->dev_mutex);

	/* Only touch the gendisk when the capacity actually changed. */
	if (get_capacity(dev->gd) != disk_capacity) {
		set_capacity(dev->gd, disk_capacity);
		dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
			 vi->used_bytes);
	}
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);
	return 0;
}
532 
533 static int ubiblock_notify(struct notifier_block *nb,
534 			 unsigned long notification_type, void *ns_ptr)
535 {
536 	struct ubi_notification *nt = ns_ptr;
537 
538 	switch (notification_type) {
539 	case UBI_VOLUME_ADDED:
540 		/*
541 		 * We want to enforce explicit block device creation for
542 		 * volumes, so when a volume is added we do nothing.
543 		 */
544 		break;
545 	case UBI_VOLUME_REMOVED:
546 		ubiblock_remove(&nt->vi);
547 		break;
548 	case UBI_VOLUME_RESIZED:
549 		ubiblock_resize(&nt->vi);
550 		break;
551 	case UBI_VOLUME_UPDATED:
552 		/*
553 		 * If the volume is static, a content update might mean the
554 		 * size (i.e. used_bytes) was also changed.
555 		 */
556 		if (nt->vi.vol_type == UBI_STATIC_VOLUME)
557 			ubiblock_resize(&nt->vi);
558 		break;
559 	default:
560 		break;
561 	}
562 	return NOTIFY_OK;
563 }
564 
/* Registered with the UBI core in ubiblock_init(). */
static struct notifier_block ubiblock_notifier = {
	.notifier_call = ubiblock_notify,
};
568 
569 static struct ubi_volume_desc * __init
570 open_volume_desc(const char *name, int ubi_num, int vol_id)
571 {
572 	if (ubi_num == -1)
573 		/* No ubi num, name must be a vol device path */
574 		return ubi_open_volume_path(name, UBI_READONLY);
575 	else if (vol_id == -1)
576 		/* No vol_id, must be vol_name */
577 		return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
578 	else
579 		return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
580 }
581 
582 static void __init ubiblock_create_from_param(void)
583 {
584 	int i, ret = 0;
585 	struct ubiblock_param *p;
586 	struct ubi_volume_desc *desc;
587 	struct ubi_volume_info vi;
588 
589 	/*
590 	 * If there is an error creating one of the ubiblocks, continue on to
591 	 * create the following ubiblocks. This helps in a circumstance where
592 	 * the kernel command-line specifies multiple block devices and some
593 	 * may be broken, but we still want the working ones to come up.
594 	 */
595 	for (i = 0; i < ubiblock_devs; i++) {
596 		p = &ubiblock_param[i];
597 
598 		desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
599 		if (IS_ERR(desc)) {
600 			pr_err(
601 			       "UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
602 			       p->ubi_num, p->vol_id, PTR_ERR(desc));
603 			continue;
604 		}
605 
606 		ubi_get_volume_info(desc, &vi);
607 		ubi_close_volume(desc);
608 
609 		ret = ubiblock_create(&vi);
610 		if (ret) {
611 			pr_err(
612 			       "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
613 			       vi.name, p->ubi_num, p->vol_id, ret);
614 			continue;
615 		}
616 	}
617 }
618 
619 static void ubiblock_remove_all(void)
620 {
621 	struct ubiblock *next;
622 	struct ubiblock *dev;
623 
624 	mutex_lock(&devices_mutex);
625 	list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
626 		/* The module is being forcefully removed */
627 		WARN_ON(dev->desc);
628 		/* Remove from device list */
629 		list_del(&dev->list);
630 		ubiblock_cleanup(dev);
631 		kfree(dev);
632 	}
633 	mutex_unlock(&devices_mutex);
634 }
635 
/*
 * Module init: register the 'ubiblock' major, create devices requested
 * via 'block=' parameters and hook into UBI volume notifications.
 * Returns 0 or a negative errno.
 */
int __init ubiblock_init(void)
{
	int ret;

	/* 0 requests a dynamically allocated major number. */
	ubiblock_major = register_blkdev(0, "ubiblock");
	if (ubiblock_major < 0)
		return ubiblock_major;

	/*
	 * Attach block devices from 'block=' module param.
	 * Even if one block device in the param list fails to come up,
	 * still allow the module to load and leave any others up.
	 */
	ubiblock_create_from_param();

	/*
	 * Block devices are only created upon user requests, so we ignore
	 * existing volumes.
	 */
	ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
	if (ret)
		goto err_unreg;
	return 0;

err_unreg:
	/* Undo the param-created devices and the major registration. */
	unregister_blkdev(ubiblock_major, "ubiblock");
	ubiblock_remove_all();
	return ret;
}
665 
/*
 * Module exit: stop receiving UBI notifications, destroy all remaining
 * ubiblock devices and release the block major.
 */
void __exit ubiblock_exit(void)
{
	ubi_unregister_volume_notifier(&ubiblock_notifier);
	ubiblock_remove_all();
	unregister_blkdev(ubiblock_major, "ubiblock");
}
672