/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>

#include <linux/uaccess.h>

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
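
/*
 * For illustration, assuming the common 4 KiB page / 512 B sector layout
 * (PAGE_SHIFT == 12, SECTOR_SHIFT == 9): PAGE_SECTORS_SHIFT is 3 and
 * PAGE_SECTORS is 8, so sector 19 lives in the page at index 19 >> 3 == 2,
 * at byte offset (19 & 7) << 9 == 1536 within that page.
 */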

/*
 * Each brd device keeps a radix tree, brd_pages, of the pages that hold the
 * block device's contents. A brd page's ->index is its offset in PAGE_SIZE
 * units. This is similar to, but in no way connected with, the kernel's
 * pagecache or buffer cache (which sit above our block device).
 */
struct brd_device {
	int		brd_number;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
};

/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (i.e. we
	 * don't actually need the rcu_read_lock()), however that is not a
	 * documented feature of the radix-tree API so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, and insert that. Then
 * return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page->index = idx;
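	/*
	 * Another task may have raced us here and already inserted a page
	 * for this index; if so, drop our page and use the existing one.
	 */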
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	}
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

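		/* Start the next batch just past the last index we freed. */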
		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * this will have to change too.
		 */
	} while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 * Callers pass at most one page's worth of data (n <= PAGE_SIZE), so the
 * copy spans at most two brd pages.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOSPC;
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOSPC;
	}
	return 0;
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else
			memset(dst, 0, copy);
	}
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, unsigned int op,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (op_is_write(op)) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

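	/*
	 * flush_dcache_page() keeps the kernel mapping coherent with any
	 * user mappings on architectures with aliasing D-caches: after
	 * filling the page on a read, before reading it out on a write.
	 */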
	mem = kmap_atomic(page);
	if (!op_is_write(op)) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

out:
	return err;
}

static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct brd_device *brd = bio->bi_disk->private_data;
	struct bio_vec bvec;
	sector_t sector;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;
	if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
		goto io_error;

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		int err;

		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
				  bio_op(bio), sector);
		if (err)
			goto io_error;
		sector += len >> SECTOR_SHIFT;
	}

	bio_endio(bio);
	return BLK_QC_T_NONE;
io_error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

static int brd_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	int err;

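	/* brd stores data in order-0 pages; compound pages are not supported. */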
	if (PageTransHuge(page))
		return -ENOTSUPP;
	err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
	page_endio(page, op_is_write(op), err);
	return err;
}

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		brd_rw_page,
};

/*
 * And now the module code and kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, 0444);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, ulong, 0444);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Number of minors to reserve between devices");

MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");
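
/*
 * Example (hypothetical values):
 *	modprobe brd rd_nr=4 rd_size=16384
 * creates /dev/ram0 through /dev/ram3, each 16 MiB. rd_size is in KiB;
 * brd_alloc() below converts it to 512-byte sectors via set_capacity().
 */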

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif

/*
 * The device scheme is derived from loop.c. Keep them in sync where possible
 * (they should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number		= i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;

	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);

	/*
	 * Set the physical block size so that fdisk aligns partitions on 4k,
	 * since the direct_access API needs 4k alignment (it returns a PFN).
	 * This only matters on very small devices (<= 4M); otherwise fdisk
	 * aligns on 1M. Regardless, the call is harmless.
	 */
	blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
	disk = brd->brd_disk = alloc_disk(max_part);
	if (!disk)
		goto out_free_queue;
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i * max_part;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->flags		= GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "ram%d", i);
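	/* rd_size is in KiB; gendisk capacity is in 512-byte sectors. */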
	set_capacity(disk, rd_size * 2);
	brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;

	/* Tell the block layer that this is not a rotational device */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, brd->brd_queue);

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}

static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}

static struct brd_device *brd_init_one(int i, bool *new)
{
	struct brd_device *brd;

	*new = false;
	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		brd->brd_disk->queue = brd->brd_queue;
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
	*new = true;
out:
	return brd;
}

static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;
	bool new;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(MINOR(dev) / max_part, &new);
	kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL;
	mutex_unlock(&brd_devices_mutex);

	if (new)
		*part = 0;

	return kobj;
}

static int __init brd_init(void)
{
	struct brd_device *brd, *next;
	int i;

	/*
	 * The brd module can instantiate the underlying device structure
	 * on demand, provided the device node is accessed.
	 *
	 * (1) If rd_nr is specified, create that many devices upfront;
	 *     otherwise it defaults to CONFIG_BLK_DEV_RAM_COUNT.
	 * (2) Users can extend the set of brd devices by creating device
	 *     nodes themselves and having the kernel instantiate the actual
	 *     device on demand. Example:
	 *		mknod /path/devnod_name b 1 X	# 1 is the rd major
	 *		fdisk -l /path/devnod_name
	 *	If (X / max_part) was not already created it will be created
	 *	dynamically.
	 */

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

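	/* brd_probe() divides minors by max_part, so guard against zero. */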
	if (unlikely(!max_part))
		max_part = 1;

	for (i = 0; i < rd_nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list) {
		/*
		 * Associate the queue just before adding the disk, to avoid
		 * messing up the failure path.
		 */
		brd->brd_disk->queue = brd->brd_queue;
		add_disk(brd->brd_disk);
	}

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
				  THIS_MODULE, brd_probe, NULL, NULL);

	pr_info("brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module NOT loaded !!!\n");
	return -ENOMEM;
}

static void __exit brd_exit(void)
{
	struct brd_device *brd, *next;

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);