/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
#ifdef CONFIG_BLK_DEV_RAM_DAX
#include <linux/pfn_t.h>
#include <linux/dax.h>
#endif

#include <linux/uaccess.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
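/*
 * Worked example, assuming 4 KiB pages: PAGE_SHIFT is 12, so
 * PAGE_SECTORS_SHIFT is 3 and PAGE_SECTORS is 8, i.e. each backing
 * page holds eight 512-byte sectors.
 */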

/*
 * Each block ramdisk device has a radix tree, brd_pages, that stores the
 * pages containing the block device's contents. A brd page's ->index is
 * its offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
	int		brd_number;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
#ifdef CONFIG_BLK_DEV_RAM_DAX
	struct dax_device	*dax_dev;
#endif
	struct list_head	brd_list;

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
};

static DEFINE_MUTEX(brd_mutex);

/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (ie. we
	 * don't actually need the rcu_read_lock()), however that is not a
	 * documented feature of the radix-tree API so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, insert it, and return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 *
	 * Cannot support DAX and highmem, because our ->direct_access
	 * routine for DAX must return memory that is always addressable.
	 * If DAX was reworked to use pfns and kmap throughout, this
	 * restriction might be able to be lifted.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_RAM_DAX
	gfp_flags |= __GFP_HIGHMEM;
#endif
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page->index = idx;
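	/*
	 * If the insert fails, another task raced us and already inserted
	 * a page at this index; use that page and free ours.
	 */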
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	}
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * so will this have to.
		 */
	} while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOSPC;
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOSPC;
	}
	return 0;
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else
			memset(dst, 0, copy);
	}
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (is_write) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

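	/*
	 * Keep the caller's page coherent with the kernel mapping: flush
	 * after filling it on a read, and before copying from it on a write.
	 */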
	mem = kmap_atomic(page);
	if (!is_write) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

out:
	return err;
}

static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct bio_vec bvec;
	sector_t sector;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;
	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
		goto io_error;

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		int err;

		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
					op_is_write(bio_op(bio)), sector);
		if (err)
			goto io_error;
		sector += len >> SECTOR_SHIFT;
	}

	bio_endio(bio);
	return BLK_QC_T_NONE;
io_error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

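/*
 * ->rw_page entry point: synchronously read or write a single page at
 * @sector on behalf of the page cache, completing it via page_endio().
 */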
static int brd_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
	page_endio(page, is_write, err);
	return err;
}

#ifdef CONFIG_BLK_DEV_RAM_DAX
static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct page *page;

	if (!brd)
		return -ENODEV;
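	/* pgoff counts pages; convert its byte address to a 512-byte sector */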
	page = brd_insert_page(brd, PFN_PHYS(pgoff) / 512);
	if (!page)
		return -ENOSPC;
	*kaddr = page_address(page);
	*pfn = page_to_pfn_t(page);

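	/* Only one page is mapped per call, whatever nr_pages asks for. */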
	return 1;
}

static long brd_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct brd_device *brd = dax_get_private(dax_dev);

	return __brd_direct_access(brd, pgoff, nr_pages, kaddr, pfn);
}

static const struct dax_operations brd_dax_ops = {
	.direct_access = brd_dax_direct_access,
};
#endif

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		brd_rw_page,
};

/*
 * And now the module code and kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, ulong, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Number of minors to reserve between devices");
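
/*
 * Example usage (a sketch, using the module parameters defined above):
 * create two 1 GiB ram disks, reserving four minors per device:
 *
 *	modprobe brd rd_nr=2 rd_size=1048576 max_part=4
 */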

MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
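/*
 * For example, booting with "ramdisk_size=65536" on the kernel command
 * line sets each RAM disk to 64 MiB.
 */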
#endif

/*
 * The device scheme is derived from loop.c. Keep them in sync where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number		= i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;

	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);

	/*
	 * This is so fdisk will align partitions on 4k, because the
	 * direct_access API needs 4k alignment when returning a PFN.
	 * (This is only a problem on very small devices <= 4M;
	 * otherwise fdisk will align on 1M. Regardless, this call
	 * is harmless.)
	 */
	blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
	disk = brd->brd_disk = alloc_disk(max_part);
	if (!disk)
		goto out_free_queue;
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i * max_part;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->queue		= brd->brd_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "ram%d", i);
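	/* rd_size is in KiB; capacity is in 512-byte sectors, hence the doubling */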
	set_capacity(disk, rd_size * 2);

#ifdef CONFIG_BLK_DEV_RAM_DAX
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
	brd->dax_dev = alloc_dax(brd, disk->disk_name, &brd_dax_ops);
	if (!brd->dax_dev)
		goto out_free_inode;
#endif

	return brd;

#ifdef CONFIG_BLK_DEV_RAM_DAX
out_free_inode:
	kill_dax(brd->dax_dev);
	put_dax(brd->dax_dev);
#endif
out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}

static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}

static struct brd_device *brd_init_one(int i, bool *new)
{
	struct brd_device *brd;

	*new = false;
	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
	*new = true;
out:
	return brd;
}

static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
#ifdef CONFIG_BLK_DEV_RAM_DAX
	kill_dax(brd->dax_dev);
	put_dax(brd->dax_dev);
#endif
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

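/*
 * blk_register_region() probe callback: instantiate the device backing
 * the requested minor on first access, so nodes created by hand (see
 * the comment in brd_init() below) come to life on demand.
 */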
static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;
	bool new;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(MINOR(dev) / max_part, &new);
	kobj = brd ? get_disk(brd->brd_disk) : NULL;
	mutex_unlock(&brd_devices_mutex);

	if (new)
		*part = 0;

	return kobj;
}

static int __init brd_init(void)
{
	struct brd_device *brd, *next;
	int i;

	/*
	 * The brd module can instantiate the underlying device structure
	 * on demand, provided the device node is accessed.
	 *
	 * (1) If rd_nr is specified, create that many devices upfront;
	 *     otherwise it defaults to CONFIG_BLK_DEV_RAM_COUNT.
	 * (2) Users can further extend brd devices by creating device nodes
	 *     themselves and having the kernel automatically instantiate the
	 *     actual device on demand. Example:
	 *		mknod /path/devnod_name b 1 X	# 1 is the rd major
	 *		fdisk -l /path/devnod_name
	 *	If (X / max_part) was not already created, it will be created
	 *	dynamically.
	 */

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

	if (unlikely(!max_part))
		max_part = 1;

	for (i = 0; i < rd_nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list)
		add_disk(brd->brd_disk);

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
				  THIS_MODULE, brd_probe, NULL, NULL);

	pr_info("brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module NOT loaded !!!\n");
	return -ENOMEM;
}

static void __exit brd_exit(void)
{
	struct brd_device *brd, *next;

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);