// SPDX-License-Identifier: GPL-2.0-only
/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>

#include <linux/uaccess.h>

#define PAGE_SECTORS_SHIFT      (PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS            (1 << PAGE_SECTORS_SHIFT)

/*
 * Each block ramdisk device has a radix_tree brd_pages of pages that stores
 * the pages containing the block device's contents. A brd page's ->index is
 * its offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
        int                     brd_number;

        struct request_queue    *brd_queue;
        struct gendisk          *brd_disk;
        struct list_head        brd_list;

        /*
         * Backing store of pages and lock to protect it. This is the contents
         * of the block device.
         */
        spinlock_t              brd_lock;
        struct radix_tree_root  brd_pages;
};

/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
        pgoff_t idx;
        struct page *page;

        /*
         * The page lifetime is protected by the fact that we have opened the
         * device node -- brd pages will never be deleted under us, so we
         * don't need any further locking or refcounting.
         *
         * This is strictly true for the radix-tree nodes as well (ie. we
         * don't actually need the rcu_read_lock()), however that is not a
         * documented feature of the radix-tree API so it is better to be
         * safe here (we don't have total exclusion from radix tree updates
         * here, only deletes).
         */
        rcu_read_lock();
        idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
        page = radix_tree_lookup(&brd->brd_pages, idx);
        rcu_read_unlock();

        BUG_ON(page && page->index != idx);

        return page;
}

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, insert it, and return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
        pgoff_t idx;
        struct page *page;
        gfp_t gfp_flags;

        page = brd_lookup_page(brd, sector);
        if (page)
                return page;

        /*
         * Must use NOIO because we don't want to recurse back into the
         * block or filesystem layers from page reclaim.
         */
        gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
        page = alloc_page(gfp_flags);
        if (!page)
                return NULL;

        if (radix_tree_preload(GFP_NOIO)) {
                __free_page(page);
                return NULL;
        }

        spin_lock(&brd->brd_lock);
        idx = sector >> PAGE_SECTORS_SHIFT;
        page->index = idx;
        if (radix_tree_insert(&brd->brd_pages, idx, page)) {
                /* Lost a race: someone else inserted this index first. */
                __free_page(page);
                page = radix_tree_lookup(&brd->brd_pages, idx);
                BUG_ON(!page);
                BUG_ON(page->index != idx);
        }
        spin_unlock(&brd->brd_lock);

        radix_tree_preload_end();

        return page;
}
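/*
 * Worked example of the sector-to-page mapping used throughout this file
 * (values assume 4K pages and 512-byte sectors, i.e. PAGE_SECTORS_SHIFT == 3
 * and PAGE_SECTORS == 8):
 *
 *      sector 19:      idx    = 19 >> PAGE_SECTORS_SHIFT          = 2
 *                      offset = (19 & (PAGE_SECTORS-1)) << SECTOR_SHIFT
 *                             = 3 << 9                            = 1536
 *
 * So an I/O at sector 19 lands in backing page 2, starting 1536 bytes in.
 */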
/*
 * Free all backing store pages and the radix tree. This must only be called
 * when there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
        unsigned long pos = 0;
        struct page *pages[FREE_BATCH];
        int nr_pages;

        do {
                int i;

                nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
                                (void **)pages, pos, FREE_BATCH);

                for (i = 0; i < nr_pages; i++) {
                        void *ret;

                        BUG_ON(pages[i]->index < pos);
                        pos = pages[i]->index;
                        ret = radix_tree_delete(&brd->brd_pages, pos);
                        BUG_ON(!ret || ret != pages[i]);
                        __free_page(pages[i]);
                }

                pos++;

                /*
                 * Removing a large ramdisk takes a while (3.4 seconds for
                 * 80GiB), so cond_resched() is needed to avoid stalling
                 * the CPU.
                 */
                cond_resched();

                /*
                 * This assumes radix_tree_gang_lookup always returns as
                 * many pages as possible. If the radix-tree code changes,
                 * so will this have to.
                 */
        } while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
        unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
        size_t copy;

        copy = min_t(size_t, n, PAGE_SIZE - offset);
        if (!brd_insert_page(brd, sector))
                return -ENOSPC;
        if (copy < n) {
                sector += copy >> SECTOR_SHIFT;
                if (!brd_insert_page(brd, sector))
                        return -ENOSPC;
        }
        return 0;
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
                        sector_t sector, size_t n)
{
        struct page *page;
        void *dst;
        unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
        size_t copy;

        copy = min_t(size_t, n, PAGE_SIZE - offset);
        page = brd_lookup_page(brd, sector);
        BUG_ON(!page);

        dst = kmap_atomic(page);
        memcpy(dst + offset, src, copy);
        kunmap_atomic(dst);

        if (copy < n) {
                src += copy;
                sector += copy >> SECTOR_SHIFT;
                copy = n - copy;
                page = brd_lookup_page(brd, sector);
                BUG_ON(!page);

                dst = kmap_atomic(page);
                memcpy(dst, src, copy);
                kunmap_atomic(dst);
        }
}
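/*
 * The two-step pattern in copy_to_brd() (and copy_from_brd() below) handles
 * I/O that straddles a page boundary. For example, again assuming 4K pages
 * and 512-byte sectors, a 4096-byte write at sector 6 starts at offset 3072:
 *
 *      first copy:     min(4096, 4096 - 3072) = 1024 bytes into page 0
 *      second copy:    the remaining 3072 bytes at offset 0 of page 1
 *
 * A segment passed to brd_do_bvec() is at most PAGE_SIZE bytes, so at most
 * two backing pages are ever touched per call.
 */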
/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
                        sector_t sector, size_t n)
{
        struct page *page;
        void *src;
        unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
        size_t copy;

        copy = min_t(size_t, n, PAGE_SIZE - offset);
        page = brd_lookup_page(brd, sector);
        if (page) {
                src = kmap_atomic(page);
                memcpy(dst, src + offset, copy);
                kunmap_atomic(src);
        } else
                memset(dst, 0, copy);

        if (copy < n) {
                dst += copy;
                sector += copy >> SECTOR_SHIFT;
                copy = n - copy;
                page = brd_lookup_page(brd, sector);
                if (page) {
                        src = kmap_atomic(page);
                        memcpy(dst, src, copy);
                        kunmap_atomic(src);
                } else
                        memset(dst, 0, copy);
        }
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
                        unsigned int len, unsigned int off, unsigned int op,
                        sector_t sector)
{
        void *mem;
        int err = 0;

        if (op_is_write(op)) {
                err = copy_to_brd_setup(brd, sector, len);
                if (err)
                        goto out;
        }

        mem = kmap_atomic(page);
        if (!op_is_write(op)) {
                copy_from_brd(mem + off, brd, sector, len);
                flush_dcache_page(page);
        } else {
                flush_dcache_page(page);
                copy_to_brd(brd, mem + off, sector, len);
        }
        kunmap_atomic(mem);

out:
        return err;
}

static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
        struct brd_device *brd = bio->bi_disk->private_data;
        struct bio_vec bvec;
        sector_t sector;
        struct bvec_iter iter;

        sector = bio->bi_iter.bi_sector;
        if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
                goto io_error;

        bio_for_each_segment(bvec, bio, iter) {
                unsigned int len = bvec.bv_len;
                int err;

                /* Don't support unaligned buffers */
                WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) ||
                                (len & (SECTOR_SIZE - 1)));

                err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
                                  bio_op(bio), sector);
                if (err)
                        goto io_error;
                sector += len >> SECTOR_SHIFT;
        }

        bio_endio(bio);
        return BLK_QC_T_NONE;
io_error:
        bio_io_error(bio);
        return BLK_QC_T_NONE;
}

static int brd_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, unsigned int op)
{
        struct brd_device *brd = bdev->bd_disk->private_data;
        int err;

        if (PageTransHuge(page))
                return -ENOTSUPP;
        err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
        page_endio(page, op_is_write(op), err);
        return err;
}

static const struct block_device_operations brd_fops = {
        .owner =                THIS_MODULE,
        .rw_page =              brd_rw_page,
};
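/*
 * A minimal sketch of how a caller reaches ->rw_page (hypothetical caller
 * code; see bdev_read_page()/bdev_write_page() in fs/block_dev.c for the
 * real helpers): swap and page cache I/O can move one page synchronously,
 * without allocating a bio, falling back to a bio if the device lacks
 * ->rw_page:
 *
 *      err = bdev_read_page(bdev, sector, page);
 *      if (err == -EOPNOTSUPP)
 *              // build and submit a bio instead
 *
 * brd_rw_page() itself returns -ENOTSUPP for transparent huge pages, which
 * it cannot serve through the single-page copy helpers above.
 */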
/*
 * And now the module's code and kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, 0444);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, ulong, 0444);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Number of minors to reserve between devices");

MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
        rd_size = simple_strtol(str, NULL, 0);
        return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif

/*
 * The device scheme is derived from loop.c. Keep them in sync where possible
 * (they should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
        struct brd_device *brd;
        struct gendisk *disk;

        brd = kzalloc(sizeof(*brd), GFP_KERNEL);
        if (!brd)
                goto out;
        brd->brd_number = i;
        spin_lock_init(&brd->brd_lock);
        INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

        brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
        if (!brd->brd_queue)
                goto out_free_dev;

        blk_queue_make_request(brd->brd_queue, brd_make_request);

        /*
         * This is so fdisk will align partitions on 4k, because of the
         * direct_access API needing 4k alignment and returning a PFN.
         * (This is only a problem on very small devices <= 4M; otherwise
         * fdisk will align on 1M. Regardless, this call is harmless.)
         */
        blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
        disk = brd->brd_disk = alloc_disk(max_part);
        if (!disk)
                goto out_free_queue;
        disk->major = RAMDISK_MAJOR;
        disk->first_minor = i * max_part;
        disk->fops = &brd_fops;
        disk->private_data = brd;
        disk->flags = GENHD_FL_EXT_DEVT;
        sprintf(disk->disk_name, "ram%d", i);
        set_capacity(disk, rd_size * 2);
        brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;

        /* Tell the block layer that this is not a rotational device */
        blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, brd->brd_queue);

        return brd;

out_free_queue:
        blk_cleanup_queue(brd->brd_queue);
out_free_dev:
        kfree(brd);
out:
        return NULL;
}

static void brd_free(struct brd_device *brd)
{
        put_disk(brd->brd_disk);
        blk_cleanup_queue(brd->brd_queue);
        brd_free_pages(brd);
        kfree(brd);
}

static struct brd_device *brd_init_one(int i, bool *new)
{
        struct brd_device *brd;

        *new = false;
        list_for_each_entry(brd, &brd_devices, brd_list) {
                if (brd->brd_number == i)
                        goto out;
        }

        brd = brd_alloc(i);
        if (brd) {
                brd->brd_disk->queue = brd->brd_queue;
                add_disk(brd->brd_disk);
                list_add_tail(&brd->brd_list, &brd_devices);
        }
        *new = true;
out:
        return brd;
}

static void brd_del_one(struct brd_device *brd)
{
        list_del(&brd->brd_list);
        del_gendisk(brd->brd_disk);
        brd_free(brd);
}
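/*
 * brd_probe() below maps an incoming dev_t to a device index with
 * MINOR(dev) / max_part. As an illustration (not the default: max_part
 * is 1 unless overridden), with max_part == 4 minors 0-3 belong to ram0
 * and its partitions, minors 4-7 to ram1, and a first open of device
 * node "b 1 9" instantiates ram2 on demand.
 */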
static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
        struct brd_device *brd;
        struct kobject *kobj;
        bool new;

        mutex_lock(&brd_devices_mutex);
        brd = brd_init_one(MINOR(dev) / max_part, &new);
        kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL;
        mutex_unlock(&brd_devices_mutex);

        if (new)
                *part = 0;

        return kobj;
}

static inline void brd_check_and_reset_par(void)
{
        if (unlikely(!max_part))
                max_part = 1;

        /*
         * Make sure 'max_part' divides (1U << MINORBITS) exactly; otherwise
         * it is possible to get the same dev_t when adding partitions.
         */
        if ((1U << MINORBITS) % max_part != 0)
                max_part = 1UL << fls(max_part);

        if (max_part > DISK_MAX_PARTS) {
                pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
                        DISK_MAX_PARTS, DISK_MAX_PARTS);
                max_part = DISK_MAX_PARTS;
        }
}

static int __init brd_init(void)
{
        struct brd_device *brd, *next;
        int i;

        /*
         * The brd module now has a feature to instantiate the underlying
         * device structure on demand, provided that the device node is
         * accessed.
         *
         * (1) If rd_nr is specified, create that many upfront; else it
         *     defaults to CONFIG_BLK_DEV_RAM_COUNT.
         * (2) Users can further extend brd devices by creating device
         *     nodes themselves and having the kernel automatically
         *     instantiate the actual device on demand. Example:
         *              mknod /path/devnod_name b 1 X   # 1 is the rd major
         *              fdisk -l /path/devnod_name
         *     If device (X / max_part) was not already created, it will
         *     be created dynamically.
         */

        if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
                return -EIO;

        brd_check_and_reset_par();

        for (i = 0; i < rd_nr; i++) {
                brd = brd_alloc(i);
                if (!brd)
                        goto out_free;
                list_add_tail(&brd->brd_list, &brd_devices);
        }

        /* point of no return */

        list_for_each_entry(brd, &brd_devices, brd_list) {
                /*
                 * Associate the queue with the disk just before adding
                 * the disk, to avoid messing up the failure path.
                 */
                brd->brd_disk->queue = brd->brd_queue;
                add_disk(brd->brd_disk);
        }

        blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
                            THIS_MODULE, brd_probe, NULL, NULL);

        pr_info("brd: module loaded\n");
        return 0;

out_free:
        list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
                list_del(&brd->brd_list);
                brd_free(brd);
        }
        unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

        pr_info("brd: module NOT loaded !!!\n");
        return -ENOMEM;
}

static void __exit brd_exit(void)
{
        struct brd_device *brd, *next;

        list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
                brd_del_one(brd);

        blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
        unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

        pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);
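/*
 * Typical usage from userspace (sizes here are illustrative): rd_size is
 * given in KiB, and set_capacity(disk, rd_size * 2) above converts KiB to
 * 512-byte sectors.
 *
 *      modprobe brd rd_nr=4 rd_size=65536      # four 64MiB ramdisks
 *      mkfs.ext4 /dev/ram0
 *      mount /dev/ram0 /mnt
 */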