/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/cpuhotplug.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static void zram_free_page(struct zram *zram, size_t index);

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
	return zram->table[index].handle;
}

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
	zram->table[index].handle = handle;
}

/* flag operations require table entry bit_spin_lock() being held */
static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].value &= ~BIT(flag);
}

static inline void zram_set_element(struct zram *zram, u32 index,
			unsigned long element)
{
	zram->table[index].element = element;
}

static unsigned long zram_get_element(struct zram *zram, u32 index)
{
	return zram->table[index].element;
}

static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
	return zram->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram *zram,
			u32 index, size_t size)
{
	unsigned long flags = zram->table[index].value >> ZRAM_FLAG_SHIFT;

	zram->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
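
/*
 * A sketch of the table[index].value layout implied by the two
 * accessors above (the exact widths come from ZRAM_FLAG_SHIFT in
 * zram_drv.h):
 *
 *	bits [BITS_PER_LONG-1 .. ZRAM_FLAG_SHIFT]	zram_pageflags
 *	bits [ZRAM_FLAG_SHIFT-1 .. 0]			compressed obj size
 *
 * This is why zram_set_obj_size() re-reads the flag bits first: a
 * plain store of the size would wipe them out.
 */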

#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return false;
}
#endif

static void zram_revalidate_disk(struct zram *zram)
{
	revalidate_disk(zram->disk);
	/* revalidate_disk() resets BDI_CAP_STABLE_WRITES, so set it again */
	zram->disk->queue->backing_dev_info->capabilities |=
		BDI_CAP_STABLE_WRITES;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return false;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return false;

	/* I/O request is valid */
	return true;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	*index += (*offset + bvec->bv_len) / PAGE_SIZE;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static inline void update_used_max(struct zram *zram,
				const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

static inline void zram_fill_page(char *ptr, unsigned long len,
				unsigned long value)
{
	int i;
	unsigned long *page = (unsigned long *)ptr;

	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));

	if (likely(value == 0)) {
		memset(ptr, 0, len);
	} else {
		for (i = 0; i < len / sizeof(*page); i++)
			page[i] = value;
	}
}

static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned int pos;
	unsigned long *page;
	unsigned long val;

	page = (unsigned long *)ptr;
	val = page[0];

	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (val != page[pos])
			return false;
	}

	*element = val;

	return true;
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(zram->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}
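
/*
 * Driving the two knobs above from user space (illustrative only,
 * assuming the device is zram0):
 *
 *	echo 256M > /sys/block/zram0/mem_limit	(cap the pool; 0 = no limit)
 *	echo 0 > /sys/block/zram0/mem_used_max	(reset the peak watermark)
 *
 * Note that mem_used_max only accepts 0; any other value is -EINVAL.
 */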

/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2-year
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[CRYPTO_MAX_ALG_NAME];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
	/* ignore trailing newline */
	sz = strlen(compressor);
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	strlcpy(zram->compressor, compressor, sizeof(compressor));
	up_write(&zram->init_lock);
	return len;
}

static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	zs_compact(zram->mem_pool);
	up_read(&zram->init_lock);

	return len;
}

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->mem_pool);
		zs_pool_stats(zram->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			pool_stats.pages_compacted);
	up_read(&zram->init_lock);

	return ret;
}
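
/*
 * A sample mm_stat read (values are illustrative only):
 *
 *	$ cat /sys/block/zram0/mm_stat
 *	   65536     2654    12288        0    12288        4        0
 *
 * Columns, per the scnprintf() above: orig_data_size compr_data_size
 * mem_used_total mem_limit mem_used_max same_pages pages_compacted.
 */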

static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall));
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);

static void zram_slot_lock(struct zram *zram, u32 index)
{
	bit_spin_lock(ZRAM_ACCESS, &zram->table[index].value);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value);
}

static bool zram_same_page_read(struct zram *zram, u32 index,
				struct page *page,
				unsigned int offset, unsigned int len)
{
	zram_slot_lock(zram, index);
	if (unlikely(!zram_get_handle(zram, index) ||
			zram_test_flag(zram, index, ZRAM_SAME))) {
		void *mem;

		zram_slot_unlock(zram, index);
		mem = kmap_atomic(page);
		zram_fill_page(mem + offset, len,
				zram_get_element(zram, index));
		kunmap_atomic(mem);
		return true;
	}
	zram_slot_unlock(zram, index);

	return false;
}

static bool zram_same_page_write(struct zram *zram, u32 index,
				struct page *page)
{
	unsigned long element;
	void *mem = kmap_atomic(page);

	if (page_same_filled(mem, &element)) {
		kunmap_atomic(mem);
		/* Free memory associated with this sector now. */
		zram_slot_lock(zram, index);
		zram_free_page(zram, index);
		zram_set_flag(zram, index, ZRAM_SAME);
		zram_set_element(zram, index, element);
		zram_slot_unlock(zram, index);

		atomic64_inc(&zram->stats.same_pages);
		return true;
	}
	kunmap_atomic(mem);

	return false;
}
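
/*
 * Example of the same-filled shortcut above: a page that was memset()
 * to zero (the common case) or to any repeating unsigned long pattern
 * is not compressed at all -- page_same_filled() spots it, only the
 * pattern is kept in table[index].element, and no zsmalloc handle is
 * ever allocated. zram_same_page_read() later reconstructs the page
 * from that one value via zram_fill_page().
 */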

static void zram_meta_free(struct zram *zram, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++)
		zram_free_page(zram, index);

	zs_destroy_pool(zram->mem_pool);
	vfree(zram->table);
}

static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
	size_t num_pages;

	num_pages = disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table)
		return false;

	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
	if (!zram->mem_pool) {
		vfree(zram->table);
		return false;
	}

	return true;
}

/*
 * To protect concurrent access to the same index entry, the caller
 * should hold this table entry's bit_spinlock while accessing it.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	unsigned long handle = zram_get_handle(zram, index);

	/*
	 * No memory is allocated for same element filled pages.
	 * Simply clear same page flag.
	 */
	if (zram_test_flag(zram, index, ZRAM_SAME)) {
		zram_clear_flag(zram, index, ZRAM_SAME);
		zram_set_element(zram, index, 0);
		atomic64_dec(&zram->stats.same_pages);
		return;
	}

	if (!handle)
		return;

	zs_free(zram->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(zram, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	zram_set_handle(zram, index, 0);
	zram_set_obj_size(zram, index, 0);
}

static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
{
	int ret;
	unsigned long handle;
	unsigned int size;
	void *src, *dst;

	if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
		return 0;

	zram_slot_lock(zram, index);
	handle = zram_get_handle(zram, index);
	size = zram_get_obj_size(zram, index);

	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE) {
		dst = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
		ret = 0;
	} else {
		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

		dst = kmap_atomic(page);
		ret = zcomp_decompress(zstrm, src, size, dst);
		kunmap_atomic(dst);
		zcomp_stream_put(zram->comp);
	}
	zs_unmap_object(zram->mem_pool, handle);
	zram_slot_unlock(zram, index);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);

	return ret;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset)
{
	int ret;
	struct page *page;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
	}

	ret = zram_decompress_page(zram, page, index);
	if (unlikely(ret))
		goto out;

	if (is_partial_io(bvec)) {
		void *dst = kmap_atomic(bvec->bv_page);
		void *src = kmap_atomic(page);

		memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
		kunmap_atomic(src);
		kunmap_atomic(dst);
	}
out:
	if (is_partial_io(bvec))
		__free_page(page);

	return ret;
}
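
/*
 * Note on the bounce page above: partial reads can only exist when
 * PAGE_SIZE != 4096 (see is_partial_io()). For example, on a 64K-page
 * kernel a 4K bio segment decompresses the whole 64K zram page into
 * the temporary page and then copies out just the 4K the caller asked
 * for. (Illustrative scenario; with 4K pages this path compiles away.)
 */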

static int zram_compress(struct zram *zram, struct zcomp_strm **zstrm,
			struct page *page,
			unsigned long *out_handle, unsigned int *out_comp_len)
{
	int ret;
	unsigned int comp_len;
	void *src;
	unsigned long alloced_pages;
	unsigned long handle = 0;

compress_again:
	src = kmap_atomic(page);
	ret = zcomp_compress(*zstrm, src, &comp_len);
	kunmap_atomic(src);

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		if (handle)
			zs_free(zram->mem_pool, handle);
		return ret;
	}

	if (unlikely(comp_len > max_zpage_size))
		comp_len = PAGE_SIZE;

	/*
	 * handle allocation has 2 paths:
	 * a) fast path is executed with preemption disabled (for
	 *    per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
	 *    since we can't sleep;
	 * b) slow path enables preemption and attempts to allocate
	 *    the page with __GFP_DIRECT_RECLAIM bit set. we have to
	 *    put per-cpu compression stream and, thus, to re-do
	 *    the compression once handle is allocated.
	 *
	 * if we have a 'non-null' handle here then we are coming
	 * from the slow path and handle has already been allocated.
	 */
	if (!handle)
		handle = zs_malloc(zram->mem_pool, comp_len,
				__GFP_KSWAPD_RECLAIM |
				__GFP_NOWARN |
				__GFP_HIGHMEM |
				__GFP_MOVABLE);
	if (!handle) {
		zcomp_stream_put(zram->comp);
		atomic64_inc(&zram->stats.writestall);
		handle = zs_malloc(zram->mem_pool, comp_len,
				GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE);
		*zstrm = zcomp_stream_get(zram->comp);
		if (handle)
			goto compress_again;
		return -ENOMEM;
	}

	alloced_pages = zs_get_total_pages(zram->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(zram->mem_pool, handle);
		return -ENOMEM;
	}

	*out_handle = handle;
	*out_comp_len = comp_len;
	return 0;
}

static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
{
	int ret;
	unsigned long handle;
	unsigned int comp_len;
	void *src, *dst;
	struct zcomp_strm *zstrm;
	struct page *page = bvec->bv_page;

	if (zram_same_page_write(zram, index, page))
		return 0;

	zstrm = zcomp_stream_get(zram->comp);
	ret = zram_compress(zram, &zstrm, page, &handle, &comp_len);
	if (ret) {
		zcomp_stream_put(zram->comp);
		return ret;
	}

	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

	src = zstrm->buffer;
	if (comp_len == PAGE_SIZE)
		src = kmap_atomic(page);
	memcpy(dst, src, comp_len);
	if (comp_len == PAGE_SIZE)
		kunmap_atomic(src);

	zcomp_stream_put(zram->comp);
	zs_unmap_object(zram->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	zram_slot_lock(zram, index);
	zram_free_page(zram, index);
	zram_set_handle(zram, index, handle);
	zram_set_obj_size(zram, index, comp_len);
	zram_slot_unlock(zram, index);

	/* Update stats */
	atomic64_add(comp_len, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
	return 0;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset)
{
	int ret;
	struct page *page = NULL;
	void *src;
	struct bio_vec vec;

	vec = *bvec;
	if (is_partial_io(bvec)) {
		void *dst;
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;

		ret = zram_decompress_page(zram, page, index);
		if (ret)
			goto out;

		src = kmap_atomic(bvec->bv_page);
		dst = kmap_atomic(page);
		memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		vec.bv_page = page;
		vec.bv_len = PAGE_SIZE;
		vec.bv_offset = 0;
	}

	ret = __zram_bvec_write(zram, &vec, index);
out:
	if (is_partial_io(bvec))
		__free_page(page);
	return ret;
}
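
/*
 * Write path in brief (a sketch of the three functions above):
 * zram_bvec_write() bounces a partial segment through a full page
 * (read-modify-write), __zram_bvec_write() first tries the
 * same-filled shortcut, otherwise compresses into a per-cpu stream
 * buffer, copies the result into a zsmalloc object, and finally swaps
 * the new handle into the table under the slot lock, freeing the old
 * one.
 */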

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;

	/*
	 * zram manages data in physical block size units. Because logical
	 * block size isn't identical to the physical block size on some
	 * arch, we could get a discard request pointing to a specific offset
	 * within a certain physical block. Although we can handle this
	 * request by reading that physical block, decompressing it,
	 * partially zeroing it, and then re-compressing and re-storing it,
	 * this isn't reasonable because our intent with a discard request
	 * is to save memory. So skipping this logical block is appropriate
	 * here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		zram_slot_lock(zram, index);
		zram_free_page(zram, index);
		zram_slot_unlock(zram, index);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, bool is_write)
{
	unsigned long start_time = jiffies;
	int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
	int ret;

	generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (!is_write) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
		flush_dcache_page(bvec->bv_page);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (!is_write)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
			(SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio);
		return;
	default:
		break;
	}

	bio_for_each_segment(bvec, bio, iter) {
		struct bio_vec bv = bvec;
		unsigned int unwritten = bvec.bv_len;

		do {
			bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
							unwritten);
			if (zram_bvec_rw(zram, &bv, index, offset,
					op_is_write(bio_op(bio))) < 0)
				goto out;

			bv.bv_offset += bv.bv_len;
			unwritten -= bv.bv_len;

			update_position(&index, &offset, &bv);
		} while (unwritten);
	}

	bio_endio(bio);
	return;

out:
	bio_io_error(bio);
}
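
/*
 * The inner loop above clamps every bvec so that no sub-request
 * crosses a PAGE_SIZE boundary. For example (illustrative, and only
 * possible when PAGE_SIZE > 4K), a segment that starts 4K before a
 * page boundary is handled as two zram_bvec_rw() calls, one against
 * (index, offset) and one against (index + 1, 0), with
 * update_position() advancing the pair between them.
 */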

/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	return BLK_QC_T_NONE;

error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;

	zram_slot_lock(zram, index);
	zram_free_page(zram, index);
	zram_slot_unlock(zram, index);
	atomic64_inc(&zram->stats.notify_free);
}

static int zram_rw_page(struct block_device *bdev, sector_t sector,
			struct page *page, bool is_write)
{
	int offset, err = -EIO;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	zram = bdev->bd_disk->private_data;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		err = -EINVAL;
		goto out;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(zram, &bv, index, offset, is_write);
out:
	/*
	 * If I/O fails, just return the error (i.e., non-zero) without
	 * calling page_endio.
	 * The upper callers of rw_page (e.g., swap_readpage,
	 * __swap_writepage) will then resubmit the I/O as a bio, and
	 * bio->bi_end_io handles the error
	 * (e.g., SetPageError, set_page_dirty and extra work).
	 */
	if (err == 0)
		page_endio(page, is_write, 0);
	return err;
}

static void zram_reset_device(struct zram *zram)
{
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	comp = zram->comp;
	disksize = zram->disksize;
	zram->disksize = 0;

	set_capacity(zram->disk, 0);
	part_stat_set_all(&zram->disk->part0, 0);

	up_write(&zram->init_lock);
	/* I/O on all CPUs is done, so it is safe to free the metadata */
	zram_meta_free(zram, disksize);
	memset(&zram->stats, 0, sizeof(zram->stats));
	zcomp_destroy(comp);
}

static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_unlock;
	}

	disksize = PAGE_ALIGN(disksize);
	if (!zram_meta_alloc(zram, disksize)) {
		err = -ENOMEM;
		goto out_unlock;
	}

	comp = zcomp_create(zram->compressor);
	if (IS_ERR(comp)) {
		pr_err("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_revalidate_disk(zram);
	up_write(&zram->init_lock);

	return len;

out_free_meta:
	zram_meta_free(zram, disksize);
out_unlock:
	up_write(&zram->init_lock);
	return err;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		return ret;

	if (!do_reset)
		return -EINVAL;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device or claimed device */
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	/* From now on, no one can open /dev/zram[0-9] */
	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	zram_revalidate_disk(zram);
	bdput(bdev);

	mutex_lock(&bdev->bd_mutex);
	zram->claim = false;
	mutex_unlock(&bdev->bd_mutex);

	return len;
}

static int zram_open(struct block_device *bdev, fmode_t mode)
{
	int ret = 0;
	struct zram *zram;

	WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

	zram = bdev->bd_disk->private_data;
	/* zram was claimed to reset so open request fails */
	if (zram->claim)
		ret = -EBUSY;

	return ret;
}

static const struct block_device_operations zram_devops = {
	.open = zram_open,
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};

static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_compact.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	&dev_attr_io_stat.attr,
	&dev_attr_mm_stat.attr,
	&dev_attr_debug_stat.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};
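
/*
 * Typical lifecycle of the attributes declared above, from user space
 * (illustrative, assuming zram0):
 *
 *	echo lzo > /sys/block/zram0/comp_algorithm	(before disksize!)
 *	echo 1G > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 *	...
 *	swapoff /dev/zram0
 *	echo 1 > /sys/block/zram0/reset			(device must be idle)
 */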

/*
 * Allocate and initialize a new zram device. The function returns
 * a '>= 0' device_id upon success, and a negative value otherwise.
 */
static int zram_add(void)
{
	struct zram *zram;
	struct request_queue *queue;
	int ret, device_id;

	zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
	if (!zram)
		return -ENOMEM;

	ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_dev;
	device_id = ret;

	init_rwsem(&zram->init_lock);

	queue = blk_alloc_queue(GFP_KERNEL);
	if (!queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_idr;
	}

	blk_queue_make_request(queue, zram_make_request);

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_err("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = queue;
	zram->disk->queue->queuedata = zram;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity is set via sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

	/*
	 * zram_bio_discard() will clear all logical blocks if logical block
	 * size is identical to the physical block size (PAGE_SIZE). But if
	 * it is different, we will skip discarding some parts of logical
	 * blocks in the part of the request range which isn't aligned to
	 * physical block size. So we can't ensure that all discarded
	 * logical blocks are zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_err("Error creating sysfs group for device %d\n",
				device_id);
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));

	pr_info("Added device: %s\n", zram->disk->disk_name);
	return device_id;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(queue);
out_free_idr:
	idr_remove(&zram_index_idr, device_id);
out_free_dev:
	kfree(zram);
	return ret;
}

static int zram_remove(struct zram *zram)
{
	struct block_device *bdev;

	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	/*
	 * Remove sysfs first, so no one will perform a disksize
	 * store while we destroy the devices. This also helps during
	 * hot_remove -- zram_reset_device() is the last holder of
	 * ->init_lock, no later/concurrent disksize_store() or any
	 * other sysfs handlers are possible.
	 */
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	bdput(bdev);

	pr_info("Removed device: %s\n", zram->disk->disk_name);

	blk_cleanup_queue(zram->disk->queue);
	del_gendisk(zram->disk);
	put_disk(zram->disk);
	kfree(zram);
	return 0;
}

/* zram-control sysfs attributes */
static ssize_t hot_add_show(struct class *class,
			struct class_attribute *attr,
			char *buf)
{
	int ret;

	mutex_lock(&zram_index_mutex);
	ret = zram_add();
	mutex_unlock(&zram_index_mutex);

	if (ret < 0)
		return ret;
	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t hot_remove_store(struct class *class,
			struct class_attribute *attr,
			const char *buf,
			size_t count)
{
	struct zram *zram;
	int ret, dev_id;

	/* dev_id is gendisk->first_minor, which is `int' */
	ret = kstrtoint(buf, 10, &dev_id);
	if (ret)
		return ret;
	if (dev_id < 0)
		return -EINVAL;

	mutex_lock(&zram_index_mutex);

	zram = idr_find(&zram_index_idr, dev_id);
	if (zram) {
		ret = zram_remove(zram);
		if (!ret)
			idr_remove(&zram_index_idr, dev_id);
	} else {
		ret = -ENODEV;
	}

	mutex_unlock(&zram_index_mutex);
	return ret ? ret : count;
}
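
/*
 * The zram-control interface above, from user space (illustrative):
 *
 *	cat /sys/class/zram-control/hot_add	(prints the new device id)
 *	echo 4 > /sys/class/zram-control/hot_remove
 *
 * hot_remove fails with -EBUSY while the target device is open or
 * being reset.
 */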

/*
 * NOTE: hot_add attribute is not the usual read-only sysfs attribute,
 * in the sense that reading from this file does alter the state of
 * your system -- it creates a new un-initialized zram device and
 * returns back this device's device_id (or an error code if it fails
 * to create a new device).
 */
static struct class_attribute zram_control_class_attrs[] = {
	__ATTR(hot_add, 0400, hot_add_show, NULL),
	__ATTR_WO(hot_remove),
	__ATTR_NULL,
};

static struct class zram_control_class = {
	.name		= "zram-control",
	.owner		= THIS_MODULE,
	.class_attrs	= zram_control_class_attrs,
};

static int zram_remove_cb(int id, void *ptr, void *data)
{
	zram_remove(ptr);
	return 0;
}

static void destroy_devices(void)
{
	class_unregister(&zram_control_class);
	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
	idr_destroy(&zram_index_idr);
	unregister_blkdev(zram_major, "zram");
	cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}

static int __init zram_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
				      zcomp_cpu_up_prepare, zcomp_cpu_dead);
	if (ret < 0)
		return ret;

	ret = class_register(&zram_control_class);
	if (ret) {
		pr_err("Unable to register zram-control class\n");
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return ret;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_err("Unable to get major number\n");
		class_unregister(&zram_control_class);
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return -EBUSY;
	}

	while (num_devices != 0) {
		mutex_lock(&zram_index_mutex);
		ret = zram_add();
		mutex_unlock(&zram_index_mutex);
		if (ret < 0)
			goto out_error;
		num_devices--;
	}

	return 0;

out_error:
	destroy_devices();
	return ret;
}

static void __exit zram_exit(void)
{
	destroy_devices();
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");
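
/*
 * Loading example (illustrative): "modprobe zram num_devices=4"
 * pre-creates zram0..zram3; more devices can be added or removed at
 * runtime through /sys/class/zram-control/hot_add and hot_remove.
 */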