1 /*
2 * Compressed RAM block device
3 *
4 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
5 * 2012, 2013 Minchan Kim
6 *
7 * This code is released using a dual license strategy: BSD/GPL
8 * You can choose the licence that better fits your requirements.
9 *
10 * Released under the terms of 3-clause BSD License
11 * Released under the terms of GNU General Public License Version 2.0
12 *
13 */
14
15 #define KMSG_COMPONENT "zram"
16 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17
18 #include <linux/module.h>
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/bitops.h>
22 #include <linux/blkdev.h>
23 #include <linux/buffer_head.h>
24 #include <linux/device.h>
25 #include <linux/highmem.h>
26 #include <linux/slab.h>
27 #include <linux/backing-dev.h>
28 #include <linux/string.h>
29 #include <linux/vmalloc.h>
30 #include <linux/err.h>
31 #include <linux/idr.h>
32 #include <linux/sysfs.h>
33 #include <linux/debugfs.h>
34 #include <linux/cpuhotplug.h>
35 #include <linux/part_stat.h>
36
37 #include "zram_drv.h"
38
39 static DEFINE_IDR(zram_index_idr);
40 /* idr index must be protected */
41 static DEFINE_MUTEX(zram_index_mutex);
42
43 static int zram_major;
44 static const char *default_compressor = CONFIG_ZRAM_DEF_COMP;
45
46 /* Module params (documentation at end) */
47 static unsigned int num_devices = 1;
48 /*
49 * Pages that compress to sizes equal to or greater than this are stored
50 * uncompressed in memory.
51 */
52 static size_t huge_class_size;
53
54 static const struct block_device_operations zram_devops;
55
56 static void zram_free_page(struct zram *zram, size_t index);
57 static int zram_read_page(struct zram *zram, struct page *page, u32 index,
58 struct bio *parent);
59
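/*
 * Per-slot locking: every zram->table entry embeds a bit_spin_lock in the
 * ZRAM_LOCK bit of its flags word. A minimal usage sketch for the helpers
 * below (index must be a valid slot of an initialized device):
 *
 *	zram_slot_lock(zram, index);
 *	... inspect or modify zram->table[index] ...
 *	zram_slot_unlock(zram, index);
 */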
60 static int zram_slot_trylock(struct zram *zram, u32 index)
61 {
62 return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
63 }
64
65 static void zram_slot_lock(struct zram *zram, u32 index)
66 {
67 bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
68 }
69
70 static void zram_slot_unlock(struct zram *zram, u32 index)
71 {
72 bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
73 }
74
75 static inline bool init_done(struct zram *zram)
76 {
77 return zram->disksize;
78 }
79
80 static inline struct zram *dev_to_zram(struct device *dev)
81 {
82 return (struct zram *)dev_to_disk(dev)->private_data;
83 }
84
85 static unsigned long zram_get_handle(struct zram *zram, u32 index)
86 {
87 return zram->table[index].handle;
88 }
89
90 static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
91 {
92 zram->table[index].handle = handle;
93 }
94
95 /* flag operations require the table entry's bit_spin_lock() to be held */
96 static bool zram_test_flag(struct zram *zram, u32 index,
97 enum zram_pageflags flag)
98 {
99 return zram->table[index].flags & BIT(flag);
100 }
101
102 static void zram_set_flag(struct zram *zram, u32 index,
103 enum zram_pageflags flag)
104 {
105 zram->table[index].flags |= BIT(flag);
106 }
107
108 static void zram_clear_flag(struct zram *zram, u32 index,
109 enum zram_pageflags flag)
110 {
111 zram->table[index].flags &= ~BIT(flag);
112 }
113
114 static inline void zram_set_element(struct zram *zram, u32 index,
115 unsigned long element)
116 {
117 zram->table[index].element = element;
118 }
119
120 static unsigned long zram_get_element(struct zram *zram, u32 index)
121 {
122 return zram->table[index].element;
123 }
124
125 static size_t zram_get_obj_size(struct zram *zram, u32 index)
126 {
127 return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
128 }
129
130 static void zram_set_obj_size(struct zram *zram,
131 u32 index, size_t size)
132 {
133 unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;
134
135 zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
136 }
137
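/*
 * Layout note: the low ZRAM_FLAG_SHIFT bits of table[index].flags hold the
 * compressed object size, while the upper bits hold the ZRAM_* page flags
 * and the compression priority, which is why the helpers above mask and
 * shift around ZRAM_FLAG_SHIFT.
 */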
138 static inline bool zram_allocated(struct zram *zram, u32 index)
139 {
140 return zram_get_obj_size(zram, index) ||
141 zram_test_flag(zram, index, ZRAM_SAME) ||
142 zram_test_flag(zram, index, ZRAM_WB);
143 }
144
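/*
 * With 4K pages every bvec spans a whole zram page, so partial-IO handling
 * (and the temporary-page read-modify-write paths guarded by
 * ZRAM_PARTIAL_IO) is only needed when PAGE_SIZE != 4096.
 */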
145 #if PAGE_SIZE != 4096
146 static inline bool is_partial_io(struct bio_vec *bvec)
147 {
148 return bvec->bv_len != PAGE_SIZE;
149 }
150 #define ZRAM_PARTIAL_IO 1
151 #else
152 static inline bool is_partial_io(struct bio_vec *bvec)
153 {
154 return false;
155 }
156 #endif
157
158 static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio)
159 {
160 prio &= ZRAM_COMP_PRIORITY_MASK;
161 /*
162 * Clear the previous priority value first, in case we are recompressing
163 * an already recompressed page
164 */
165 zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
166 ZRAM_COMP_PRIORITY_BIT1);
167 zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
168 }
169
170 static inline u32 zram_get_priority(struct zram *zram, u32 index)
171 {
172 u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1;
173
174 return prio & ZRAM_COMP_PRIORITY_MASK;
175 }
176
177 static void zram_accessed(struct zram *zram, u32 index)
178 {
179 zram_clear_flag(zram, index, ZRAM_IDLE);
180 #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
181 zram->table[index].ac_time = ktime_get_boottime();
182 #endif
183 }
184
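/*
 * Racelessly raise stats.max_used_pages to @pages if it grew; the cmpxchg
 * loop below guarantees concurrent updaters cannot lose a new maximum.
 */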
185 static inline void update_used_max(struct zram *zram,
186 const unsigned long pages)
187 {
188 unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
189
190 do {
191 if (cur_max >= pages)
192 return;
193 } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
194 &cur_max, pages));
195 }
196
197 static inline void zram_fill_page(void *ptr, unsigned long len,
198 unsigned long value)
199 {
200 WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
201 memset_l(ptr, value, len / sizeof(unsigned long));
202 }
203
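/*
 * Returns true if every unsigned long in the page equals the first one
 * (e.g. an all-zero page). The repeated value is returned via @element so
 * that only the ZRAM_SAME flag and the element need to be stored, with no
 * zsmalloc allocation at all.
 */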
204 static bool page_same_filled(void *ptr, unsigned long *element)
205 {
206 unsigned long *page;
207 unsigned long val;
208 unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
209
210 page = (unsigned long *)ptr;
211 val = page[0];
212
213 if (val != page[last_pos])
214 return false;
215
216 for (pos = 1; pos < last_pos; pos++) {
217 if (val != page[pos])
218 return false;
219 }
220
221 *element = val;
222
223 return true;
224 }
225
226 static ssize_t initstate_show(struct device *dev,
227 struct device_attribute *attr, char *buf)
228 {
229 u32 val;
230 struct zram *zram = dev_to_zram(dev);
231
232 down_read(&zram->init_lock);
233 val = init_done(zram);
234 up_read(&zram->init_lock);
235
236 return scnprintf(buf, PAGE_SIZE, "%u\n", val);
237 }
238
239 static ssize_t disksize_show(struct device *dev,
240 struct device_attribute *attr, char *buf)
241 {
242 struct zram *zram = dev_to_zram(dev);
243
244 return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
245 }
246
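/*
 * Sets the memory usage limit in bytes (0 removes the limit). memparse()
 * accepts size suffixes, so e.g. "echo 256M > /sys/block/zram0/mem_limit"
 * (device name illustrative) limits the pool to 256 MiB.
 */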
247 static ssize_t mem_limit_store(struct device *dev,
248 struct device_attribute *attr, const char *buf, size_t len)
249 {
250 u64 limit;
251 char *tmp;
252 struct zram *zram = dev_to_zram(dev);
253
254 limit = memparse(buf, &tmp);
255 if (buf == tmp) /* no chars parsed, invalid input */
256 return -EINVAL;
257
258 down_write(&zram->init_lock);
259 zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
260 up_write(&zram->init_lock);
261
262 return len;
263 }
264
265 static ssize_t mem_used_max_store(struct device *dev,
266 struct device_attribute *attr, const char *buf, size_t len)
267 {
268 int err;
269 unsigned long val;
270 struct zram *zram = dev_to_zram(dev);
271
272 err = kstrtoul(buf, 10, &val);
273 if (err || val != 0)
274 return -EINVAL;
275
276 down_read(&zram->init_lock);
277 if (init_done(zram)) {
278 atomic_long_set(&zram->stats.max_used_pages,
279 zs_get_total_pages(zram->mem_pool));
280 }
281 up_read(&zram->init_lock);
282
283 return len;
284 }
285
286 /*
287 * Mark all pages which are older than or equal to cutoff as IDLE.
288 * Callers should hold the zram init lock in read mode
289 */
290 static void mark_idle(struct zram *zram, ktime_t cutoff)
291 {
292 int is_idle = 1;
293 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
294 int index;
295
296 for (index = 0; index < nr_pages; index++) {
297 /*
298 * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race.
299 * See the comment in writeback_store.
300 *
301 * Also do not mark ZRAM_SAME slots as ZRAM_IDLE, because no
302 * post-processing (recompress, writeback) happens to the
303 * ZRAM_SAME slot.
304 *
305 * And ZRAM_WB slots simply cannot be ZRAM_IDLE.
306 */
307 zram_slot_lock(zram, index);
308 if (!zram_allocated(zram, index) ||
309 zram_test_flag(zram, index, ZRAM_WB) ||
310 zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
311 zram_test_flag(zram, index, ZRAM_SAME)) {
312 zram_slot_unlock(zram, index);
313 continue;
314 }
315
316 #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
317 is_idle = !cutoff ||
318 ktime_after(cutoff, zram->table[index].ac_time);
319 #endif
320 if (is_idle)
321 zram_set_flag(zram, index, ZRAM_IDLE);
322 else
323 zram_clear_flag(zram, index, ZRAM_IDLE);
324 zram_slot_unlock(zram, index);
325 }
326 }
327
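/*
 * Writing "all" marks every eligible slot idle. With
 * CONFIG_ZRAM_TRACK_ENTRY_ACTIME an integer is treated as an age cutoff in
 * seconds, e.g. "echo 3600 > /sys/block/zram0/idle" (device name
 * illustrative) marks slots not accessed for an hour.
 */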
328 static ssize_t idle_store(struct device *dev,
329 struct device_attribute *attr, const char *buf, size_t len)
330 {
331 struct zram *zram = dev_to_zram(dev);
332 ktime_t cutoff_time = 0;
333 ssize_t rv = -EINVAL;
334
335 if (!sysfs_streq(buf, "all")) {
336 /*
337 * If it did not parse as 'all' try to treat it as an integer
338 * when we have memory tracking enabled.
339 */
340 u64 age_sec;
341
342 if (IS_ENABLED(CONFIG_ZRAM_TRACK_ENTRY_ACTIME) && !kstrtoull(buf, 0, &age_sec))
343 cutoff_time = ktime_sub(ktime_get_boottime(),
344 ns_to_ktime(age_sec * NSEC_PER_SEC));
345 else
346 goto out;
347 }
348
349 down_read(&zram->init_lock);
350 if (!init_done(zram))
351 goto out_unlock;
352
353 /*
354 * A cutoff_time of 0 marks everything as idle; this is the
355 * "all" behavior.
356 */
357 mark_idle(zram, cutoff_time);
358 rv = len;
359
360 out_unlock:
361 up_read(&zram->init_lock);
362 out:
363 return rv;
364 }
365
366 #ifdef CONFIG_ZRAM_WRITEBACK
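/*
 * writeback_limit/writeback_limit_enable budget how much data may go to the
 * backing device: bd_wb_limit is accounted in 4K units and decremented in
 * writeback_store(), which stops with -EIO once the budget is exhausted.
 */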
367 static ssize_t writeback_limit_enable_store(struct device *dev,
368 struct device_attribute *attr, const char *buf, size_t len)
369 {
370 struct zram *zram = dev_to_zram(dev);
371 u64 val;
372 ssize_t ret = -EINVAL;
373
374 if (kstrtoull(buf, 10, &val))
375 return ret;
376
377 down_read(&zram->init_lock);
378 spin_lock(&zram->wb_limit_lock);
379 zram->wb_limit_enable = val;
380 spin_unlock(&zram->wb_limit_lock);
381 up_read(&zram->init_lock);
382 ret = len;
383
384 return ret;
385 }
386
387 static ssize_t writeback_limit_enable_show(struct device *dev,
388 struct device_attribute *attr, char *buf)
389 {
390 bool val;
391 struct zram *zram = dev_to_zram(dev);
392
393 down_read(&zram->init_lock);
394 spin_lock(&zram->wb_limit_lock);
395 val = zram->wb_limit_enable;
396 spin_unlock(&zram->wb_limit_lock);
397 up_read(&zram->init_lock);
398
399 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
400 }
401
402 static ssize_t writeback_limit_store(struct device *dev,
403 struct device_attribute *attr, const char *buf, size_t len)
404 {
405 struct zram *zram = dev_to_zram(dev);
406 u64 val;
407 ssize_t ret = -EINVAL;
408
409 if (kstrtoull(buf, 10, &val))
410 return ret;
411
412 down_read(&zram->init_lock);
413 spin_lock(&zram->wb_limit_lock);
414 zram->bd_wb_limit = val;
415 spin_unlock(&zram->wb_limit_lock);
416 up_read(&zram->init_lock);
417 ret = len;
418
419 return ret;
420 }
421
422 static ssize_t writeback_limit_show(struct device *dev,
423 struct device_attribute *attr, char *buf)
424 {
425 u64 val;
426 struct zram *zram = dev_to_zram(dev);
427
428 down_read(&zram->init_lock);
429 spin_lock(&zram->wb_limit_lock);
430 val = zram->bd_wb_limit;
431 spin_unlock(&zram->wb_limit_lock);
432 up_read(&zram->init_lock);
433
434 return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
435 }
436
437 static void reset_bdev(struct zram *zram)
438 {
439 struct block_device *bdev;
440
441 if (!zram->backing_dev)
442 return;
443
444 bdev = zram->bdev;
445 blkdev_put(bdev, zram);
446 /* hope filp_close() flushes all of the IO */
447 filp_close(zram->backing_dev, NULL);
448 zram->backing_dev = NULL;
449 zram->bdev = NULL;
450 zram->disk->fops = &zram_devops;
451 kvfree(zram->bitmap);
452 zram->bitmap = NULL;
453 }
454
455 static ssize_t backing_dev_show(struct device *dev,
456 struct device_attribute *attr, char *buf)
457 {
458 struct file *file;
459 struct zram *zram = dev_to_zram(dev);
460 char *p;
461 ssize_t ret;
462
463 down_read(&zram->init_lock);
464 file = zram->backing_dev;
465 if (!file) {
466 memcpy(buf, "none\n", 5);
467 up_read(&zram->init_lock);
468 return 5;
469 }
470
471 p = file_path(file, buf, PAGE_SIZE - 1);
472 if (IS_ERR(p)) {
473 ret = PTR_ERR(p);
474 goto out;
475 }
476
477 ret = strlen(p);
478 memmove(buf, p, ret);
479 buf[ret++] = '\n';
480 out:
481 up_read(&zram->init_lock);
482 return ret;
483 }
484
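/*
 * Attaches a backing block device to write pages to, e.g.
 * "echo /dev/nvme0n1p3 > /sys/block/zram0/backing_dev" (both names
 * illustrative). Only block devices are accepted, and only before the zram
 * device is initialized.
 */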
485 static ssize_t backing_dev_store(struct device *dev,
486 struct device_attribute *attr, const char *buf, size_t len)
487 {
488 char *file_name;
489 size_t sz;
490 struct file *backing_dev = NULL;
491 struct inode *inode;
492 struct address_space *mapping;
493 unsigned int bitmap_sz;
494 unsigned long nr_pages, *bitmap = NULL;
495 struct block_device *bdev = NULL;
496 int err;
497 struct zram *zram = dev_to_zram(dev);
498
499 file_name = kmalloc(PATH_MAX, GFP_KERNEL);
500 if (!file_name)
501 return -ENOMEM;
502
503 down_write(&zram->init_lock);
504 if (init_done(zram)) {
505 pr_info("Can't setup backing device for initialized device\n");
506 err = -EBUSY;
507 goto out;
508 }
509
510 strscpy(file_name, buf, PATH_MAX);
511 /* ignore trailing newline */
512 sz = strlen(file_name);
513 if (sz > 0 && file_name[sz - 1] == '\n')
514 file_name[sz - 1] = 0x00;
515
516 backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
517 if (IS_ERR(backing_dev)) {
518 err = PTR_ERR(backing_dev);
519 backing_dev = NULL;
520 goto out;
521 }
522
523 mapping = backing_dev->f_mapping;
524 inode = mapping->host;
525
526 /* Only block devices are supported at the moment */
527 if (!S_ISBLK(inode->i_mode)) {
528 err = -ENOTBLK;
529 goto out;
530 }
531
532 bdev = blkdev_get_by_dev(inode->i_rdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
533 zram, NULL);
534 if (IS_ERR(bdev)) {
535 err = PTR_ERR(bdev);
536 bdev = NULL;
537 goto out;
538 }
539
540 nr_pages = i_size_read(inode) >> PAGE_SHIFT;
541 /* Refuse to use a zero-sized device (also prevents self-reference) */
542 if (!nr_pages) {
543 err = -EINVAL;
544 goto out;
545 }
546
547 bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
548 bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
549 if (!bitmap) {
550 err = -ENOMEM;
551 goto out;
552 }
553
554 reset_bdev(zram);
555
556 zram->bdev = bdev;
557 zram->backing_dev = backing_dev;
558 zram->bitmap = bitmap;
559 zram->nr_pages = nr_pages;
560 up_write(&zram->init_lock);
561
562 pr_info("setup backing device %s\n", file_name);
563 kfree(file_name);
564
565 return len;
566 out:
567 kvfree(bitmap);
568
569 if (bdev)
570 blkdev_put(bdev, zram);
571
572 if (backing_dev)
573 filp_close(backing_dev, NULL);
574
575 up_write(&zram->init_lock);
576
577 kfree(file_name);
578
579 return err;
580 }
581
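/*
 * Finds and claims a free slot in the backing-device bitmap. Returns the
 * block index, or 0 when the device is full; index 0 itself is never handed
 * out so that 0 can safely mean "no block".
 */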
582 static unsigned long alloc_block_bdev(struct zram *zram)
583 {
584 unsigned long blk_idx = 1;
585 retry:
586 /* skip bit 0 to avoid confusion with zram.handle == 0 */
587 blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
588 if (blk_idx == zram->nr_pages)
589 return 0;
590
591 if (test_and_set_bit(blk_idx, zram->bitmap))
592 goto retry;
593
594 atomic64_inc(&zram->stats.bd_count);
595 return blk_idx;
596 }
597
598 static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
599 {
600 int was_set;
601
602 was_set = test_and_clear_bit(blk_idx, zram->bitmap);
603 WARN_ON_ONCE(!was_set);
604 atomic64_dec(&zram->stats.bd_count);
605 }
606
607 static void read_from_bdev_async(struct zram *zram, struct page *page,
608 unsigned long entry, struct bio *parent)
609 {
610 struct bio *bio;
611
612 bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
613 bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
614 __bio_add_page(bio, page, PAGE_SIZE, 0);
615 bio_chain(bio, parent);
616 submit_bio(bio);
617 }
618
619 #define PAGE_WB_SIG "page_index="
620
621 #define PAGE_WRITEBACK 0
622 #define HUGE_WRITEBACK (1<<0)
623 #define IDLE_WRITEBACK (1<<1)
624 #define INCOMPRESSIBLE_WRITEBACK (1<<2)
625
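/*
 * Accepted values: "idle", "huge", "huge_idle", "incompressible", or
 * "page_index=<n>" to write back a single slot, e.g.
 * "echo idle > /sys/block/zram0/writeback" (device name illustrative).
 */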
626 static ssize_t writeback_store(struct device *dev,
627 struct device_attribute *attr, const char *buf, size_t len)
628 {
629 struct zram *zram = dev_to_zram(dev);
630 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
631 unsigned long index = 0;
632 struct bio bio;
633 struct bio_vec bio_vec;
634 struct page *page;
635 ssize_t ret = len;
636 int mode, err;
637 unsigned long blk_idx = 0;
638
639 if (sysfs_streq(buf, "idle"))
640 mode = IDLE_WRITEBACK;
641 else if (sysfs_streq(buf, "huge"))
642 mode = HUGE_WRITEBACK;
643 else if (sysfs_streq(buf, "huge_idle"))
644 mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
645 else if (sysfs_streq(buf, "incompressible"))
646 mode = INCOMPRESSIBLE_WRITEBACK;
647 else {
648 if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
649 return -EINVAL;
650
651 if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
652 index >= nr_pages)
653 return -EINVAL;
654
655 nr_pages = 1;
656 mode = PAGE_WRITEBACK;
657 }
658
659 down_read(&zram->init_lock);
660 if (!init_done(zram)) {
661 ret = -EINVAL;
662 goto release_init_lock;
663 }
664
665 if (!zram->backing_dev) {
666 ret = -ENODEV;
667 goto release_init_lock;
668 }
669
670 page = alloc_page(GFP_KERNEL);
671 if (!page) {
672 ret = -ENOMEM;
673 goto release_init_lock;
674 }
675
676 for (; nr_pages != 0; index++, nr_pages--) {
677 spin_lock(&zram->wb_limit_lock);
678 if (zram->wb_limit_enable && !zram->bd_wb_limit) {
679 spin_unlock(&zram->wb_limit_lock);
680 ret = -EIO;
681 break;
682 }
683 spin_unlock(&zram->wb_limit_lock);
684
685 if (!blk_idx) {
686 blk_idx = alloc_block_bdev(zram);
687 if (!blk_idx) {
688 ret = -ENOSPC;
689 break;
690 }
691 }
692
693 zram_slot_lock(zram, index);
694 if (!zram_allocated(zram, index))
695 goto next;
696
697 if (zram_test_flag(zram, index, ZRAM_WB) ||
698 zram_test_flag(zram, index, ZRAM_SAME) ||
699 zram_test_flag(zram, index, ZRAM_UNDER_WB))
700 goto next;
701
702 if (mode & IDLE_WRITEBACK &&
703 !zram_test_flag(zram, index, ZRAM_IDLE))
704 goto next;
705 if (mode & HUGE_WRITEBACK &&
706 !zram_test_flag(zram, index, ZRAM_HUGE))
707 goto next;
708 if (mode & INCOMPRESSIBLE_WRITEBACK &&
709 !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
710 goto next;
711
712 /*
713 * Clearing ZRAM_UNDER_WB is the caller's duty.
714 * IOW, zram_free_page() never clears it.
715 */
716 zram_set_flag(zram, index, ZRAM_UNDER_WB);
717 /* Needed so the ZRAM_IDLE race check below also covers hugepage writeback */
718 zram_set_flag(zram, index, ZRAM_IDLE);
719 zram_slot_unlock(zram, index);
720 if (zram_read_page(zram, page, index, NULL)) {
721 zram_slot_lock(zram, index);
722 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
723 zram_clear_flag(zram, index, ZRAM_IDLE);
724 zram_slot_unlock(zram, index);
725 continue;
726 }
727
728 bio_init(&bio, zram->bdev, &bio_vec, 1,
729 REQ_OP_WRITE | REQ_SYNC);
730 bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
731 __bio_add_page(&bio, page, PAGE_SIZE, 0);
732
733 /*
734 * XXX: A single page IO would be inefficient for writes,
735 * but it is not bad as a starting point.
736 */
737 err = submit_bio_wait(&bio);
738 if (err) {
739 zram_slot_lock(zram, index);
740 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
741 zram_clear_flag(zram, index, ZRAM_IDLE);
742 zram_slot_unlock(zram, index);
743 /*
744 * BIO errors are not fatal; we continue and simply
745 * attempt to write back the remaining objects (pages).
746 * At the same time we need to signal user space that
747 * some writes (at least one, but possibly all of
748 * them) were not successful, and we do so by returning
749 * the most recent BIO error.
750 */
751 ret = err;
752 continue;
753 }
754
755 atomic64_inc(&zram->stats.bd_writes);
756 /*
757 * We released the slot lock, so we need to check whether the slot
758 * was changed. If the slot was simply freed, we can catch that
759 * easily via zram_allocated().
760 * A subtler case is the slot being freed, reallocated and marked
761 * ZRAM_IDLE again. To close that race, idle_store() doesn't
762 * mark a slot ZRAM_IDLE once it finds the slot is ZRAM_UNDER_WB.
763 * Thus, we can close the race by checking the ZRAM_IDLE bit.
764 */
765 zram_slot_lock(zram, index);
766 if (!zram_allocated(zram, index) ||
767 !zram_test_flag(zram, index, ZRAM_IDLE)) {
768 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
769 zram_clear_flag(zram, index, ZRAM_IDLE);
770 goto next;
771 }
772
773 zram_free_page(zram, index);
774 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
775 zram_set_flag(zram, index, ZRAM_WB);
776 zram_set_element(zram, index, blk_idx);
777 blk_idx = 0;
778 atomic64_inc(&zram->stats.pages_stored);
779 spin_lock(&zram->wb_limit_lock);
780 if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
781 zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
782 spin_unlock(&zram->wb_limit_lock);
783 next:
784 zram_slot_unlock(zram, index);
785 }
786
787 if (blk_idx)
788 free_block_bdev(zram, blk_idx);
789 __free_page(page);
790 release_init_lock:
791 up_read(&zram->init_lock);
792
793 return ret;
794 }
795
796 struct zram_work {
797 struct work_struct work;
798 struct zram *zram;
799 unsigned long entry;
800 struct page *page;
801 int error;
802 };
803
804 static void zram_sync_read(struct work_struct *work)
805 {
806 struct zram_work *zw = container_of(work, struct zram_work, work);
807 struct bio_vec bv;
808 struct bio bio;
809
810 bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ);
811 bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
812 __bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
813 zw->error = submit_bio_wait(&bio);
814 }
815
816 /*
817 * The block layer wants one ->submit_bio to be active at a time, so if we use
818 * chained IO with the parent IO in the same context, it deadlocks. To avoid that,
819 * use a worker thread context.
820 */
821 static int read_from_bdev_sync(struct zram *zram, struct page *page,
822 unsigned long entry)
823 {
824 struct zram_work work;
825
826 work.page = page;
827 work.zram = zram;
828 work.entry = entry;
829
830 INIT_WORK_ONSTACK(&work.work, zram_sync_read);
831 queue_work(system_unbound_wq, &work.work);
832 flush_work(&work.work);
833 destroy_work_on_stack(&work.work);
834
835 return work.error;
836 }
837
838 static int read_from_bdev(struct zram *zram, struct page *page,
839 unsigned long entry, struct bio *parent)
840 {
841 atomic64_inc(&zram->stats.bd_reads);
842 if (!parent) {
843 if (WARN_ON_ONCE(!IS_ENABLED(ZRAM_PARTIAL_IO)))
844 return -EIO;
845 return read_from_bdev_sync(zram, page, entry);
846 }
847 read_from_bdev_async(zram, page, entry, parent);
848 return 0;
849 }
850 #else
851 static inline void reset_bdev(struct zram *zram) {};
852 static int read_from_bdev(struct zram *zram, struct page *page,
853 unsigned long entry, struct bio *parent)
854 {
855 return -EIO;
856 }
857
858 static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
859 #endif
860
861 #ifdef CONFIG_ZRAM_MEMORY_TRACKING
862
863 static struct dentry *zram_debugfs_root;
864
865 static void zram_debugfs_create(void)
866 {
867 zram_debugfs_root = debugfs_create_dir("zram", NULL);
868 }
869
870 static void zram_debugfs_destroy(void)
871 {
872 debugfs_remove_recursive(zram_debugfs_root);
873 }
874
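/*
 * block_state (debugfs) emits one line per allocated slot:
 * "<index> <sec.usec> <flags>", where the flags are s(ame) w(ritten-back)
 * h(uge) i(dle) r(ecompressed) n(incompressible), matching the snprintf()
 * format below.
 */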
875 static ssize_t read_block_state(struct file *file, char __user *buf,
876 size_t count, loff_t *ppos)
877 {
878 char *kbuf;
879 ssize_t index, written = 0;
880 struct zram *zram = file->private_data;
881 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
882 struct timespec64 ts;
883
884 kbuf = kvmalloc(count, GFP_KERNEL);
885 if (!kbuf)
886 return -ENOMEM;
887
888 down_read(&zram->init_lock);
889 if (!init_done(zram)) {
890 up_read(&zram->init_lock);
891 kvfree(kbuf);
892 return -EINVAL;
893 }
894
895 for (index = *ppos; index < nr_pages; index++) {
896 int copied;
897
898 zram_slot_lock(zram, index);
899 if (!zram_allocated(zram, index))
900 goto next;
901
902 ts = ktime_to_timespec64(zram->table[index].ac_time);
903 copied = snprintf(kbuf + written, count,
904 "%12zd %12lld.%06lu %c%c%c%c%c%c\n",
905 index, (s64)ts.tv_sec,
906 ts.tv_nsec / NSEC_PER_USEC,
907 zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
908 zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
909 zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
910 zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.',
911 zram_get_priority(zram, index) ? 'r' : '.',
912 zram_test_flag(zram, index,
913 ZRAM_INCOMPRESSIBLE) ? 'n' : '.');
914
915 if (count <= copied) {
916 zram_slot_unlock(zram, index);
917 break;
918 }
919 written += copied;
920 count -= copied;
921 next:
922 zram_slot_unlock(zram, index);
923 *ppos += 1;
924 }
925
926 up_read(&zram->init_lock);
927 if (copy_to_user(buf, kbuf, written))
928 written = -EFAULT;
929 kvfree(kbuf);
930
931 return written;
932 }
933
934 static const struct file_operations proc_zram_block_state_op = {
935 .open = simple_open,
936 .read = read_block_state,
937 .llseek = default_llseek,
938 };
939
940 static void zram_debugfs_register(struct zram *zram)
941 {
942 if (!zram_debugfs_root)
943 return;
944
945 zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
946 zram_debugfs_root);
947 debugfs_create_file("block_state", 0400, zram->debugfs_dir,
948 zram, &proc_zram_block_state_op);
949 }
950
951 static void zram_debugfs_unregister(struct zram *zram)
952 {
953 debugfs_remove_recursive(zram->debugfs_dir);
954 }
955 #else
956 static void zram_debugfs_create(void) {};
957 static void zram_debugfs_destroy(void) {};
958 static void zram_debugfs_register(struct zram *zram) {};
959 static void zram_debugfs_unregister(struct zram *zram) {};
960 #endif
961
962 /*
963 * We switched to per-cpu streams and this attr is not needed anymore.
964 * However, we will keep it around for some time, because:
965 * a) we may revert per-cpu streams in the future
966 * b) it's visible to user space and we need to follow our 2-year
967 * retirement rule; but we already have a number of 'soon to be
968 * altered' attrs, so max_comp_streams needs to wait for the next
969 * layoff cycle.
970 */
971 static ssize_t max_comp_streams_show(struct device *dev,
972 struct device_attribute *attr, char *buf)
973 {
974 return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
975 }
976
977 static ssize_t max_comp_streams_store(struct device *dev,
978 struct device_attribute *attr, const char *buf, size_t len)
979 {
980 return len;
981 }
982
983 static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
984 {
985 /* Do not free statically defined compression algorithms */
986 if (zram->comp_algs[prio] != default_compressor)
987 kfree(zram->comp_algs[prio]);
988
989 zram->comp_algs[prio] = alg;
990 }
991
992 static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf)
993 {
994 ssize_t sz;
995
996 down_read(&zram->init_lock);
997 sz = zcomp_available_show(zram->comp_algs[prio], buf);
998 up_read(&zram->init_lock);
999
1000 return sz;
1001 }
1002
1003 static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
1004 {
1005 char *compressor;
1006 size_t sz;
1007
1008 sz = strlen(buf);
1009 if (sz >= CRYPTO_MAX_ALG_NAME)
1010 return -E2BIG;
1011
1012 compressor = kstrdup(buf, GFP_KERNEL);
1013 if (!compressor)
1014 return -ENOMEM;
1015
1016 /* ignore trailing newline */
1017 if (sz > 0 && compressor[sz - 1] == '\n')
1018 compressor[sz - 1] = 0x00;
1019
1020 if (!zcomp_available_algorithm(compressor)) {
1021 kfree(compressor);
1022 return -EINVAL;
1023 }
1024
1025 down_write(&zram->init_lock);
1026 if (init_done(zram)) {
1027 up_write(&zram->init_lock);
1028 kfree(compressor);
1029 pr_info("Can't change algorithm for initialized device\n");
1030 return -EBUSY;
1031 }
1032
1033 comp_algorithm_set(zram, prio, compressor);
1034 up_write(&zram->init_lock);
1035 return 0;
1036 }
1037
1038 static ssize_t comp_algorithm_show(struct device *dev,
1039 struct device_attribute *attr,
1040 char *buf)
1041 {
1042 struct zram *zram = dev_to_zram(dev);
1043
1044 return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf);
1045 }
1046
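/*
 * Selects the primary compressor, e.g.
 * "echo lzo-rle > /sys/block/zram0/comp_algorithm" (device name
 * illustrative); only allowed before the device is initialized.
 */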
1047 static ssize_t comp_algorithm_store(struct device *dev,
1048 struct device_attribute *attr,
1049 const char *buf,
1050 size_t len)
1051 {
1052 struct zram *zram = dev_to_zram(dev);
1053 int ret;
1054
1055 ret = __comp_algorithm_store(zram, ZRAM_PRIMARY_COMP, buf);
1056 return ret ? ret : len;
1057 }
1058
1059 #ifdef CONFIG_ZRAM_MULTI_COMP
1060 static ssize_t recomp_algorithm_show(struct device *dev,
1061 struct device_attribute *attr,
1062 char *buf)
1063 {
1064 struct zram *zram = dev_to_zram(dev);
1065 ssize_t sz = 0;
1066 u32 prio;
1067
1068 for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
1069 if (!zram->comp_algs[prio])
1070 continue;
1071
1072 sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, "#%d: ", prio);
1073 sz += __comp_algorithm_show(zram, prio, buf + sz);
1074 }
1075
1076 return sz;
1077 }
1078
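/*
 * Registers a secondary compressor, e.g. (names illustrative)
 * "echo algo=zstd priority=1 > /sys/block/zram0/recomp_algorithm", where
 * priority must be in [ZRAM_SECONDARY_COMP, ZRAM_MAX_COMPS).
 */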
1079 static ssize_t recomp_algorithm_store(struct device *dev,
1080 struct device_attribute *attr,
1081 const char *buf,
1082 size_t len)
1083 {
1084 struct zram *zram = dev_to_zram(dev);
1085 int prio = ZRAM_SECONDARY_COMP;
1086 char *args, *param, *val;
1087 char *alg = NULL;
1088 int ret;
1089
1090 args = skip_spaces(buf);
1091 while (*args) {
1092 args = next_arg(args, &param, &val);
1093
1094 if (!val || !*val)
1095 return -EINVAL;
1096
1097 if (!strcmp(param, "algo")) {
1098 alg = val;
1099 continue;
1100 }
1101
1102 if (!strcmp(param, "priority")) {
1103 ret = kstrtoint(val, 10, &prio);
1104 if (ret)
1105 return ret;
1106 continue;
1107 }
1108 }
1109
1110 if (!alg)
1111 return -EINVAL;
1112
1113 if (prio < ZRAM_SECONDARY_COMP || prio >= ZRAM_MAX_COMPS)
1114 return -EINVAL;
1115
1116 ret = __comp_algorithm_store(zram, prio, alg);
1117 return ret ? ret : len;
1118 }
1119 #endif
1120
1121 static ssize_t compact_store(struct device *dev,
1122 struct device_attribute *attr, const char *buf, size_t len)
1123 {
1124 struct zram *zram = dev_to_zram(dev);
1125
1126 down_read(&zram->init_lock);
1127 if (!init_done(zram)) {
1128 up_read(&zram->init_lock);
1129 return -EINVAL;
1130 }
1131
1132 zs_compact(zram->mem_pool);
1133 up_read(&zram->init_lock);
1134
1135 return len;
1136 }
1137
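/*
 * io_stat columns: failed_reads, failed_writes, a literal 0 (the retired
 * invalid_io counter) and notify_free.
 */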
1138 static ssize_t io_stat_show(struct device *dev,
1139 struct device_attribute *attr, char *buf)
1140 {
1141 struct zram *zram = dev_to_zram(dev);
1142 ssize_t ret;
1143
1144 down_read(&zram->init_lock);
1145 ret = scnprintf(buf, PAGE_SIZE,
1146 "%8llu %8llu 0 %8llu\n",
1147 (u64)atomic64_read(&zram->stats.failed_reads),
1148 (u64)atomic64_read(&zram->stats.failed_writes),
1149 (u64)atomic64_read(&zram->stats.notify_free));
1150 up_read(&zram->init_lock);
1151
1152 return ret;
1153 }
1154
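/*
 * mm_stat columns: orig_data_size, compr_data_size, mem_used_total,
 * mem_limit and mem_used_max (all in bytes), followed by the same_pages,
 * pages_compacted, huge_pages and huge_pages_since counters.
 */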
1155 static ssize_t mm_stat_show(struct device *dev,
1156 struct device_attribute *attr, char *buf)
1157 {
1158 struct zram *zram = dev_to_zram(dev);
1159 struct zs_pool_stats pool_stats;
1160 u64 orig_size, mem_used = 0;
1161 long max_used;
1162 ssize_t ret;
1163
1164 memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));
1165
1166 down_read(&zram->init_lock);
1167 if (init_done(zram)) {
1168 mem_used = zs_get_total_pages(zram->mem_pool);
1169 zs_pool_stats(zram->mem_pool, &pool_stats);
1170 }
1171
1172 orig_size = atomic64_read(&zram->stats.pages_stored);
1173 max_used = atomic_long_read(&zram->stats.max_used_pages);
1174
1175 ret = scnprintf(buf, PAGE_SIZE,
1176 "%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu %8llu\n",
1177 orig_size << PAGE_SHIFT,
1178 (u64)atomic64_read(&zram->stats.compr_data_size),
1179 mem_used << PAGE_SHIFT,
1180 zram->limit_pages << PAGE_SHIFT,
1181 max_used << PAGE_SHIFT,
1182 (u64)atomic64_read(&zram->stats.same_pages),
1183 atomic_long_read(&pool_stats.pages_compacted),
1184 (u64)atomic64_read(&zram->stats.huge_pages),
1185 (u64)atomic64_read(&zram->stats.huge_pages_since));
1186 up_read(&zram->init_lock);
1187
1188 return ret;
1189 }
1190
1191 #ifdef CONFIG_ZRAM_WRITEBACK
1192 #define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
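/* bd_stat columns, all in units of 4K blocks: bd_count, bd_reads, bd_writes. */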
1193 static ssize_t bd_stat_show(struct device *dev,
1194 struct device_attribute *attr, char *buf)
1195 {
1196 struct zram *zram = dev_to_zram(dev);
1197 ssize_t ret;
1198
1199 down_read(&zram->init_lock);
1200 ret = scnprintf(buf, PAGE_SIZE,
1201 "%8llu %8llu %8llu\n",
1202 FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
1203 FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
1204 FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
1205 up_read(&zram->init_lock);
1206
1207 return ret;
1208 }
1209 #endif
1210
1211 static ssize_t debug_stat_show(struct device *dev,
1212 struct device_attribute *attr, char *buf)
1213 {
1214 int version = 1;
1215 struct zram *zram = dev_to_zram(dev);
1216 ssize_t ret;
1217
1218 down_read(&zram->init_lock);
1219 ret = scnprintf(buf, PAGE_SIZE,
1220 "version: %d\n%8llu %8llu\n",
1221 version,
1222 (u64)atomic64_read(&zram->stats.writestall),
1223 (u64)atomic64_read(&zram->stats.miss_free));
1224 up_read(&zram->init_lock);
1225
1226 return ret;
1227 }
1228
1229 static DEVICE_ATTR_RO(io_stat);
1230 static DEVICE_ATTR_RO(mm_stat);
1231 #ifdef CONFIG_ZRAM_WRITEBACK
1232 static DEVICE_ATTR_RO(bd_stat);
1233 #endif
1234 static DEVICE_ATTR_RO(debug_stat);
1235
1236 static void zram_meta_free(struct zram *zram, u64 disksize)
1237 {
1238 size_t num_pages = disksize >> PAGE_SHIFT;
1239 size_t index;
1240
1241 if (!zram->table)
1242 return;
1243
1244 /* Free all pages that are still in this zram device */
1245 for (index = 0; index < num_pages; index++)
1246 zram_free_page(zram, index);
1247
1248 zs_destroy_pool(zram->mem_pool);
1249 vfree(zram->table);
1250 zram->table = NULL;
1251 }
1252
1253 static bool zram_meta_alloc(struct zram *zram, u64 disksize)
1254 {
1255 size_t num_pages;
1256
1257 num_pages = disksize >> PAGE_SHIFT;
1258 zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
1259 if (!zram->table)
1260 return false;
1261
1262 zram->mem_pool = zs_create_pool(zram->disk->disk_name);
1263 if (!zram->mem_pool) {
1264 vfree(zram->table);
1265 return false;
1266 }
1267
1268 if (!huge_class_size)
1269 huge_class_size = zs_huge_class_size(zram->mem_pool);
1270 return true;
1271 }
1272
1273 /*
1274 * To protect against concurrent access to the same index entry, the
1275 * caller should hold this table entry's bit_spinlock to
1276 * indicate that the entry is being accessed.
1277 */
1278 static void zram_free_page(struct zram *zram, size_t index)
1279 {
1280 unsigned long handle;
1281
1282 #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
1283 zram->table[index].ac_time = 0;
1284 #endif
1285 if (zram_test_flag(zram, index, ZRAM_IDLE))
1286 zram_clear_flag(zram, index, ZRAM_IDLE);
1287
1288 if (zram_test_flag(zram, index, ZRAM_HUGE)) {
1289 zram_clear_flag(zram, index, ZRAM_HUGE);
1290 atomic64_dec(&zram->stats.huge_pages);
1291 }
1292
1293 if (zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
1294 zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
1295
1296 zram_set_priority(zram, index, 0);
1297
1298 if (zram_test_flag(zram, index, ZRAM_WB)) {
1299 zram_clear_flag(zram, index, ZRAM_WB);
1300 free_block_bdev(zram, zram_get_element(zram, index));
1301 goto out;
1302 }
1303
1304 /*
1305 * No memory is allocated for same-element-filled pages.
1306 * Simply clear the same-page flag.
1307 */
1308 if (zram_test_flag(zram, index, ZRAM_SAME)) {
1309 zram_clear_flag(zram, index, ZRAM_SAME);
1310 atomic64_dec(&zram->stats.same_pages);
1311 goto out;
1312 }
1313
1314 handle = zram_get_handle(zram, index);
1315 if (!handle)
1316 return;
1317
1318 zs_free(zram->mem_pool, handle);
1319
1320 atomic64_sub(zram_get_obj_size(zram, index),
1321 &zram->stats.compr_data_size);
1322 out:
1323 atomic64_dec(&zram->stats.pages_stored);
1324 zram_set_handle(zram, index, 0);
1325 zram_set_obj_size(zram, index, 0);
1326 WARN_ON_ONCE(zram->table[index].flags &
1327 ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
1328 }
1329
1330 /*
1331 * Reads (decompresses if needed) a page from zspool (zsmalloc).
1332 * Corresponding ZRAM slot should be locked.
1333 */
1334 static int zram_read_from_zspool(struct zram *zram, struct page *page,
1335 u32 index)
1336 {
1337 struct zcomp_strm *zstrm;
1338 unsigned long handle;
1339 unsigned int size;
1340 void *src, *dst;
1341 u32 prio;
1342 int ret;
1343
1344 handle = zram_get_handle(zram, index);
1345 if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
1346 unsigned long value;
1347 void *mem;
1348
1349 value = handle ? zram_get_element(zram, index) : 0;
1350 mem = kmap_atomic(page);
1351 zram_fill_page(mem, PAGE_SIZE, value);
1352 kunmap_atomic(mem);
1353 return 0;
1354 }
1355
1356 size = zram_get_obj_size(zram, index);
1357
1358 if (size != PAGE_SIZE) {
1359 prio = zram_get_priority(zram, index);
1360 zstrm = zcomp_stream_get(zram->comps[prio]);
1361 }
1362
1363 src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
1364 if (size == PAGE_SIZE) {
1365 dst = kmap_atomic(page);
1366 memcpy(dst, src, PAGE_SIZE);
1367 kunmap_atomic(dst);
1368 ret = 0;
1369 } else {
1370 dst = kmap_atomic(page);
1371 ret = zcomp_decompress(zstrm, src, size, dst);
1372 kunmap_atomic(dst);
1373 zcomp_stream_put(zram->comps[prio]);
1374 }
1375 zs_unmap_object(zram->mem_pool, handle);
1376 return ret;
1377 }
1378
1379 static int zram_read_page(struct zram *zram, struct page *page, u32 index,
1380 struct bio *parent)
1381 {
1382 int ret;
1383
1384 zram_slot_lock(zram, index);
1385 if (!zram_test_flag(zram, index, ZRAM_WB)) {
1386 /* Slot should be locked throughout the function call */
1387 ret = zram_read_from_zspool(zram, page, index);
1388 zram_slot_unlock(zram, index);
1389 } else {
1390 /*
1391 * The slot should be unlocked before reading from the backing
1392 * device.
1393 */
1394 zram_slot_unlock(zram, index);
1395
1396 ret = read_from_bdev(zram, page, zram_get_element(zram, index),
1397 parent);
1398 }
1399
1400 /* Should NEVER happen. Return bio error if it does. */
1401 if (WARN_ON(ret < 0))
1402 pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
1403
1404 return ret;
1405 }
1406
1407 /*
1408 * Use a temporary buffer to decompress the page, as the decompressor
1409 * always expects a full page for the output.
1410 */
1411 static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec,
1412 u32 index, int offset)
1413 {
1414 struct page *page = alloc_page(GFP_NOIO);
1415 int ret;
1416
1417 if (!page)
1418 return -ENOMEM;
1419 ret = zram_read_page(zram, page, index, NULL);
1420 if (likely(!ret))
1421 memcpy_to_bvec(bvec, page_address(page) + offset);
1422 __free_page(page);
1423 return ret;
1424 }
1425
1426 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
1427 u32 index, int offset, struct bio *bio)
1428 {
1429 if (is_partial_io(bvec))
1430 return zram_bvec_read_partial(zram, bvec, index, offset);
1431 return zram_read_page(zram, bvec->bv_page, index, bio);
1432 }
1433
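/*
 * Compresses @page and stores it at @index: same-filled pages are stored as
 * just an element value (ZRAM_SAME), pages that do not compress below
 * huge_class_size are stored uncompressed as ZRAM_HUGE, and everything else
 * goes into zsmalloc at its compressed length.
 */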
1434 static int zram_write_page(struct zram *zram, struct page *page, u32 index)
1435 {
1436 int ret = 0;
1437 unsigned long alloced_pages;
1438 unsigned long handle = -ENOMEM;
1439 unsigned int comp_len = 0;
1440 void *src, *dst, *mem;
1441 struct zcomp_strm *zstrm;
1442 unsigned long element = 0;
1443 enum zram_pageflags flags = 0;
1444
1445 mem = kmap_atomic(page);
1446 if (page_same_filled(mem, &element)) {
1447 kunmap_atomic(mem);
1448 /* Free memory associated with this sector now. */
1449 flags = ZRAM_SAME;
1450 atomic64_inc(&zram->stats.same_pages);
1451 goto out;
1452 }
1453 kunmap_atomic(mem);
1454
1455 compress_again:
1456 zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
1457 src = kmap_atomic(page);
1458 ret = zcomp_compress(zstrm, src, &comp_len);
1459 kunmap_atomic(src);
1460
1461 if (unlikely(ret)) {
1462 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1463 pr_err("Compression failed! err=%d\n", ret);
1464 zs_free(zram->mem_pool, handle);
1465 return ret;
1466 }
1467
1468 if (comp_len >= huge_class_size)
1469 comp_len = PAGE_SIZE;
1470 /*
1471 * handle allocation has 2 paths:
1472 * a) fast path is executed with preemption disabled (for
1473 * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
1474 * since we can't sleep;
1475 * b) slow path enables preemption and attempts to allocate
1476 * the page with __GFP_DIRECT_RECLAIM bit set. we have to
1477 * put per-cpu compression stream and, thus, to re-do
1478 * the compression once handle is allocated.
1479 *
1480 * if we have a 'non-null' handle here then we are coming
1481 * from the slow path and handle has already been allocated.
1482 */
1483 if (IS_ERR_VALUE(handle))
1484 handle = zs_malloc(zram->mem_pool, comp_len,
1485 __GFP_KSWAPD_RECLAIM |
1486 __GFP_NOWARN |
1487 __GFP_HIGHMEM |
1488 __GFP_MOVABLE);
1489 if (IS_ERR_VALUE(handle)) {
1490 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1491 atomic64_inc(&zram->stats.writestall);
1492 handle = zs_malloc(zram->mem_pool, comp_len,
1493 GFP_NOIO | __GFP_HIGHMEM |
1494 __GFP_MOVABLE);
1495 if (IS_ERR_VALUE(handle))
1496 return PTR_ERR((void *)handle);
1497
1498 if (comp_len != PAGE_SIZE)
1499 goto compress_again;
1500 /*
1501 * If the page is not compressible, we need to re-acquire the
1502 * compression stream and execute the code below. The zcomp_stream_get()
1503 * call is needed to disable CPU hotplug and grab the
1504 * zstrm buffer back, so that the dereferencing
1505 * of the zstrm variable below remains valid.
1506 */
1507 zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
1508 }
1509
1510 alloced_pages = zs_get_total_pages(zram->mem_pool);
1511 update_used_max(zram, alloced_pages);
1512
1513 if (zram->limit_pages && alloced_pages > zram->limit_pages) {
1514 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1515 zs_free(zram->mem_pool, handle);
1516 return -ENOMEM;
1517 }
1518
1519 dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
1520
1521 src = zstrm->buffer;
1522 if (comp_len == PAGE_SIZE)
1523 src = kmap_atomic(page);
1524 memcpy(dst, src, comp_len);
1525 if (comp_len == PAGE_SIZE)
1526 kunmap_atomic(src);
1527
1528 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1529 zs_unmap_object(zram->mem_pool, handle);
1530 atomic64_add(comp_len, &zram->stats.compr_data_size);
1531 out:
1532 /*
1533 * Free memory associated with this sector
1534 * before overwriting unused sectors.
1535 */
1536 zram_slot_lock(zram, index);
1537 zram_free_page(zram, index);
1538
1539 if (comp_len == PAGE_SIZE) {
1540 zram_set_flag(zram, index, ZRAM_HUGE);
1541 atomic64_inc(&zram->stats.huge_pages);
1542 atomic64_inc(&zram->stats.huge_pages_since);
1543 }
1544
1545 if (flags) {
1546 zram_set_flag(zram, index, flags);
1547 zram_set_element(zram, index, element);
1548 } else {
1549 zram_set_handle(zram, index, handle);
1550 zram_set_obj_size(zram, index, comp_len);
1551 }
1552 zram_slot_unlock(zram, index);
1553
1554 /* Update stats */
1555 atomic64_inc(&zram->stats.pages_stored);
1556 return ret;
1557 }
1558
1559 /*
1560 * This is a partial IO. Read the full page before writing the changes.
1561 */
1562 static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec,
1563 u32 index, int offset, struct bio *bio)
1564 {
1565 struct page *page = alloc_page(GFP_NOIO);
1566 int ret;
1567
1568 if (!page)
1569 return -ENOMEM;
1570
1571 ret = zram_read_page(zram, page, index, bio);
1572 if (!ret) {
1573 memcpy_from_bvec(page_address(page) + offset, bvec);
1574 ret = zram_write_page(zram, page, index);
1575 }
1576 __free_page(page);
1577 return ret;
1578 }
1579
1580 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
1581 u32 index, int offset, struct bio *bio)
1582 {
1583 if (is_partial_io(bvec))
1584 return zram_bvec_write_partial(zram, bvec, index, offset, bio);
1585 return zram_write_page(zram, bvec->bv_page, index);
1586 }
1587
1588 #ifdef CONFIG_ZRAM_MULTI_COMP
1589 /*
1590 * This function will decompress (unless it's ZRAM_HUGE) the page and then
1591 * attempt to compress it using provided compression algorithm priority
1592 * (which is potentially more effective).
1593 *
1594 * Corresponding ZRAM slot should be locked.
1595 */
1596 static int zram_recompress(struct zram *zram, u32 index, struct page *page,
1597 u32 threshold, u32 prio, u32 prio_max)
1598 {
1599 struct zcomp_strm *zstrm = NULL;
1600 unsigned long handle_old;
1601 unsigned long handle_new;
1602 unsigned int comp_len_old;
1603 unsigned int comp_len_new;
1604 unsigned int class_index_old;
1605 unsigned int class_index_new;
1606 u32 num_recomps = 0;
1607 void *src, *dst;
1608 int ret;
1609
1610 handle_old = zram_get_handle(zram, index);
1611 if (!handle_old)
1612 return -EINVAL;
1613
1614 comp_len_old = zram_get_obj_size(zram, index);
1615 /*
1616 * Do not recompress objects that are already "small enough".
1617 */
1618 if (comp_len_old < threshold)
1619 return 0;
1620
1621 ret = zram_read_from_zspool(zram, page, index);
1622 if (ret)
1623 return ret;
1624
1625 /*
1626 * We touched this entry so mark it as non-IDLE. This makes sure that
1627 * we don't preserve IDLE flag and don't incorrectly pick this entry
1628 * for different post-processing type (e.g. writeback).
1629 */
1630 zram_clear_flag(zram, index, ZRAM_IDLE);
1631
1632 class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
1633 /*
1634 * Iterate the secondary comp algorithms list (in order of priority)
1635 * and try to recompress the page.
1636 */
1637 for (; prio < prio_max; prio++) {
1638 if (!zram->comps[prio])
1639 continue;
1640
1641 /*
1642 * Skip if the object is already re-compressed with a higher
1643 * priority algorithm (or same algorithm).
1644 */
1645 if (prio <= zram_get_priority(zram, index))
1646 continue;
1647
1648 num_recomps++;
1649 zstrm = zcomp_stream_get(zram->comps[prio]);
1650 src = kmap_atomic(page);
1651 ret = zcomp_compress(zstrm, src, &comp_len_new);
1652 kunmap_atomic(src);
1653
1654 if (ret) {
1655 zcomp_stream_put(zram->comps[prio]);
1656 return ret;
1657 }
1658
1659 class_index_new = zs_lookup_class_index(zram->mem_pool,
1660 comp_len_new);
1661
1662 /* Continue until we make progress */
1663 if (class_index_new >= class_index_old ||
1664 (threshold && comp_len_new >= threshold)) {
1665 zcomp_stream_put(zram->comps[prio]);
1666 continue;
1667 }
1668
1669 /* Recompression was successful so break out */
1670 break;
1671 }
1672
1673 /*
1674 * We did not try to recompress, e.g. when we have only one
1675 * secondary algorithm and the page is already recompressed
1676 * using that algorithm
1677 */
1678 if (!zstrm)
1679 return 0;
1680
1681 if (class_index_new >= class_index_old) {
1682 /*
1683 * Secondary algorithms failed to re-compress the page
1684 * in a way that would save memory, mark the object as
1685 * incompressible so that we will not try to compress
1686 * it again.
1687 *
1688 * We need to make sure that all secondary algorithms have
1689 * failed, so we test if the number of recompressions matches
1690 * the number of active secondary algorithms.
1691 */
1692 if (num_recomps == zram->num_active_comps - 1)
1693 zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
1694 return 0;
1695 }
1696
1697 /* Successful recompression but above threshold */
1698 if (threshold && comp_len_new >= threshold)
1699 return 0;
1700
1701 /*
1702 * No direct reclaim (slow path) for handle allocation and no
1703 * re-compression attempt (unlike in zram_write_page()) since
1704 * we have already stored that object in zsmalloc. If we cannot
1705 * allocate memory for the recompressed object then we bail out and
1706 * simply keep the old (existing) object in zsmalloc.
1707 */
1708 handle_new = zs_malloc(zram->mem_pool, comp_len_new,
1709 __GFP_KSWAPD_RECLAIM |
1710 __GFP_NOWARN |
1711 __GFP_HIGHMEM |
1712 __GFP_MOVABLE);
1713 if (IS_ERR_VALUE(handle_new)) {
1714 zcomp_stream_put(zram->comps[prio]);
1715 return PTR_ERR((void *)handle_new);
1716 }
1717
1718 dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
1719 memcpy(dst, zstrm->buffer, comp_len_new);
1720 zcomp_stream_put(zram->comps[prio]);
1721
1722 zs_unmap_object(zram->mem_pool, handle_new);
1723
1724 zram_free_page(zram, index);
1725 zram_set_handle(zram, index, handle_new);
1726 zram_set_obj_size(zram, index, comp_len_new);
1727 zram_set_priority(zram, index, prio);
1728
1729 atomic64_add(comp_len_new, &zram->stats.compr_data_size);
1730 atomic64_inc(&zram->stats.pages_stored);
1731
1732 return 0;
1733 }
1734
1735 #define RECOMPRESS_IDLE (1 << 0)
1736 #define RECOMPRESS_HUGE (1 << 1)
1737
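/*
 * Example invocation (algorithm and device names illustrative):
 * "echo type=huge_idle threshold=500 algo=zstd > /sys/block/zram0/recompress"
 * re-compresses matching slots with the named secondary algorithm.
 */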
1738 static ssize_t recompress_store(struct device *dev,
1739 struct device_attribute *attr,
1740 const char *buf, size_t len)
1741 {
1742 u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS;
1743 struct zram *zram = dev_to_zram(dev);
1744 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
1745 char *args, *param, *val, *algo = NULL;
1746 u32 mode = 0, threshold = 0;
1747 unsigned long index;
1748 struct page *page;
1749 ssize_t ret;
1750
1751 args = skip_spaces(buf);
1752 while (*args) {
1753 args = next_arg(args, &param, &val);
1754
1755 if (!val || !*val)
1756 return -EINVAL;
1757
1758 if (!strcmp(param, "type")) {
1759 if (!strcmp(val, "idle"))
1760 mode = RECOMPRESS_IDLE;
1761 if (!strcmp(val, "huge"))
1762 mode = RECOMPRESS_HUGE;
1763 if (!strcmp(val, "huge_idle"))
1764 mode = RECOMPRESS_IDLE | RECOMPRESS_HUGE;
1765 continue;
1766 }
1767
1768 if (!strcmp(param, "threshold")) {
1769 /*
1770 * We will re-compress only idle objects equal to or
1771 * greater in size than the watermark.
1772 */
1773 ret = kstrtouint(val, 10, &threshold);
1774 if (ret)
1775 return ret;
1776 continue;
1777 }
1778
1779 if (!strcmp(param, "algo")) {
1780 algo = val;
1781 continue;
1782 }
1783 }
1784
1785 if (threshold >= huge_class_size)
1786 return -EINVAL;
1787
1788 down_read(&zram->init_lock);
1789 if (!init_done(zram)) {
1790 ret = -EINVAL;
1791 goto release_init_lock;
1792 }
1793
1794 if (algo) {
1795 bool found = false;
1796
1797 for (; prio < ZRAM_MAX_COMPS; prio++) {
1798 if (!zram->comp_algs[prio])
1799 continue;
1800
1801 if (!strcmp(zram->comp_algs[prio], algo)) {
1802 prio_max = min(prio + 1, ZRAM_MAX_COMPS);
1803 found = true;
1804 break;
1805 }
1806 }
1807
1808 if (!found) {
1809 ret = -EINVAL;
1810 goto release_init_lock;
1811 }
1812 }
1813
1814 page = alloc_page(GFP_KERNEL);
1815 if (!page) {
1816 ret = -ENOMEM;
1817 goto release_init_lock;
1818 }
1819
1820 ret = len;
1821 for (index = 0; index < nr_pages; index++) {
1822 int err = 0;
1823
1824 zram_slot_lock(zram, index);
1825
1826 if (!zram_allocated(zram, index))
1827 goto next;
1828
1829 if (mode & RECOMPRESS_IDLE &&
1830 !zram_test_flag(zram, index, ZRAM_IDLE))
1831 goto next;
1832
1833 if (mode & RECOMPRESS_HUGE &&
1834 !zram_test_flag(zram, index, ZRAM_HUGE))
1835 goto next;
1836
1837 if (zram_test_flag(zram, index, ZRAM_WB) ||
1838 zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
1839 zram_test_flag(zram, index, ZRAM_SAME) ||
1840 zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
1841 goto next;
1842
1843 err = zram_recompress(zram, index, page, threshold,
1844 prio, prio_max);
1845 next:
1846 zram_slot_unlock(zram, index);
1847 if (err) {
1848 ret = err;
1849 break;
1850 }
1851
1852 cond_resched();
1853 }
1854
1855 __free_page(page);
1856
1857 release_init_lock:
1858 up_read(&zram->init_lock);
1859 return ret;
1860 }
1861 #endif
1862
1863 static void zram_bio_discard(struct zram *zram, struct bio *bio)
1864 {
1865 size_t n = bio->bi_iter.bi_size;
1866 u32 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
1867 u32 offset = (bio->bi_iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
1868 SECTOR_SHIFT;
1869
1870 	/*
1871 	 * zram manages data in physical block size units. Because the logical
1872 	 * block size isn't identical to the physical block size on some
1873 	 * architectures, we could get a discard request pointing to a specific
1874 	 * offset within a certain physical block. Although we could handle
1875 	 * such a request by reading that physical block, decompressing it,
1876 	 * partially zeroing it, and then re-compressing and re-storing it,
1877 	 * that isn't reasonable because our intent with a discard request is
1878 	 * to save memory. So skipping this logical block is appropriate here.
1879 	 */
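	/*
	 * Worked example (illustrative only): with 4 KiB pages and 512-byte
	 * sectors SECTORS_PER_PAGE is 8, so a discard starting at sector 10
	 * gives index = 1 and offset = 1024. The 3 KiB tail of page 1 is
	 * skipped and, if enough bytes remain, freeing starts at page 2.
	 */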
1880 if (offset) {
1881 if (n <= (PAGE_SIZE - offset))
1882 return;
1883
1884 n -= (PAGE_SIZE - offset);
1885 index++;
1886 }
1887
1888 while (n >= PAGE_SIZE) {
1889 zram_slot_lock(zram, index);
1890 zram_free_page(zram, index);
1891 zram_slot_unlock(zram, index);
1892 atomic64_inc(&zram->stats.notify_free);
1893 index++;
1894 n -= PAGE_SIZE;
1895 }
1896
1897 bio_endio(bio);
1898 }
1899
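/*
 * Illustrative note: the read and write paths below walk the bio one
 * segment at a time, and each segment is clamped so it never crosses a
 * PAGE_SIZE boundary, since zram stores and (de)compresses data in
 * whole-page units.
 */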
1900 static void zram_bio_read(struct zram *zram, struct bio *bio)
1901 {
1902 unsigned long start_time = bio_start_io_acct(bio);
1903 struct bvec_iter iter = bio->bi_iter;
1904
1905 do {
1906 u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
1907 u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
1908 SECTOR_SHIFT;
1909 struct bio_vec bv = bio_iter_iovec(bio, iter);
1910
1911 bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
1912
1913 if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
1914 atomic64_inc(&zram->stats.failed_reads);
1915 bio->bi_status = BLK_STS_IOERR;
1916 break;
1917 }
1918 flush_dcache_page(bv.bv_page);
1919
1920 zram_slot_lock(zram, index);
1921 zram_accessed(zram, index);
1922 zram_slot_unlock(zram, index);
1923
1924 bio_advance_iter_single(bio, &iter, bv.bv_len);
1925 } while (iter.bi_size);
1926
1927 bio_end_io_acct(bio, start_time);
1928 bio_endio(bio);
1929 }
1930
1931 static void zram_bio_write(struct zram *zram, struct bio *bio)
1932 {
1933 unsigned long start_time = bio_start_io_acct(bio);
1934 struct bvec_iter iter = bio->bi_iter;
1935
1936 do {
1937 u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
1938 u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
1939 SECTOR_SHIFT;
1940 struct bio_vec bv = bio_iter_iovec(bio, iter);
1941
1942 bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
1943
1944 if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
1945 atomic64_inc(&zram->stats.failed_writes);
1946 bio->bi_status = BLK_STS_IOERR;
1947 break;
1948 }
1949
1950 zram_slot_lock(zram, index);
1951 zram_accessed(zram, index);
1952 zram_slot_unlock(zram, index);
1953
1954 bio_advance_iter_single(bio, &iter, bv.bv_len);
1955 } while (iter.bi_size);
1956
1957 bio_end_io_acct(bio, start_time);
1958 bio_endio(bio);
1959 }
1960
1961 /*
1962 * Handler function for all zram I/O requests.
1963 */
1964 static void zram_submit_bio(struct bio *bio)
1965 {
1966 struct zram *zram = bio->bi_bdev->bd_disk->private_data;
1967
1968 switch (bio_op(bio)) {
1969 case REQ_OP_READ:
1970 zram_bio_read(zram, bio);
1971 break;
1972 case REQ_OP_WRITE:
1973 zram_bio_write(zram, bio);
1974 break;
1975 case REQ_OP_DISCARD:
1976 case REQ_OP_WRITE_ZEROES:
1977 zram_bio_discard(zram, bio);
1978 break;
1979 default:
1980 WARN_ON_ONCE(1);
1981 bio_endio(bio);
1982 }
1983 }
1984
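/*
 * Illustrative note: this is the ->swap_slot_free_notify() hook. The swap
 * code calls it when a swap slot backed by this device is freed, so the
 * compressed copy can be dropped right away instead of lingering until it
 * is overwritten or discarded. The trylock keeps the callback non-blocking;
 * contended slots are merely counted as miss_free and left alone.
 */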
1985 static void zram_slot_free_notify(struct block_device *bdev,
1986 unsigned long index)
1987 {
1988 struct zram *zram;
1989
1990 zram = bdev->bd_disk->private_data;
1991
1992 atomic64_inc(&zram->stats.notify_free);
1993 if (!zram_slot_trylock(zram, index)) {
1994 atomic64_inc(&zram->stats.miss_free);
1995 return;
1996 }
1997
1998 zram_free_page(zram, index);
1999 zram_slot_unlock(zram, index);
2000 }
2001
2002 static void zram_destroy_comps(struct zram *zram)
2003 {
2004 u32 prio;
2005
2006 for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) {
2007 struct zcomp *comp = zram->comps[prio];
2008
2009 zram->comps[prio] = NULL;
2010 if (!comp)
2011 continue;
2012 zcomp_destroy(comp);
2013 zram->num_active_comps--;
2014 }
2015
2016 for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
2017 /* Do not free statically defined compression algorithms */
2018 if (zram->comp_algs[prio] != default_compressor)
2019 kfree(zram->comp_algs[prio]);
2020 zram->comp_algs[prio] = NULL;
2021 }
2022 }
2023
2024 static void zram_reset_device(struct zram *zram)
2025 {
2026 down_write(&zram->init_lock);
2027
2028 zram->limit_pages = 0;
2029
2030 set_capacity_and_notify(zram->disk, 0);
2031 part_stat_set_all(zram->disk->part0, 0);
2032
2033 	/* All in-flight I/O on every CPU has completed, so it's safe to free */
2034 zram_meta_free(zram, zram->disksize);
2035 zram->disksize = 0;
2036 zram_destroy_comps(zram);
2037 memset(&zram->stats, 0, sizeof(zram->stats));
2038 reset_bdev(zram);
2039
2040 comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
2041 up_write(&zram->init_lock);
2042 }
2043
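/*
 * Illustrative usage (assuming the standard sysfs layout): the device size
 * is configured before first use, e.g.
 *
 *   echo 1G > /sys/block/zram0/disksize
 *
 * memparse() accepts the usual K/M/G suffixes; the value is then rounded
 * up to a multiple of PAGE_SIZE below.
 */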
2044 static ssize_t disksize_store(struct device *dev,
2045 struct device_attribute *attr, const char *buf, size_t len)
2046 {
2047 u64 disksize;
2048 struct zcomp *comp;
2049 struct zram *zram = dev_to_zram(dev);
2050 int err;
2051 u32 prio;
2052
2053 disksize = memparse(buf, NULL);
2054 if (!disksize)
2055 return -EINVAL;
2056
2057 down_write(&zram->init_lock);
2058 if (init_done(zram)) {
2059 pr_info("Cannot change disksize for initialized device\n");
2060 err = -EBUSY;
2061 goto out_unlock;
2062 }
2063
2064 disksize = PAGE_ALIGN(disksize);
2065 if (!zram_meta_alloc(zram, disksize)) {
2066 err = -ENOMEM;
2067 goto out_unlock;
2068 }
2069
2070 for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) {
2071 if (!zram->comp_algs[prio])
2072 continue;
2073
2074 comp = zcomp_create(zram->comp_algs[prio]);
2075 if (IS_ERR(comp)) {
2076 pr_err("Cannot initialise %s compressing backend\n",
2077 zram->comp_algs[prio]);
2078 err = PTR_ERR(comp);
2079 goto out_free_comps;
2080 }
2081
2082 zram->comps[prio] = comp;
2083 zram->num_active_comps++;
2084 }
2085 zram->disksize = disksize;
2086 set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT);
2087 up_write(&zram->init_lock);
2088
2089 return len;
2090
2091 out_free_comps:
2092 zram_destroy_comps(zram);
2093 zram_meta_free(zram, disksize);
2094 out_unlock:
2095 up_write(&zram->init_lock);
2096 return err;
2097 }
2098
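/*
 * Illustrative usage: writing any non-zero value resets an unused device
 * back to its pristine state, e.g.
 *
 *   swapoff /dev/zram0
 *   echo 1 > /sys/block/zram0/reset
 *
 * The write fails with -EBUSY while the device is still open or claimed.
 */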
2099 static ssize_t reset_store(struct device *dev,
2100 struct device_attribute *attr, const char *buf, size_t len)
2101 {
2102 int ret;
2103 unsigned short do_reset;
2104 struct zram *zram;
2105 struct gendisk *disk;
2106
2107 ret = kstrtou16(buf, 10, &do_reset);
2108 if (ret)
2109 return ret;
2110
2111 if (!do_reset)
2112 return -EINVAL;
2113
2114 zram = dev_to_zram(dev);
2115 disk = zram->disk;
2116
2117 mutex_lock(&disk->open_mutex);
2118 	/* Do not reset an active or claimed device */
2119 if (disk_openers(disk) || zram->claim) {
2120 mutex_unlock(&disk->open_mutex);
2121 return -EBUSY;
2122 }
2123
2124 	/* From now on, nobody can open /dev/zram[0-9] */
2125 zram->claim = true;
2126 mutex_unlock(&disk->open_mutex);
2127
2128 	/* Make sure all pending I/O is finished */
2129 sync_blockdev(disk->part0);
2130 zram_reset_device(zram);
2131
2132 mutex_lock(&disk->open_mutex);
2133 zram->claim = false;
2134 mutex_unlock(&disk->open_mutex);
2135
2136 return len;
2137 }
2138
2139 static int zram_open(struct gendisk *disk, blk_mode_t mode)
2140 {
2141 struct zram *zram = disk->private_data;
2142
2143 WARN_ON(!mutex_is_locked(&disk->open_mutex));
2144
2145 	/* zram was claimed for reset, so the open request fails */
2146 if (zram->claim)
2147 return -EBUSY;
2148 return 0;
2149 }
2150
2151 static const struct block_device_operations zram_devops = {
2152 .open = zram_open,
2153 .submit_bio = zram_submit_bio,
2154 .swap_slot_free_notify = zram_slot_free_notify,
2155 .owner = THIS_MODULE
2156 };
2157
2158 static DEVICE_ATTR_WO(compact);
2159 static DEVICE_ATTR_RW(disksize);
2160 static DEVICE_ATTR_RO(initstate);
2161 static DEVICE_ATTR_WO(reset);
2162 static DEVICE_ATTR_WO(mem_limit);
2163 static DEVICE_ATTR_WO(mem_used_max);
2164 static DEVICE_ATTR_WO(idle);
2165 static DEVICE_ATTR_RW(max_comp_streams);
2166 static DEVICE_ATTR_RW(comp_algorithm);
2167 #ifdef CONFIG_ZRAM_WRITEBACK
2168 static DEVICE_ATTR_RW(backing_dev);
2169 static DEVICE_ATTR_WO(writeback);
2170 static DEVICE_ATTR_RW(writeback_limit);
2171 static DEVICE_ATTR_RW(writeback_limit_enable);
2172 #endif
2173 #ifdef CONFIG_ZRAM_MULTI_COMP
2174 static DEVICE_ATTR_RW(recomp_algorithm);
2175 static DEVICE_ATTR_WO(recompress);
2176 #endif
2177
2178 static struct attribute *zram_disk_attrs[] = {
2179 &dev_attr_disksize.attr,
2180 &dev_attr_initstate.attr,
2181 &dev_attr_reset.attr,
2182 &dev_attr_compact.attr,
2183 &dev_attr_mem_limit.attr,
2184 &dev_attr_mem_used_max.attr,
2185 &dev_attr_idle.attr,
2186 &dev_attr_max_comp_streams.attr,
2187 &dev_attr_comp_algorithm.attr,
2188 #ifdef CONFIG_ZRAM_WRITEBACK
2189 &dev_attr_backing_dev.attr,
2190 &dev_attr_writeback.attr,
2191 &dev_attr_writeback_limit.attr,
2192 &dev_attr_writeback_limit_enable.attr,
2193 #endif
2194 &dev_attr_io_stat.attr,
2195 &dev_attr_mm_stat.attr,
2196 #ifdef CONFIG_ZRAM_WRITEBACK
2197 &dev_attr_bd_stat.attr,
2198 #endif
2199 &dev_attr_debug_stat.attr,
2200 #ifdef CONFIG_ZRAM_MULTI_COMP
2201 &dev_attr_recomp_algorithm.attr,
2202 &dev_attr_recompress.attr,
2203 #endif
2204 NULL,
2205 };
2206
2207 ATTRIBUTE_GROUPS(zram_disk);
2208
2209 /*
2210  * Allocate and initialize a new zram device. The function returns a
2211  * device_id ('>= 0') upon success, and a negative value otherwise.
2212 */
2213 static int zram_add(void)
2214 {
2215 struct zram *zram;
2216 int ret, device_id;
2217
2218 zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
2219 if (!zram)
2220 return -ENOMEM;
2221
2222 ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
2223 if (ret < 0)
2224 goto out_free_dev;
2225 device_id = ret;
2226
2227 init_rwsem(&zram->init_lock);
2228 #ifdef CONFIG_ZRAM_WRITEBACK
2229 spin_lock_init(&zram->wb_limit_lock);
2230 #endif
2231
2232 /* gendisk structure */
2233 zram->disk = blk_alloc_disk(NUMA_NO_NODE);
2234 if (!zram->disk) {
2235 pr_err("Error allocating disk structure for device %d\n",
2236 device_id);
2237 ret = -ENOMEM;
2238 goto out_free_idr;
2239 }
2240
2241 zram->disk->major = zram_major;
2242 zram->disk->first_minor = device_id;
2243 zram->disk->minors = 1;
2244 zram->disk->flags |= GENHD_FL_NO_PART;
2245 zram->disk->fops = &zram_devops;
2246 zram->disk->private_data = zram;
2247 snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
2248
2249 	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
2250 set_capacity(zram->disk, 0);
2251 	/* zram devices sort of resemble non-rotational disks */
2252 blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
2253 blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
2254
2255 	/*
2256 	 * Ensure that we always get PAGE_SIZE-aligned and
2257 	 * n*PAGE_SIZE-sized I/O requests.
2258 	 */
2259 blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
2260 blk_queue_logical_block_size(zram->disk->queue,
2261 ZRAM_LOGICAL_BLOCK_SIZE);
2262 blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
2263 blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
2264 zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
2265 blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
2266
2267 	/*
2268 	 * zram_bio_discard() will clear all logical blocks if the logical block
2269 	 * size is identical to the physical block size (PAGE_SIZE). But if it
2270 	 * is different, we will skip discarding some parts of logical blocks in
2271 	 * the part of the request range which isn't aligned to the physical
2272 	 * block size. So we can't ensure that all discarded logical blocks are
2273 	 * zeroed.
2274 	 */
2275 if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
2276 blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
2277
2278 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
2279 ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
2280 if (ret)
2281 goto out_cleanup_disk;
2282
2283 comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
2284
2285 zram_debugfs_register(zram);
2286 pr_info("Added device: %s\n", zram->disk->disk_name);
2287 return device_id;
2288
2289 out_cleanup_disk:
2290 put_disk(zram->disk);
2291 out_free_idr:
2292 idr_remove(&zram_index_idr, device_id);
2293 out_free_dev:
2294 kfree(zram);
2295 return ret;
2296 }
2297
2298 static int zram_remove(struct zram *zram)
2299 {
2300 bool claimed;
2301
2302 mutex_lock(&zram->disk->open_mutex);
2303 if (disk_openers(zram->disk)) {
2304 mutex_unlock(&zram->disk->open_mutex);
2305 return -EBUSY;
2306 }
2307
2308 claimed = zram->claim;
2309 if (!claimed)
2310 zram->claim = true;
2311 mutex_unlock(&zram->disk->open_mutex);
2312
2313 zram_debugfs_unregister(zram);
2314
2315 if (claimed) {
2316 		/*
2317 		 * If we were claimed by reset_store(), del_gendisk() will
2318 		 * wait until reset_store() is done, so there is nothing to do.
2319 		 */
2320 ;
2321 } else {
2322 		/* Make sure all pending I/O is finished */
2323 sync_blockdev(zram->disk->part0);
2324 zram_reset_device(zram);
2325 }
2326
2327 pr_info("Removed device: %s\n", zram->disk->disk_name);
2328
2329 del_gendisk(zram->disk);
2330
2331 /* del_gendisk drains pending reset_store */
2332 WARN_ON_ONCE(claimed && zram->claim);
2333
2334 /*
2335 * disksize_store() may be called in between zram_reset_device()
2336 * and del_gendisk(), so run the last reset to avoid leaking
2337 * anything allocated with disksize_store()
2338 */
2339 zram_reset_device(zram);
2340
2341 put_disk(zram->disk);
2342 kfree(zram);
2343 return 0;
2344 }
2345
2346 /* zram-control sysfs attributes */
2347
2348 /*
2349  * NOTE: hot_add attribute is not the usual read-only sysfs attribute, in
2350  * the sense that reading from this file does alter the state of your
2351  * system -- it creates a new un-initialized zram device and returns this
2352  * device's device_id (or an error code if it fails to create a new device).
2353 */
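/*
 * Illustrative usage (assuming the standard zram-control sysfs layout):
 *
 *   cat /sys/class/zram-control/hot_add
 *
 * prints the id of the freshly added device, e.g. "4" for /dev/zram4.
 */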
2354 static ssize_t hot_add_show(const struct class *class,
2355 const struct class_attribute *attr,
2356 char *buf)
2357 {
2358 int ret;
2359
2360 mutex_lock(&zram_index_mutex);
2361 ret = zram_add();
2362 mutex_unlock(&zram_index_mutex);
2363
2364 if (ret < 0)
2365 return ret;
2366 return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
2367 }
2368 /* This attribute must be set to 0400, so CLASS_ATTR_RO() cannot be used */
2369 static struct class_attribute class_attr_hot_add =
2370 __ATTR(hot_add, 0400, hot_add_show, NULL);
2371
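/*
 * Illustrative usage: writing a device id removes that device, provided
 * nobody holds it open, e.g.
 *
 *   echo 4 > /sys/class/zram-control/hot_remove
 */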
2372 static ssize_t hot_remove_store(const struct class *class,
2373 const struct class_attribute *attr,
2374 const char *buf,
2375 size_t count)
2376 {
2377 struct zram *zram;
2378 int ret, dev_id;
2379
2380 /* dev_id is gendisk->first_minor, which is `int' */
2381 ret = kstrtoint(buf, 10, &dev_id);
2382 if (ret)
2383 return ret;
2384 if (dev_id < 0)
2385 return -EINVAL;
2386
2387 mutex_lock(&zram_index_mutex);
2388
2389 zram = idr_find(&zram_index_idr, dev_id);
2390 if (zram) {
2391 ret = zram_remove(zram);
2392 if (!ret)
2393 idr_remove(&zram_index_idr, dev_id);
2394 } else {
2395 ret = -ENODEV;
2396 }
2397
2398 mutex_unlock(&zram_index_mutex);
2399 return ret ? ret : count;
2400 }
2401 static CLASS_ATTR_WO(hot_remove);
2402
2403 static struct attribute *zram_control_class_attrs[] = {
2404 &class_attr_hot_add.attr,
2405 &class_attr_hot_remove.attr,
2406 NULL,
2407 };
2408 ATTRIBUTE_GROUPS(zram_control_class);
2409
2410 static struct class zram_control_class = {
2411 .name = "zram-control",
2412 .class_groups = zram_control_class_groups,
2413 };
2414
2415 static int zram_remove_cb(int id, void *ptr, void *data)
2416 {
2417 WARN_ON_ONCE(zram_remove(ptr));
2418 return 0;
2419 }
2420
2421 static void destroy_devices(void)
2422 {
2423 class_unregister(&zram_control_class);
2424 idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
2425 zram_debugfs_destroy();
2426 idr_destroy(&zram_index_idr);
2427 unregister_blkdev(zram_major, "zram");
2428 cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
2429 }
2430
2431 static int __init zram_init(void)
2432 {
2433 int ret;
2434
2435 BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > BITS_PER_LONG);
2436
2437 ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
2438 zcomp_cpu_up_prepare, zcomp_cpu_dead);
2439 if (ret < 0)
2440 return ret;
2441
2442 ret = class_register(&zram_control_class);
2443 if (ret) {
2444 pr_err("Unable to register zram-control class\n");
2445 cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
2446 return ret;
2447 }
2448
2449 zram_debugfs_create();
2450 zram_major = register_blkdev(0, "zram");
2451 if (zram_major <= 0) {
2452 pr_err("Unable to get major number\n");
2453 class_unregister(&zram_control_class);
2454 cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
2455 return -EBUSY;
2456 }
2457
2458 while (num_devices != 0) {
2459 mutex_lock(&zram_index_mutex);
2460 ret = zram_add();
2461 mutex_unlock(&zram_index_mutex);
2462 if (ret < 0)
2463 goto out_error;
2464 num_devices--;
2465 }
2466
2467 return 0;
2468
2469 out_error:
2470 destroy_devices();
2471 return ret;
2472 }
2473
2474 static void __exit zram_exit(void)
2475 {
2476 destroy_devices();
2477 }
2478
2479 module_init(zram_init);
2480 module_exit(zram_exit);
2481
2482 module_param(num_devices, uint, 0);
2483 MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
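/*
 * Illustrative usage: the number of devices created at load time can be
 * overridden when the module is loaded, e.g.
 *
 *   modprobe zram num_devices=2
 *
 * Additional devices can still be added later via the zram-control class.
 */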
2484
2485 MODULE_LICENSE("Dual BSD/GPL");
2486 MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
2487 MODULE_DESCRIPTION("Compressed RAM Block Device");
2488