/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>
#include <linux/part_stat.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo-rle";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
 * Pages that compress to sizes equal to or greater than this are stored
 * uncompressed in memory.
 */
static size_t huge_class_size;

static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio);


static int zram_slot_trylock(struct zram *zram, u32 index)
{
	return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_lock(struct zram *zram, u32 index)
{
	bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}
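
/*
 * Usage sketch (illustrative): every accessor of zram->table[index]
 * brackets its work with these helpers, e.g.
 *
 *	zram_slot_lock(zram, index);
 *	if (zram_allocated(zram, index))
 *		... read/modify the entry ...
 *	zram_slot_unlock(zram, index);
 *
 * The lock is a single bit (ZRAM_LOCK) inside the entry's flags word,
 * so it costs no extra memory per slot.
 */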

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
	return zram->table[index].handle;
}

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
	zram->table[index].handle = handle;
}

/* flag operations require table entry bit_spin_lock() being held */
static bool zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

static inline void zram_set_element(struct zram *zram, u32 index,
			unsigned long element)
{
	zram->table[index].element = element;
}

static unsigned long zram_get_element(struct zram *zram, u32 index)
{
	return zram->table[index].element;
}

static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
	return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram *zram,
					u32 index, size_t size)
{
	unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;

	zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
}
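
/*
 * Layout sketch of table[index].flags (illustrative; see zram_drv.h for
 * the authoritative ZRAM_FLAG_SHIFT value):
 *
 *	bits [0, ZRAM_FLAG_SHIFT)   compressed object size
 *	bits [ZRAM_FLAG_SHIFT, ..)  zram_pageflags, ZRAM_LOCK among them
 *
 * That is why zram_set_obj_size() shifts the old flags up and ORs the
 * size back in, and why zram_get_obj_size() masks with
 * BIT(ZRAM_FLAG_SHIFT) - 1.
 */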

static inline bool zram_allocated(struct zram *zram, u32 index)
{
	return zram_get_obj_size(zram, index) ||
			zram_test_flag(zram, index, ZRAM_SAME) ||
			zram_test_flag(zram, index, ZRAM_WB);
}

#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return false;
}
#endif

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return false;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return false;

	/* I/O request is valid */
	return true;
}
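
/*
 * Worked example (assuming ZRAM_LOGICAL_BLOCK_SIZE == 4096, i.e.
 * ZRAM_SECTOR_PER_LOGICAL_BLOCK == 8 sectors): a request starting at
 * sector 8 with size 4096 passes both alignment checks, while one
 * starting at sector 9, or sized 512, is rejected before the bounds
 * check is even reached.
 */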

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	*index  += (*offset + bvec->bv_len) / PAGE_SIZE;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}
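
/*
 * update_used_max() is the classic lock-free "racy max" pattern: read
 * the current maximum, and only try to cmpxchg our value in while it is
 * still larger. If another CPU raced us, atomic_long_cmpxchg() returns
 * the fresh value and the loop re-evaluates against it, so the stat
 * never goes backwards.
 */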

static inline void zram_fill_page(void *ptr, unsigned long len,
					unsigned long value)
{
	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
	memset_l(ptr, value, len / sizeof(unsigned long));
}

static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	page = (unsigned long *)ptr;
	val = page[0];

	if (val != page[last_pos])
		return false;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return false;
	}

	*element = val;

	return true;
}
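
/*
 * Example: a page freshly zeroed by the allocator is a run of identical
 * unsigned longs, so page_same_filled() returns true with *element == 0
 * and the page is stored as a single word instead of a zsmalloc object.
 * Checking the last word first cheaply rejects most non-uniform pages.
 */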

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}
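
/*
 * mem_limit accepts memparse() suffixes, e.g. (illustrative):
 *
 *	echo 256M > /sys/block/zram0/mem_limit
 *
 * caps the zsmalloc pool at 256MB; echoing 0 removes the limit.
 */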

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(zram->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}
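
/*
 * Only "0" is accepted here: it resets the max_used_pages watermark to
 * the pool's current size, e.g. (illustrative)
 *
 *	echo 0 > /sys/block/zram0/mem_used_max
 */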

static ssize_t idle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	int index;

	if (!sysfs_streq(buf, "all"))
		return -EINVAL;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	for (index = 0; index < nr_pages; index++) {
		/*
		 * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race.
		 * See the comment in writeback_store.
		 */
		zram_slot_lock(zram, index);
		if (zram_allocated(zram, index) &&
				!zram_test_flag(zram, index, ZRAM_UNDER_WB))
			zram_set_flag(zram, index, ZRAM_IDLE);
		zram_slot_unlock(zram, index);
	}

	up_read(&zram->init_lock);

	return len;
}
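
/*
 * Typical flow (illustrative): mark everything idle, let the system run,
 * then write back whatever is still idle:
 *
 *	echo all > /sys/block/zram0/idle
 *	... some time later ...
 *	echo idle > /sys/block/zram0/writeback
 *
 * Slots touched in between have ZRAM_IDLE cleared by zram_accessed().
 */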

#ifdef CONFIG_ZRAM_WRITEBACK
static ssize_t writeback_limit_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	u64 val;
	ssize_t ret = -EINVAL;

	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->wb_limit_enable = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
	ret = len;

	return ret;
}

static ssize_t writeback_limit_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	bool val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->wb_limit_enable;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t writeback_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	u64 val;
	ssize_t ret = -EINVAL;

	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->bd_wb_limit = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
	ret = len;

	return ret;
}

static ssize_t writeback_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->bd_wb_limit;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
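
/*
 * Both knobs are in units of 4K pages (see FOUR_K below). A sketch of
 * limiting writeback to 4MB (illustrative):
 *
 *	echo 1 > /sys/block/zram0/writeback_limit_enable
 *	echo 1024 > /sys/block/zram0/writeback_limit
 *
 * writeback_store() decrements bd_wb_limit per page written and fails
 * with -EIO once the budget is exhausted.
 */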

static void reset_bdev(struct zram *zram)
{
	struct block_device *bdev;

	if (!zram->backing_dev)
		return;

	bdev = zram->bdev;
	if (zram->old_block_size)
		set_blocksize(bdev, zram->old_block_size);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	/* hope filp_close flushes all of the IO */
	filp_close(zram->backing_dev, NULL);
	zram->backing_dev = NULL;
	zram->old_block_size = 0;
	zram->bdev = NULL;
	zram->disk->queue->backing_dev_info->capabilities |=
				BDI_CAP_SYNCHRONOUS_IO;
	kvfree(zram->bitmap);
	zram->bitmap = NULL;
}

static ssize_t backing_dev_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct file *file;
	struct zram *zram = dev_to_zram(dev);
	char *p;
	ssize_t ret;

	down_read(&zram->init_lock);
	file = zram->backing_dev;
	if (!file) {
		memcpy(buf, "none\n", 5);
		up_read(&zram->init_lock);
		return 5;
	}

	p = file_path(file, buf, PAGE_SIZE - 1);
	if (IS_ERR(p)) {
		ret = PTR_ERR(p);
		goto out;
	}

	ret = strlen(p);
	memmove(buf, p, ret);
	buf[ret++] = '\n';
out:
	up_read(&zram->init_lock);
	return ret;
}

static ssize_t backing_dev_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	char *file_name;
	size_t sz;
	struct file *backing_dev = NULL;
	struct inode *inode;
	struct address_space *mapping;
	unsigned int bitmap_sz, old_block_size = 0;
	unsigned long nr_pages, *bitmap = NULL;
	struct block_device *bdev = NULL;
	int err;
	struct zram *zram = dev_to_zram(dev);

	file_name = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!file_name)
		return -ENOMEM;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Can't setup backing device for initialized device\n");
		err = -EBUSY;
		goto out;
	}

	strlcpy(file_name, buf, PATH_MAX);
	/* ignore trailing newline */
	sz = strlen(file_name);
	if (sz > 0 && file_name[sz - 1] == '\n')
		file_name[sz - 1] = 0x00;

	backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
	if (IS_ERR(backing_dev)) {
		err = PTR_ERR(backing_dev);
		backing_dev = NULL;
		goto out;
	}

	mapping = backing_dev->f_mapping;
	inode = mapping->host;

	/* Only block devices are supported for the moment */
	if (!S_ISBLK(inode->i_mode)) {
		err = -ENOTBLK;
		goto out;
	}

	bdev = bdgrab(I_BDEV(inode));
	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
	if (err < 0) {
		bdev = NULL;
		goto out;
	}

	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
	bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
	if (!bitmap) {
		err = -ENOMEM;
		goto out;
	}

	old_block_size = block_size(bdev);
	err = set_blocksize(bdev, PAGE_SIZE);
	if (err)
		goto out;

	reset_bdev(zram);

	zram->old_block_size = old_block_size;
	zram->bdev = bdev;
	zram->backing_dev = backing_dev;
	zram->bitmap = bitmap;
	zram->nr_pages = nr_pages;
	/*
	 * With the writeback feature, zram does asynchronous IO, so it is
	 * no longer a synchronous device and the synchronous io flag must
	 * be dropped. Otherwise, the upper layer (e.g., swap) could wait
	 * for IO completion rather than (submit and return), which would
	 * make the system sluggish.
	 * Furthermore, when the IO function returns (e.g., swap_readpage),
	 * the upper layer expects the IO to be done and may free the page,
	 * while in fact the IO is still in flight, finally causing a
	 * use-after-free when the IO really completes.
	 */
	zram->disk->queue->backing_dev_info->capabilities &=
			~BDI_CAP_SYNCHRONOUS_IO;
	up_write(&zram->init_lock);

	pr_info("setup backing device %s\n", file_name);
	kfree(file_name);

	return len;
out:
	if (bitmap)
		kvfree(bitmap);

	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	if (backing_dev)
		filp_close(backing_dev, NULL);

	up_write(&zram->init_lock);

	kfree(file_name);

	return err;
}
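
/*
 * The backing device must be configured while the device is unused,
 * before disksize is set, e.g. (illustrative path):
 *
 *	echo /dev/sdX1 > /sys/block/zram0/backing_dev
 *	echo 1G > /sys/block/zram0/disksize
 *
 * backing_dev_store() rejects the setup with -EBUSY once init_done().
 */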

static unsigned long alloc_block_bdev(struct zram *zram)
{
	unsigned long blk_idx = 1;
retry:
	/* skip bit 0, so a block index is never confused with zram.handle == 0 */
	blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
	if (blk_idx == zram->nr_pages)
		return 0;

	if (test_and_set_bit(blk_idx, zram->bitmap))
		goto retry;

	atomic64_inc(&zram->stats.bd_count);
	return blk_idx;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
{
	int was_set;

	was_set = test_and_clear_bit(blk_idx, zram->bitmap);
	WARN_ON_ONCE(!was_set);
	atomic64_dec(&zram->stats.bd_count);
}

static void zram_page_end_io(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	page_endio(page, op_is_write(bio_op(bio)),
			blk_status_to_errno(bio->bi_status));
	bio_put(bio);
}

/*
 * Returns 1 if the submission is successful.
 */
static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent)
{
	struct bio *bio;

	bio = bio_alloc(GFP_ATOMIC, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	bio_set_dev(bio, zram->bdev);
	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
		bio_put(bio);
		return -EIO;
	}

	if (!parent) {
		bio->bi_opf = REQ_OP_READ;
		bio->bi_end_io = zram_page_end_io;
	} else {
		bio->bi_opf = parent->bi_opf;
		bio_chain(bio, parent);
	}

	submit_bio(bio);
	return 1;
}

#define HUGE_WRITEBACK 1
#define IDLE_WRITEBACK 2

static ssize_t writeback_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	unsigned long index;
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	ssize_t ret = len;
	int mode;
	unsigned long blk_idx = 0;

	if (sysfs_streq(buf, "idle"))
		mode = IDLE_WRITEBACK;
	else if (sysfs_streq(buf, "huge"))
		mode = HUGE_WRITEBACK;
	else
		return -EINVAL;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		ret = -EINVAL;
		goto release_init_lock;
	}

	if (!zram->backing_dev) {
		ret = -ENODEV;
		goto release_init_lock;
	}

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto release_init_lock;
	}

	for (index = 0; index < nr_pages; index++) {
		struct bio_vec bvec;

		bvec.bv_page = page;
		bvec.bv_len = PAGE_SIZE;
		bvec.bv_offset = 0;

		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && !zram->bd_wb_limit) {
			spin_unlock(&zram->wb_limit_lock);
			ret = -EIO;
			break;
		}
		spin_unlock(&zram->wb_limit_lock);

		if (!blk_idx) {
			blk_idx = alloc_block_bdev(zram);
			if (!blk_idx) {
				ret = -ENOSPC;
				break;
			}
		}

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		if (zram_test_flag(zram, index, ZRAM_WB) ||
				zram_test_flag(zram, index, ZRAM_SAME) ||
				zram_test_flag(zram, index, ZRAM_UNDER_WB))
			goto next;

		if (mode == IDLE_WRITEBACK &&
			  !zram_test_flag(zram, index, ZRAM_IDLE))
			goto next;
		if (mode == HUGE_WRITEBACK &&
			  !zram_test_flag(zram, index, ZRAM_HUGE))
			goto next;
		/*
		 * Clearing ZRAM_UNDER_WB is the caller's duty.
		 * IOW, zram_free_page never clears it.
		 */
		zram_set_flag(zram, index, ZRAM_UNDER_WB);
		/* Needed to close the hugepage writeback race */
		zram_set_flag(zram, index, ZRAM_IDLE);
		zram_slot_unlock(zram, index);
		if (zram_bvec_read(zram, &bvec, index, 0, NULL)) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			zram_slot_unlock(zram, index);
			continue;
		}

		bio_init(&bio, &bio_vec, 1);
		bio_set_dev(&bio, zram->bdev);
		bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
		bio.bi_opf = REQ_OP_WRITE | REQ_SYNC;

		bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset);
		/*
		 * XXX: A single page IO would be inefficient for write
		 * but it is not bad as a starter.
		 */
		ret = submit_bio_wait(&bio);
		if (ret) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			zram_slot_unlock(zram, index);
			continue;
		}

		atomic64_inc(&zram->stats.bd_writes);
		/*
		 * We released zram_slot_lock so we need to check if the
		 * slot was changed. If the slot was freed, we can catch
		 * that easily via zram_allocated.
		 * A subtle case is when the slot is freed/reallocated/marked
		 * as ZRAM_IDLE again. To close the race, idle_store doesn't
		 * mark ZRAM_IDLE once it finds the slot is ZRAM_UNDER_WB.
		 * Thus, we can close the race by checking the ZRAM_IDLE bit.
		 */
		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index) ||
			  !zram_test_flag(zram, index, ZRAM_IDLE)) {
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			goto next;
		}

		zram_free_page(zram, index);
		zram_clear_flag(zram, index, ZRAM_UNDER_WB);
		zram_set_flag(zram, index, ZRAM_WB);
		zram_set_element(zram, index, blk_idx);
		blk_idx = 0;
		atomic64_inc(&zram->stats.pages_stored);
		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
			zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
		spin_unlock(&zram->wb_limit_lock);
next:
		zram_slot_unlock(zram, index);
	}

	if (blk_idx)
		free_block_bdev(zram, blk_idx);
	__free_page(page);
release_init_lock:
	up_read(&zram->init_lock);

	return ret;
}
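
/*
 * Note the locking dance above: a slot is marked ZRAM_UNDER_WB +
 * ZRAM_IDLE, unlocked for the actual IO, then re-locked and re-checked
 * via zram_allocated()/ZRAM_IDLE before the handle is pointed at the
 * backing-device block. The two modes are (illustrative):
 *
 *	echo idle > /sys/block/zram0/writeback
 *	echo huge > /sys/block/zram0/writeback
 */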

struct zram_work {
	struct work_struct work;
	struct zram *zram;
	unsigned long entry;
	struct bio *bio;
	struct bio_vec bvec;
};

#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
{
	struct zram_work *zw = container_of(work, struct zram_work, work);
	struct zram *zram = zw->zram;
	unsigned long entry = zw->entry;
	struct bio *bio = zw->bio;

	read_from_bdev_async(zram, &zw->bvec, entry, bio);
}

/*
 * The block layer wants one ->make_request_fn to be active at a time,
 * so if we use chained IO with the parent IO in the same context,
 * it's a deadlock. To avoid that, we use a worker thread context.
 */
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	struct zram_work work;

	work.bvec = *bvec;
	work.zram = zram;
	work.entry = entry;
	work.bio = bio;

	INIT_WORK_ONSTACK(&work.work, zram_sync_read);
	queue_work(system_unbound_wq, &work.work);
	flush_work(&work.work);
	destroy_work_on_stack(&work.work);

	return 1;
}
#else
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	WARN_ON(1);
	return -EIO;
}
#endif

static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	atomic64_inc(&zram->stats.bd_reads);
	if (sync)
		return read_from_bdev_sync(zram, bvec, entry, parent);
	else
		return read_from_bdev_async(zram, bvec, entry, parent);
}
#else
static inline void reset_bdev(struct zram *zram) {};
static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	return -EIO;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
#endif

#ifdef CONFIG_ZRAM_MEMORY_TRACKING

static struct dentry *zram_debugfs_root;

static void zram_debugfs_create(void)
{
	zram_debugfs_root = debugfs_create_dir("zram", NULL);
}

static void zram_debugfs_destroy(void)
{
	debugfs_remove_recursive(zram_debugfs_root);
}

static void zram_accessed(struct zram *zram, u32 index)
{
	zram_clear_flag(zram, index, ZRAM_IDLE);
	zram->table[index].ac_time = ktime_get_boottime();
}

static ssize_t read_block_state(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t index, written = 0;
	struct zram *zram = file->private_data;
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	struct timespec64 ts;

	kbuf = kvmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		kvfree(kbuf);
		return -EINVAL;
	}

	for (index = *ppos; index < nr_pages; index++) {
		int copied;

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		ts = ktime_to_timespec64(zram->table[index].ac_time);
		copied = snprintf(kbuf + written, count,
			"%12zd %12lld.%06lu %c%c%c%c\n",
			index, (s64)ts.tv_sec,
			ts.tv_nsec / NSEC_PER_USEC,
			zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
			zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
			zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.');

		if (count < copied) {
			zram_slot_unlock(zram, index);
			break;
		}
		written += copied;
		count -= copied;
next:
		zram_slot_unlock(zram, index);
		*ppos += 1;
	}

	up_read(&zram->init_lock);
	if (copy_to_user(buf, kbuf, written))
		written = -EFAULT;
	kvfree(kbuf);

	return written;
}

static const struct file_operations proc_zram_block_state_op = {
	.open = simple_open,
	.read = read_block_state,
	.llseek = default_llseek,
};
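
/*
 * With CONFIG_ZRAM_MEMORY_TRACKING, per-slot state can be inspected,
 * e.g. (illustrative output matching the snprintf format above):
 *
 *	$ cat /sys/kernel/debug/zram/zram0/block_state
 *	         300    75.033841 .wh.
 *	         301    63.806904 s...
 *
 * where s/w/h/i flag same-filled, written-back, huge and idle slots.
 */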

static void zram_debugfs_register(struct zram *zram)
{
	if (!zram_debugfs_root)
		return;

	zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
						zram_debugfs_root);
	debugfs_create_file("block_state", 0400, zram->debugfs_dir,
				zram, &proc_zram_block_state_op);
}

static void zram_debugfs_unregister(struct zram *zram)
{
	debugfs_remove_recursive(zram->debugfs_dir);
}
#else
static void zram_debugfs_create(void) {};
static void zram_debugfs_destroy(void) {};
static void zram_accessed(struct zram *zram, u32 index)
{
	zram_clear_flag(zram, index, ZRAM_IDLE);
};
static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif

/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[ARRAY_SIZE(zram->compressor)];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
	/* ignore trailing newline */
	sz = strlen(compressor);
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	strcpy(zram->compressor, compressor);
	up_write(&zram->init_lock);
	return len;
}
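
/*
 * Reading this attr lists the available algorithms with the active one
 * in brackets; writing selects one, which must happen before disksize
 * is set. Illustrative session (the list depends on the kernel config):
 *
 *	$ cat /sys/block/zram0/comp_algorithm
 *	lzo [lzo-rle] lz4 zstd
 *	$ echo zstd > /sys/block/zram0/comp_algorithm
 */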

static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	zs_compact(zram->mem_pool);
	up_read(&zram->init_lock);

	return len;
}
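
/*
 * Any write triggers zsmalloc compaction, e.g. (illustrative):
 *
 *	echo 1 > /sys/block/zram0/compact
 *
 * The number of pages freed so far shows up as pages_compacted in
 * mm_stat.
 */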

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->mem_pool);
		zs_pool_stats(zram->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			pool_stats.pages_compacted,
			(u64)atomic64_read(&zram->stats.huge_pages));
	up_read(&zram->init_lock);

	return ret;
}

#ifdef CONFIG_ZRAM_WRITEBACK
#define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
static ssize_t bd_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
		"%8llu %8llu %8llu\n",
			FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
	up_read(&zram->init_lock);

	return ret;
}
#endif
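
/*
 * bd_stat is deliberately reported in 4K units: FOUR_K() rescales a
 * native page count, so on a 64K-page kernel one written-back page
 * accounts as 16. This keeps the numbers comparable across PAGE_SIZE
 * configurations (and matches the writeback_limit accounting above).
 */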

static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu %8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall),
			(u64)atomic64_read(&zram->stats.miss_free));
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RO(bd_stat);
#endif
static DEVICE_ATTR_RO(debug_stat);

static void zram_meta_free(struct zram *zram, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++)
		zram_free_page(zram, index);

	zs_destroy_pool(zram->mem_pool);
	vfree(zram->table);
}

static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
	size_t num_pages;

	num_pages = disksize >> PAGE_SHIFT;
	zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
	if (!zram->table)
		return false;

	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
	if (!zram->mem_pool) {
		vfree(zram->table);
		return false;
	}

	if (!huge_class_size)
		huge_class_size = zs_huge_class_size(zram->mem_pool);
	return true;
}
1163cd67e10aSMinchan Kim 
1164d2d5e762SWeijie Yang /*
1165d2d5e762SWeijie Yang  * To protect concurrent access to the same index entry, the
1166d2d5e762SWeijie Yang  * caller must hold this table entry's bit_spinlock to indicate
1167d2d5e762SWeijie Yang  * that the entry is being accessed.
1168d2d5e762SWeijie Yang  */
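/*
 * A minimal sketch of the expected caller pattern (this is how
 * zram_bio_discard() and zram_slot_free_notify() below use it):
 *
 *	zram_slot_lock(zram, index);
 *	zram_free_page(zram, index);
 *	zram_slot_unlock(zram, index);
 */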
1169cd67e10aSMinchan Kim static void zram_free_page(struct zram *zram, size_t index)
1170cd67e10aSMinchan Kim {
1171db8ffbd4SMinchan Kim 	unsigned long handle;
1172db8ffbd4SMinchan Kim 
11737e529283SMinchan Kim #ifdef CONFIG_ZRAM_MEMORY_TRACKING
11747e529283SMinchan Kim 	zram->table[index].ac_time = 0;
11757e529283SMinchan Kim #endif
1176e82592c4SMinchan Kim 	if (zram_test_flag(zram, index, ZRAM_IDLE))
1177e82592c4SMinchan Kim 		zram_clear_flag(zram, index, ZRAM_IDLE);
1178e82592c4SMinchan Kim 
117989e85bceSMinchan Kim 	if (zram_test_flag(zram, index, ZRAM_HUGE)) {
118089e85bceSMinchan Kim 		zram_clear_flag(zram, index, ZRAM_HUGE);
118189e85bceSMinchan Kim 		atomic64_dec(&zram->stats.huge_pages);
118289e85bceSMinchan Kim 	}
118389e85bceSMinchan Kim 
11847e529283SMinchan Kim 	if (zram_test_flag(zram, index, ZRAM_WB)) {
11857e529283SMinchan Kim 		zram_clear_flag(zram, index, ZRAM_WB);
11867e529283SMinchan Kim 		free_block_bdev(zram, zram_get_element(zram, index));
11877e529283SMinchan Kim 		goto out;
1188db8ffbd4SMinchan Kim 	}
1189cd67e10aSMinchan Kim 
1190cd67e10aSMinchan Kim 	/*
11918e19d540Szhouxianrong 	 * No memory is allocated for same-element-filled pages.
11928e19d540Szhouxianrong 	 * Simply clear the ZRAM_SAME flag.
1193cd67e10aSMinchan Kim 	 */
1194beb6602cSMinchan Kim 	if (zram_test_flag(zram, index, ZRAM_SAME)) {
1195beb6602cSMinchan Kim 		zram_clear_flag(zram, index, ZRAM_SAME);
11968e19d540Szhouxianrong 		atomic64_dec(&zram->stats.same_pages);
11977e529283SMinchan Kim 		goto out;
1198cd67e10aSMinchan Kim 	}
1199cd67e10aSMinchan Kim 
1200db8ffbd4SMinchan Kim 	handle = zram_get_handle(zram, index);
12018e19d540Szhouxianrong 	if (!handle)
12028e19d540Szhouxianrong 		return;
12038e19d540Szhouxianrong 
1204beb6602cSMinchan Kim 	zs_free(zram->mem_pool, handle);
1205cd67e10aSMinchan Kim 
1206beb6602cSMinchan Kim 	atomic64_sub(zram_get_obj_size(zram, index),
1207d2d5e762SWeijie Yang 			&zram->stats.compr_data_size);
12087e529283SMinchan Kim out:
120990a7806eSSergey Senozhatsky 	atomic64_dec(&zram->stats.pages_stored);
1210643ae61dSMinchan Kim 	zram_set_handle(zram, index, 0);
1211beb6602cSMinchan Kim 	zram_set_obj_size(zram, index, 0);
1212a939888eSMinchan Kim 	WARN_ON_ONCE(zram->table[index].flags &
1213a939888eSMinchan Kim 		~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
1214cd67e10aSMinchan Kim }
1215cd67e10aSMinchan Kim 
12168e654f8fSMinchan Kim static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
12178e654f8fSMinchan Kim 				struct bio *bio, bool partial_io)
1218cd67e10aSMinchan Kim {
12191f7319c7SMinchan Kim 	int ret;
122092967471SMinchan Kim 	unsigned long handle;
1221ebaf9ab5SSergey Senozhatsky 	unsigned int size;
12221f7319c7SMinchan Kim 	void *src, *dst;
12231f7319c7SMinchan Kim 
12248e654f8fSMinchan Kim 	zram_slot_lock(zram, index);
12258e654f8fSMinchan Kim 	if (zram_test_flag(zram, index, ZRAM_WB)) {
12268e654f8fSMinchan Kim 		struct bio_vec bvec;
12278e654f8fSMinchan Kim 
12288e654f8fSMinchan Kim 		zram_slot_unlock(zram, index);
12298e654f8fSMinchan Kim 
12308e654f8fSMinchan Kim 		bvec.bv_page = page;
12318e654f8fSMinchan Kim 		bvec.bv_len = PAGE_SIZE;
12328e654f8fSMinchan Kim 		bvec.bv_offset = 0;
12338e654f8fSMinchan Kim 		return read_from_bdev(zram, &bvec,
12348e654f8fSMinchan Kim 				zram_get_element(zram, index),
12358e654f8fSMinchan Kim 				bio, partial_io);
12368e654f8fSMinchan Kim 	}
12378e654f8fSMinchan Kim 
1238643ae61dSMinchan Kim 	handle = zram_get_handle(zram, index);
1239ae94264eSMinchan Kim 	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
1240ae94264eSMinchan Kim 		unsigned long value;
1241ae94264eSMinchan Kim 		void *mem;
1242ae94264eSMinchan Kim 
1243ae94264eSMinchan Kim 		value = handle ? zram_get_element(zram, index) : 0;
1244ae94264eSMinchan Kim 		mem = kmap_atomic(page);
1245ae94264eSMinchan Kim 		zram_fill_page(mem, PAGE_SIZE, value);
1246ae94264eSMinchan Kim 		kunmap_atomic(mem);
1247ae94264eSMinchan Kim 		zram_slot_unlock(zram, index);
1248ae94264eSMinchan Kim 		return 0;
1249ae94264eSMinchan Kim 	}
1250ae94264eSMinchan Kim 
1251beb6602cSMinchan Kim 	size = zram_get_obj_size(zram, index);
1252cd67e10aSMinchan Kim 
1253beb6602cSMinchan Kim 	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
1254ebaf9ab5SSergey Senozhatsky 	if (size == PAGE_SIZE) {
12551f7319c7SMinchan Kim 		dst = kmap_atomic(page);
12561f7319c7SMinchan Kim 		memcpy(dst, src, PAGE_SIZE);
12571f7319c7SMinchan Kim 		kunmap_atomic(dst);
12581f7319c7SMinchan Kim 		ret = 0;
1259ebaf9ab5SSergey Senozhatsky 	} else {
1260ebaf9ab5SSergey Senozhatsky 		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
1261ebaf9ab5SSergey Senozhatsky 
12621f7319c7SMinchan Kim 		dst = kmap_atomic(page);
12631f7319c7SMinchan Kim 		ret = zcomp_decompress(zstrm, src, size, dst);
12641f7319c7SMinchan Kim 		kunmap_atomic(dst);
1265ebaf9ab5SSergey Senozhatsky 		zcomp_stream_put(zram->comp);
1266ebaf9ab5SSergey Senozhatsky 	}
1267beb6602cSMinchan Kim 	zs_unmap_object(zram->mem_pool, handle);
126886c49814SMinchan Kim 	zram_slot_unlock(zram, index);
1269cd67e10aSMinchan Kim 
1270cd67e10aSMinchan Kim 	/* Should NEVER happen. Return bio error if it does. */
12711f7319c7SMinchan Kim 	if (unlikely(ret))
1272cd67e10aSMinchan Kim 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
1273cd67e10aSMinchan Kim 
12741f7319c7SMinchan Kim 	return ret;
1275cd67e10aSMinchan Kim }
1276cd67e10aSMinchan Kim 
1277cd67e10aSMinchan Kim static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
12788e654f8fSMinchan Kim 				u32 index, int offset, struct bio *bio)
1279cd67e10aSMinchan Kim {
1280cd67e10aSMinchan Kim 	int ret;
1281cd67e10aSMinchan Kim 	struct page *page;
12821f7319c7SMinchan Kim 
1283cd67e10aSMinchan Kim 	page = bvec->bv_page;
12841f7319c7SMinchan Kim 	if (is_partial_io(bvec)) {
1285cd67e10aSMinchan Kim 		/* Use a temporary buffer to decompress the page */
12861f7319c7SMinchan Kim 		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
12871f7319c7SMinchan Kim 		if (!page)
12881f7319c7SMinchan Kim 			return -ENOMEM;
1289cd67e10aSMinchan Kim 	}
1290cd67e10aSMinchan Kim 
12918e654f8fSMinchan Kim 	ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
1292b7ca232eSSergey Senozhatsky 	if (unlikely(ret))
12931f7319c7SMinchan Kim 		goto out;
1294cd67e10aSMinchan Kim 
12951f7319c7SMinchan Kim 	if (is_partial_io(bvec)) {
12961f7319c7SMinchan Kim 		void *dst = kmap_atomic(bvec->bv_page);
12971f7319c7SMinchan Kim 		void *src = kmap_atomic(page);
1298cd67e10aSMinchan Kim 
12991f7319c7SMinchan Kim 		memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
13001f7319c7SMinchan Kim 		kunmap_atomic(src);
13011f7319c7SMinchan Kim 		kunmap_atomic(dst);
13021f7319c7SMinchan Kim 	}
13031f7319c7SMinchan Kim out:
1304cd67e10aSMinchan Kim 	if (is_partial_io(bvec))
13051f7319c7SMinchan Kim 		__free_page(page);
13061f7319c7SMinchan Kim 
1307cd67e10aSMinchan Kim 	return ret;
1308cd67e10aSMinchan Kim }
1309cd67e10aSMinchan Kim 
1310db8ffbd4SMinchan Kim static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
1311db8ffbd4SMinchan Kim 				u32 index, struct bio *bio)
1312cd67e10aSMinchan Kim {
1313ae85a807SMinchan Kim 	int ret = 0;
1314461a8eeeSMinchan Kim 	unsigned long alloced_pages;
13151f7319c7SMinchan Kim 	unsigned long handle = 0;
131697ec7c8bSMinchan Kim 	unsigned int comp_len = 0;
131797ec7c8bSMinchan Kim 	void *src, *dst, *mem;
131897ec7c8bSMinchan Kim 	struct zcomp_strm *zstrm;
131997ec7c8bSMinchan Kim 	struct page *page = bvec->bv_page;
132097ec7c8bSMinchan Kim 	unsigned long element = 0;
132197ec7c8bSMinchan Kim 	enum zram_pageflags flags = 0;
132297ec7c8bSMinchan Kim 
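	/*
	 * If every word in the page holds the same value, record only that
	 * value in the table entry (ZRAM_SAME) instead of allocating any
	 * backing storage for the page.
	 */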
132397ec7c8bSMinchan Kim 	mem = kmap_atomic(page);
132497ec7c8bSMinchan Kim 	if (page_same_filled(mem, &element)) {
132597ec7c8bSMinchan Kim 		kunmap_atomic(mem);
132697ec7c8bSMinchan Kim 		/* Free memory associated with this sector now. */
132797ec7c8bSMinchan Kim 		flags = ZRAM_SAME;
132897ec7c8bSMinchan Kim 		atomic64_inc(&zram->stats.same_pages);
132997ec7c8bSMinchan Kim 		goto out;
133097ec7c8bSMinchan Kim 	}
133197ec7c8bSMinchan Kim 	kunmap_atomic(mem);
1332cd67e10aSMinchan Kim 
1333da9556a2SSergey Senozhatsky compress_again:
133497ec7c8bSMinchan Kim 	zstrm = zcomp_stream_get(zram->comp);
13351f7319c7SMinchan Kim 	src = kmap_atomic(page);
133697ec7c8bSMinchan Kim 	ret = zcomp_compress(zstrm, src, &comp_len);
13371f7319c7SMinchan Kim 	kunmap_atomic(src);
1338cd67e10aSMinchan Kim 
1339b7ca232eSSergey Senozhatsky 	if (unlikely(ret)) {
134097ec7c8bSMinchan Kim 		zcomp_stream_put(zram->comp);
1341cd67e10aSMinchan Kim 		pr_err("Compression failed! err=%d\n", ret);
1342beb6602cSMinchan Kim 		zs_free(zram->mem_pool, handle);
13431f7319c7SMinchan Kim 		return ret;
1344cd67e10aSMinchan Kim 	}
1345da9556a2SSergey Senozhatsky 
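	/*
	 * Pages that compress to huge_class_size bytes or more are treated
	 * as incompressible: they are stored uncompressed at PAGE_SIZE and
	 * flagged ZRAM_HUGE further below.
	 */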
1346a939888eSMinchan Kim 	if (comp_len >= huge_class_size)
134789e85bceSMinchan Kim 		comp_len = PAGE_SIZE;
1348da9556a2SSergey Senozhatsky 	/*
1349da9556a2SSergey Senozhatsky 	 * Handle allocation has two paths:
1350da9556a2SSergey Senozhatsky 	 * a) the fast path is executed with preemption disabled (for
1351da9556a2SSergey Senozhatsky 	 *  per-cpu streams) and has the __GFP_DIRECT_RECLAIM bit clear,
1352da9556a2SSergey Senozhatsky 	 *  since we can't sleep;
1353da9556a2SSergey Senozhatsky 	 * b) the slow path enables preemption and attempts to allocate
1354da9556a2SSergey Senozhatsky 	 *  the page with the __GFP_DIRECT_RECLAIM bit set. We have to
1355da9556a2SSergey Senozhatsky 	 *  put the per-cpu compression stream and, thus, re-do
1356da9556a2SSergey Senozhatsky 	 *  the compression once the handle is allocated.
1357da9556a2SSergey Senozhatsky 	 *
1358da9556a2SSergey Senozhatsky 	 * If we have a non-NULL handle here then we are coming
1359da9556a2SSergey Senozhatsky 	 * from the slow path and the handle has already been allocated.
1360da9556a2SSergey Senozhatsky 	 */
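	/*
	 * Descriptive note: zcomp_stream_put() in the slow path below hands
	 * back the per-cpu stream whose ->buffer held the compressed data,
	 * which is why a successful slow-path allocation must jump back to
	 * compress_again instead of reusing the old result.
	 */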
1361da9556a2SSergey Senozhatsky 	if (!handle)
1362beb6602cSMinchan Kim 		handle = zs_malloc(zram->mem_pool, comp_len,
1363da9556a2SSergey Senozhatsky 				__GFP_KSWAPD_RECLAIM |
1364da9556a2SSergey Senozhatsky 				__GFP_NOWARN |
13659bc482d3SMinchan Kim 				__GFP_HIGHMEM |
13669bc482d3SMinchan Kim 				__GFP_MOVABLE);
1367cd67e10aSMinchan Kim 	if (!handle) {
13682aea8493SSergey Senozhatsky 		zcomp_stream_put(zram->comp);
1369623e47fcSSergey Senozhatsky 		atomic64_inc(&zram->stats.writestall);
1370beb6602cSMinchan Kim 		handle = zs_malloc(zram->mem_pool, comp_len,
13719bc482d3SMinchan Kim 				GFP_NOIO | __GFP_HIGHMEM |
13729bc482d3SMinchan Kim 				__GFP_MOVABLE);
1373da9556a2SSergey Senozhatsky 		if (handle)
1374da9556a2SSergey Senozhatsky 			goto compress_again;
13751f7319c7SMinchan Kim 		return -ENOMEM;
1376cd67e10aSMinchan Kim 	}
13779ada9da9SMinchan Kim 
1378beb6602cSMinchan Kim 	alloced_pages = zs_get_total_pages(zram->mem_pool);
137912372755SSergey SENOZHATSKY 	update_used_max(zram, alloced_pages);
138012372755SSergey SENOZHATSKY 
1381461a8eeeSMinchan Kim 	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
138297ec7c8bSMinchan Kim 		zcomp_stream_put(zram->comp);
1383beb6602cSMinchan Kim 		zs_free(zram->mem_pool, handle);
13841f7319c7SMinchan Kim 		return -ENOMEM;
13859ada9da9SMinchan Kim 	}
13869ada9da9SMinchan Kim 
1387beb6602cSMinchan Kim 	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
13881f7319c7SMinchan Kim 
13891f7319c7SMinchan Kim 	src = zstrm->buffer;
13901f7319c7SMinchan Kim 	if (comp_len == PAGE_SIZE)
1391cd67e10aSMinchan Kim 		src = kmap_atomic(page);
13921f7319c7SMinchan Kim 	memcpy(dst, src, comp_len);
13931f7319c7SMinchan Kim 	if (comp_len == PAGE_SIZE)
1394cd67e10aSMinchan Kim 		kunmap_atomic(src);
1395cd67e10aSMinchan Kim 
13962aea8493SSergey Senozhatsky 	zcomp_stream_put(zram->comp);
1397beb6602cSMinchan Kim 	zs_unmap_object(zram->mem_pool, handle);
13984ebbe7f7SMinchan Kim 	atomic64_add(comp_len, &zram->stats.compr_data_size);
13994ebbe7f7SMinchan Kim out:
1400cd67e10aSMinchan Kim 	/*
1401cd67e10aSMinchan Kim 	 * Free the memory associated with this sector
1402cd67e10aSMinchan Kim 	 * before overwriting it with the new data.
1403cd67e10aSMinchan Kim 	 */
140486c49814SMinchan Kim 	zram_slot_lock(zram, index);
1405cd67e10aSMinchan Kim 	zram_free_page(zram, index);
1406db8ffbd4SMinchan Kim 
140789e85bceSMinchan Kim 	if (comp_len == PAGE_SIZE) {
140889e85bceSMinchan Kim 		zram_set_flag(zram, index, ZRAM_HUGE);
140989e85bceSMinchan Kim 		atomic64_inc(&zram->stats.huge_pages);
141089e85bceSMinchan Kim 	}
141189e85bceSMinchan Kim 
1412db8ffbd4SMinchan Kim 	if (flags) {
1413db8ffbd4SMinchan Kim 		zram_set_flag(zram, index, flags);
14144ebbe7f7SMinchan Kim 		zram_set_element(zram, index, element);
14154ebbe7f7SMinchan Kim 	}  else {
1416643ae61dSMinchan Kim 		zram_set_handle(zram, index, handle);
1417beb6602cSMinchan Kim 		zram_set_obj_size(zram, index, comp_len);
14184ebbe7f7SMinchan Kim 	}
141986c49814SMinchan Kim 	zram_slot_unlock(zram, index);
1420cd67e10aSMinchan Kim 
1421cd67e10aSMinchan Kim 	/* Update stats */
142290a7806eSSergey Senozhatsky 	atomic64_inc(&zram->stats.pages_stored);
1423ae85a807SMinchan Kim 	return ret;
14241f7319c7SMinchan Kim }
14251f7319c7SMinchan Kim 
14261f7319c7SMinchan Kim static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
1427db8ffbd4SMinchan Kim 				u32 index, int offset, struct bio *bio)
14281f7319c7SMinchan Kim {
14291f7319c7SMinchan Kim 	int ret;
14301f7319c7SMinchan Kim 	struct page *page = NULL;
14311f7319c7SMinchan Kim 	void *src;
14321f7319c7SMinchan Kim 	struct bio_vec vec;
14331f7319c7SMinchan Kim 
14341f7319c7SMinchan Kim 	vec = *bvec;
14351f7319c7SMinchan Kim 	if (is_partial_io(bvec)) {
14361f7319c7SMinchan Kim 		void *dst;
14371f7319c7SMinchan Kim 		/*
14381f7319c7SMinchan Kim 		 * This is a partial IO. We need to read the full page
14391f7319c7SMinchan Kim 		 * before writing the changes.
14401f7319c7SMinchan Kim 		 */
14411f7319c7SMinchan Kim 		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
14421f7319c7SMinchan Kim 		if (!page)
14431f7319c7SMinchan Kim 			return -ENOMEM;
14441f7319c7SMinchan Kim 
14458e654f8fSMinchan Kim 		ret = __zram_bvec_read(zram, page, index, bio, true);
14461f7319c7SMinchan Kim 		if (ret)
14471f7319c7SMinchan Kim 			goto out;
14481f7319c7SMinchan Kim 
14491f7319c7SMinchan Kim 		src = kmap_atomic(bvec->bv_page);
14501f7319c7SMinchan Kim 		dst = kmap_atomic(page);
14511f7319c7SMinchan Kim 		memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
14521f7319c7SMinchan Kim 		kunmap_atomic(dst);
14531f7319c7SMinchan Kim 		kunmap_atomic(src);
14541f7319c7SMinchan Kim 
14551f7319c7SMinchan Kim 		vec.bv_page = page;
14561f7319c7SMinchan Kim 		vec.bv_len = PAGE_SIZE;
14571f7319c7SMinchan Kim 		vec.bv_offset = 0;
14581f7319c7SMinchan Kim 	}
14591f7319c7SMinchan Kim 
1460db8ffbd4SMinchan Kim 	ret = __zram_bvec_write(zram, &vec, index, bio);
1461cd67e10aSMinchan Kim out:
1462cd67e10aSMinchan Kim 	if (is_partial_io(bvec))
14631f7319c7SMinchan Kim 		__free_page(page);
1464cd67e10aSMinchan Kim 	return ret;
1465cd67e10aSMinchan Kim }
1466cd67e10aSMinchan Kim 
1467f4659d8eSJoonsoo Kim /*
1468f4659d8eSJoonsoo Kim  * zram_bio_discard - handler on discard request
1469f4659d8eSJoonsoo Kim  * @index: physical block index in PAGE_SIZE units
1470f4659d8eSJoonsoo Kim  * @offset: byte offset within physical block
1471f4659d8eSJoonsoo Kim  */
1472f4659d8eSJoonsoo Kim static void zram_bio_discard(struct zram *zram, u32 index,
1473f4659d8eSJoonsoo Kim 			     int offset, struct bio *bio)
1474f4659d8eSJoonsoo Kim {
1475f4659d8eSJoonsoo Kim 	size_t n = bio->bi_iter.bi_size;
1476f4659d8eSJoonsoo Kim 
1477f4659d8eSJoonsoo Kim 	/*
1478f4659d8eSJoonsoo Kim 	 * zram manages data in physical block size units. Because the logical
1479f4659d8eSJoonsoo Kim 	 * block size isn't identical to the physical block size on some
1480f4659d8eSJoonsoo Kim 	 * architectures, we could get a discard request pointing to a specific
1481f4659d8eSJoonsoo Kim 	 * offset within a certain physical block.  Although we could handle
1482f4659d8eSJoonsoo Kim 	 * this request by reading that physical block, decompressing,
1483f4659d8eSJoonsoo Kim 	 * partially zeroing, re-compressing and re-storing it, that isn't
1484f4659d8eSJoonsoo Kim 	 * reasonable because our intent with a discard request is to save
1485f4659d8eSJoonsoo Kim 	 * memory.  So skipping the partial logical block is appropriate here.
1486f4659d8eSJoonsoo Kim 	 */
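	/*
	 * Worked example (illustrative, assuming 4KB pages): a discard of
	 * 8192 bytes at byte offset 512 into a page covers 3584 bytes of
	 * the first page, all of the second and 512 bytes of the third.
	 * Only the second page is freed; the partial head and tail are
	 * left untouched.
	 */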
1487f4659d8eSJoonsoo Kim 	if (offset) {
148838515c73SWeijie Yang 		if (n <= (PAGE_SIZE - offset))
1489f4659d8eSJoonsoo Kim 			return;
1490f4659d8eSJoonsoo Kim 
149138515c73SWeijie Yang 		n -= (PAGE_SIZE - offset);
1492f4659d8eSJoonsoo Kim 		index++;
1493f4659d8eSJoonsoo Kim 	}
1494f4659d8eSJoonsoo Kim 
1495f4659d8eSJoonsoo Kim 	while (n >= PAGE_SIZE) {
149686c49814SMinchan Kim 		zram_slot_lock(zram, index);
1497f4659d8eSJoonsoo Kim 		zram_free_page(zram, index);
149886c49814SMinchan Kim 		zram_slot_unlock(zram, index);
1499015254daSSergey Senozhatsky 		atomic64_inc(&zram->stats.notify_free);
1500f4659d8eSJoonsoo Kim 		index++;
1501f4659d8eSJoonsoo Kim 		n -= PAGE_SIZE;
1502f4659d8eSJoonsoo Kim 	}
1503f4659d8eSJoonsoo Kim }
1504f4659d8eSJoonsoo Kim 
1505ae85a807SMinchan Kim /*
1506ae85a807SMinchan Kim  * Returns a negative errno on failure. Otherwise returns 0 or 1:
1507ae85a807SMinchan Kim  * 0 if the IO request was completed synchronously,
1508ae85a807SMinchan Kim  * 1 if the IO request was successfully submitted (asynchronously).
1509ae85a807SMinchan Kim  */
1510522698d7SSergey Senozhatsky static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
15113f289dcbSTejun Heo 			int offset, unsigned int op, struct bio *bio)
1512522698d7SSergey Senozhatsky {
1513522698d7SSergey Senozhatsky 	unsigned long start_time = jiffies;
1514d62e26b3SJens Axboe 	struct request_queue *q = zram->disk->queue;
1515522698d7SSergey Senozhatsky 	int ret;
1516522698d7SSergey Senozhatsky 
1517ddcf35d3SMichael Callahan 	generic_start_io_acct(q, op, bvec->bv_len >> SECTOR_SHIFT,
1518522698d7SSergey Senozhatsky 			&zram->disk->part0);
1519522698d7SSergey Senozhatsky 
15203f289dcbSTejun Heo 	if (!op_is_write(op)) {
1521522698d7SSergey Senozhatsky 		atomic64_inc(&zram->stats.num_reads);
15228e654f8fSMinchan Kim 		ret = zram_bvec_read(zram, bvec, index, offset, bio);
15231f7319c7SMinchan Kim 		flush_dcache_page(bvec->bv_page);
1524522698d7SSergey Senozhatsky 	} else {
1525522698d7SSergey Senozhatsky 		atomic64_inc(&zram->stats.num_writes);
1526db8ffbd4SMinchan Kim 		ret = zram_bvec_write(zram, bvec, index, offset, bio);
1527522698d7SSergey Senozhatsky 	}
1528522698d7SSergey Senozhatsky 
1529ddcf35d3SMichael Callahan 	generic_end_io_acct(q, op, &zram->disk->part0, start_time);
1530522698d7SSergey Senozhatsky 
1531d7eac6b6SMinchan Kim 	zram_slot_lock(zram, index);
1532d7eac6b6SMinchan Kim 	zram_accessed(zram, index);
1533d7eac6b6SMinchan Kim 	zram_slot_unlock(zram, index);
1534d7eac6b6SMinchan Kim 
1535ae85a807SMinchan Kim 	if (unlikely(ret < 0)) {
15363f289dcbSTejun Heo 		if (!op_is_write(op))
1537522698d7SSergey Senozhatsky 			atomic64_inc(&zram->stats.failed_reads);
1538522698d7SSergey Senozhatsky 		else
1539522698d7SSergey Senozhatsky 			atomic64_inc(&zram->stats.failed_writes);
1540522698d7SSergey Senozhatsky 	}
1541522698d7SSergey Senozhatsky 
1542522698d7SSergey Senozhatsky 	return ret;
1543522698d7SSergey Senozhatsky }
1544522698d7SSergey Senozhatsky 
1545522698d7SSergey Senozhatsky static void __zram_make_request(struct zram *zram, struct bio *bio)
1546522698d7SSergey Senozhatsky {
1547abf54548SMike Christie 	int offset;
1548522698d7SSergey Senozhatsky 	u32 index;
1549522698d7SSergey Senozhatsky 	struct bio_vec bvec;
1550522698d7SSergey Senozhatsky 	struct bvec_iter iter;
1551522698d7SSergey Senozhatsky 
1552522698d7SSergey Senozhatsky 	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
1553522698d7SSergey Senozhatsky 	offset = (bio->bi_iter.bi_sector &
1554522698d7SSergey Senozhatsky 		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
1555522698d7SSergey Senozhatsky 
155631edeacdSChristoph Hellwig 	switch (bio_op(bio)) {
155731edeacdSChristoph Hellwig 	case REQ_OP_DISCARD:
155831edeacdSChristoph Hellwig 	case REQ_OP_WRITE_ZEROES:
1559522698d7SSergey Senozhatsky 		zram_bio_discard(zram, index, offset, bio);
15604246a0b6SChristoph Hellwig 		bio_endio(bio);
1561522698d7SSergey Senozhatsky 		return;
156231edeacdSChristoph Hellwig 	default:
156331edeacdSChristoph Hellwig 		break;
1564522698d7SSergey Senozhatsky 	}
1565522698d7SSergey Senozhatsky 
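	/*
	 * A bio segment may straddle a zram page boundary, so split each
	 * segment into chunks no larger than the remainder of the current
	 * page and advance (index, offset) chunk by chunk.
	 */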
1566522698d7SSergey Senozhatsky 	bio_for_each_segment(bvec, bio, iter) {
1567e86942c7SMinchan Kim 		struct bio_vec bv = bvec;
1568e86942c7SMinchan Kim 		unsigned int unwritten = bvec.bv_len;
1569522698d7SSergey Senozhatsky 
1570e86942c7SMinchan Kim 		do {
1571e86942c7SMinchan Kim 			bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
1572e86942c7SMinchan Kim 							unwritten);
1573abf54548SMike Christie 			if (zram_bvec_rw(zram, &bv, index, offset,
15743f289dcbSTejun Heo 					 bio_op(bio), bio) < 0)
1575522698d7SSergey Senozhatsky 				goto out;
1576522698d7SSergey Senozhatsky 
1577e86942c7SMinchan Kim 			bv.bv_offset += bv.bv_len;
1578e86942c7SMinchan Kim 			unwritten -= bv.bv_len;
1579522698d7SSergey Senozhatsky 
1580e86942c7SMinchan Kim 			update_position(&index, &offset, &bv);
1581e86942c7SMinchan Kim 		} while (unwritten);
1582522698d7SSergey Senozhatsky 	}
1583522698d7SSergey Senozhatsky 
15844246a0b6SChristoph Hellwig 	bio_endio(bio);
1585522698d7SSergey Senozhatsky 	return;
1586522698d7SSergey Senozhatsky 
1587522698d7SSergey Senozhatsky out:
1588522698d7SSergey Senozhatsky 	bio_io_error(bio);
1589522698d7SSergey Senozhatsky }
1590522698d7SSergey Senozhatsky 
1591522698d7SSergey Senozhatsky /*
1592522698d7SSergey Senozhatsky  * Handler function for all zram I/O requests.
1593522698d7SSergey Senozhatsky  */
1594dece1635SJens Axboe static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
1595522698d7SSergey Senozhatsky {
1596522698d7SSergey Senozhatsky 	struct zram *zram = queue->queuedata;
1597522698d7SSergey Senozhatsky 
1598522698d7SSergey Senozhatsky 	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
1599522698d7SSergey Senozhatsky 					bio->bi_iter.bi_size)) {
1600522698d7SSergey Senozhatsky 		atomic64_inc(&zram->stats.invalid_io);
1601a09759acSMinchan Kim 		goto error;
1602522698d7SSergey Senozhatsky 	}
1603522698d7SSergey Senozhatsky 
1604522698d7SSergey Senozhatsky 	__zram_make_request(zram, bio);
1605dece1635SJens Axboe 	return BLK_QC_T_NONE;
1606a09759acSMinchan Kim 
1607522698d7SSergey Senozhatsky error:
1608522698d7SSergey Senozhatsky 	bio_io_error(bio);
1609dece1635SJens Axboe 	return BLK_QC_T_NONE;
1610522698d7SSergey Senozhatsky }
1611522698d7SSergey Senozhatsky 
1612522698d7SSergey Senozhatsky static void zram_slot_free_notify(struct block_device *bdev,
1613522698d7SSergey Senozhatsky 				unsigned long index)
1614522698d7SSergey Senozhatsky {
1615522698d7SSergey Senozhatsky 	struct zram *zram;
1616522698d7SSergey Senozhatsky 
1617522698d7SSergey Senozhatsky 	zram = bdev->bd_disk->private_data;
1618522698d7SSergey Senozhatsky 
16193c9959e0SMinchan Kim 	atomic64_inc(&zram->stats.notify_free);
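	/*
	 * Descriptive note: swap calls this hook from contexts where
	 * blocking on the slot lock could deadlock, so only try the lock;
	 * a contended slot is accounted as miss_free and is reclaimed when
	 * it is later overwritten or the device is reset.
	 */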
16203c9959e0SMinchan Kim 	if (!zram_slot_trylock(zram, index)) {
16213c9959e0SMinchan Kim 		atomic64_inc(&zram->stats.miss_free);
16223c9959e0SMinchan Kim 		return;
16233c9959e0SMinchan Kim 	}
16243c9959e0SMinchan Kim 
1625522698d7SSergey Senozhatsky 	zram_free_page(zram, index);
162686c49814SMinchan Kim 	zram_slot_unlock(zram, index);
1627522698d7SSergey Senozhatsky }
1628522698d7SSergey Senozhatsky 
1629522698d7SSergey Senozhatsky static int zram_rw_page(struct block_device *bdev, sector_t sector,
16303f289dcbSTejun Heo 		       struct page *page, unsigned int op)
1631522698d7SSergey Senozhatsky {
1632ae85a807SMinchan Kim 	int offset, ret;
1633522698d7SSergey Senozhatsky 	u32 index;
1634522698d7SSergey Senozhatsky 	struct zram *zram;
1635522698d7SSergey Senozhatsky 	struct bio_vec bv;
1636522698d7SSergey Senozhatsky 
163798cc093cSHuang Ying 	if (PageTransHuge(page))
163898cc093cSHuang Ying 		return -ENOTSUPP;
1639522698d7SSergey Senozhatsky 	zram = bdev->bd_disk->private_data;
1640522698d7SSergey Senozhatsky 
1641522698d7SSergey Senozhatsky 	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
1642522698d7SSergey Senozhatsky 		atomic64_inc(&zram->stats.invalid_io);
1643ae85a807SMinchan Kim 		ret = -EINVAL;
1644a09759acSMinchan Kim 		goto out;
1645522698d7SSergey Senozhatsky 	}
1646522698d7SSergey Senozhatsky 
1647522698d7SSergey Senozhatsky 	index = sector >> SECTORS_PER_PAGE_SHIFT;
16484ca82dabSMinchan Kim 	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
1649522698d7SSergey Senozhatsky 
1650522698d7SSergey Senozhatsky 	bv.bv_page = page;
1651522698d7SSergey Senozhatsky 	bv.bv_len = PAGE_SIZE;
1652522698d7SSergey Senozhatsky 	bv.bv_offset = 0;
1653522698d7SSergey Senozhatsky 
16543f289dcbSTejun Heo 	ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
1655522698d7SSergey Senozhatsky out:
1656522698d7SSergey Senozhatsky 	/*
1657522698d7SSergey Senozhatsky 	 * If I/O fails, just return an error (i.e., non-zero) without
1658522698d7SSergey Senozhatsky 	 * calling page_endio.
1659522698d7SSergey Senozhatsky 	 * This makes the callers of rw_page (e.g., swap_readpage,
1660522698d7SSergey Senozhatsky 	 * __swap_writepage) resubmit the I/O as a bio request, and
1661522698d7SSergey Senozhatsky 	 * bio->bi_end_io then handles the error
1662522698d7SSergey Senozhatsky 	 * (e.g., SetPageError, set_page_dirty and other cleanup).
1663522698d7SSergey Senozhatsky 	 */
1664ae85a807SMinchan Kim 	if (unlikely(ret < 0))
1665ae85a807SMinchan Kim 		return ret;
1666ae85a807SMinchan Kim 
1667ae85a807SMinchan Kim 	switch (ret) {
1668ae85a807SMinchan Kim 	case 0:
16693f289dcbSTejun Heo 		page_endio(page, op_is_write(op), 0);
1670ae85a807SMinchan Kim 		break;
1671ae85a807SMinchan Kim 	case 1:
1672ae85a807SMinchan Kim 		ret = 0;
1673ae85a807SMinchan Kim 		break;
1674ae85a807SMinchan Kim 	default:
1675ae85a807SMinchan Kim 		WARN_ON(1);
1676ae85a807SMinchan Kim 	}
1677ae85a807SMinchan Kim 	return ret;
1678522698d7SSergey Senozhatsky }
1679522698d7SSergey Senozhatsky 
1680ba6b17d6SSergey Senozhatsky static void zram_reset_device(struct zram *zram)
1681cd67e10aSMinchan Kim {
168208eee69fSMinchan Kim 	struct zcomp *comp;
168308eee69fSMinchan Kim 	u64 disksize;
168408eee69fSMinchan Kim 
1685cd67e10aSMinchan Kim 	down_write(&zram->init_lock);
16869ada9da9SMinchan Kim 
16879ada9da9SMinchan Kim 	zram->limit_pages = 0;
16889ada9da9SMinchan Kim 
1689be2d1d56SSergey Senozhatsky 	if (!init_done(zram)) {
1690cd67e10aSMinchan Kim 		up_write(&zram->init_lock);
1691cd67e10aSMinchan Kim 		return;
1692cd67e10aSMinchan Kim 	}
1693cd67e10aSMinchan Kim 
169408eee69fSMinchan Kim 	comp = zram->comp;
169508eee69fSMinchan Kim 	disksize = zram->disksize;
1696cd67e10aSMinchan Kim 	zram->disksize = 0;
1697d7ad41a1SWeijie Yang 
1698a096cafcSSergey Senozhatsky 	set_capacity(zram->disk, 0);
1699d7ad41a1SWeijie Yang 	part_stat_set_all(&zram->disk->part0, 0);
1700a096cafcSSergey Senozhatsky 
1701cd67e10aSMinchan Kim 	up_write(&zram->init_lock);
170208eee69fSMinchan Kim 	/* I/O operations on all CPUs are done, so it's safe to free */
1703beb6602cSMinchan Kim 	zram_meta_free(zram, disksize);
1704302128dcSMinchan Kim 	memset(&zram->stats, 0, sizeof(zram->stats));
170508eee69fSMinchan Kim 	zcomp_destroy(comp);
1706013bf95aSMinchan Kim 	reset_bdev(zram);
1707cd67e10aSMinchan Kim }
1708cd67e10aSMinchan Kim 
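/*
 * Usage sketch (illustrative): the device is brought up by writing a size
 * to the disksize attribute, e.g.
 *
 *	echo 1G > /sys/block/zram0/disksize
 *
 * memparse() below accepts the usual K/M/G suffixes.
 */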
1709cd67e10aSMinchan Kim static ssize_t disksize_store(struct device *dev,
1710cd67e10aSMinchan Kim 		struct device_attribute *attr, const char *buf, size_t len)
1711cd67e10aSMinchan Kim {
1712cd67e10aSMinchan Kim 	u64 disksize;
1713d61f98c7SSergey Senozhatsky 	struct zcomp *comp;
1714cd67e10aSMinchan Kim 	struct zram *zram = dev_to_zram(dev);
1715fcfa8d95SSergey Senozhatsky 	int err;
1716cd67e10aSMinchan Kim 
1717cd67e10aSMinchan Kim 	disksize = memparse(buf, NULL);
1718cd67e10aSMinchan Kim 	if (!disksize)
1719cd67e10aSMinchan Kim 		return -EINVAL;
1720cd67e10aSMinchan Kim 
1721beb6602cSMinchan Kim 	down_write(&zram->init_lock);
1722beb6602cSMinchan Kim 	if (init_done(zram)) {
1723beb6602cSMinchan Kim 		pr_info("Cannot change disksize for initialized device\n");
1724beb6602cSMinchan Kim 		err = -EBUSY;
1725beb6602cSMinchan Kim 		goto out_unlock;
1726beb6602cSMinchan Kim 	}
1727beb6602cSMinchan Kim 
1728cd67e10aSMinchan Kim 	disksize = PAGE_ALIGN(disksize);
1729beb6602cSMinchan Kim 	if (!zram_meta_alloc(zram, disksize)) {
1730beb6602cSMinchan Kim 		err = -ENOMEM;
1731beb6602cSMinchan Kim 		goto out_unlock;
1732beb6602cSMinchan Kim 	}
1733b67d1ec1SSergey Senozhatsky 
1734da9556a2SSergey Senozhatsky 	comp = zcomp_create(zram->compressor);
1735fcfa8d95SSergey Senozhatsky 	if (IS_ERR(comp)) {
173670864969SSergey Senozhatsky 		pr_err("Cannot initialise %s compressing backend\n",
1737e46b8a03SSergey Senozhatsky 				zram->compressor);
1738fcfa8d95SSergey Senozhatsky 		err = PTR_ERR(comp);
1739fcfa8d95SSergey Senozhatsky 		goto out_free_meta;
1740d61f98c7SSergey Senozhatsky 	}
1741d61f98c7SSergey Senozhatsky 
1742d61f98c7SSergey Senozhatsky 	zram->comp = comp;
1743cd67e10aSMinchan Kim 	zram->disksize = disksize;
1744cd67e10aSMinchan Kim 	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
1745e447a015SMinchan Kim 
1746e447a015SMinchan Kim 	revalidate_disk(zram->disk);
1747e7ccfc4cSMinchan Kim 	up_write(&zram->init_lock);
1748b4c5c609SMinchan Kim 
1749cd67e10aSMinchan Kim 	return len;
1750b7ca232eSSergey Senozhatsky 
1751fcfa8d95SSergey Senozhatsky out_free_meta:
1752beb6602cSMinchan Kim 	zram_meta_free(zram, disksize);
1753beb6602cSMinchan Kim out_unlock:
1754beb6602cSMinchan Kim 	up_write(&zram->init_lock);
1755b7ca232eSSergey Senozhatsky 	return err;
1756cd67e10aSMinchan Kim }
1757cd67e10aSMinchan Kim 
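/*
 * Usage sketch (illustrative): writing a non-zero value tears the device
 * down and returns it to the uninitialized state, e.g.
 *
 *	echo 1 > /sys/block/zram0/reset
 */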
1758cd67e10aSMinchan Kim static ssize_t reset_store(struct device *dev,
1759cd67e10aSMinchan Kim 		struct device_attribute *attr, const char *buf, size_t len)
1760cd67e10aSMinchan Kim {
1761cd67e10aSMinchan Kim 	int ret;
1762cd67e10aSMinchan Kim 	unsigned short do_reset;
1763cd67e10aSMinchan Kim 	struct zram *zram;
1764cd67e10aSMinchan Kim 	struct block_device *bdev;
1765cd67e10aSMinchan Kim 
1766f405c445SSergey Senozhatsky 	ret = kstrtou16(buf, 10, &do_reset);
1767f405c445SSergey Senozhatsky 	if (ret)
1768f405c445SSergey Senozhatsky 		return ret;
1769f405c445SSergey Senozhatsky 
1770f405c445SSergey Senozhatsky 	if (!do_reset)
1771f405c445SSergey Senozhatsky 		return -EINVAL;
1772f405c445SSergey Senozhatsky 
1773cd67e10aSMinchan Kim 	zram = dev_to_zram(dev);
1774cd67e10aSMinchan Kim 	bdev = bdget_disk(zram->disk, 0);
1775cd67e10aSMinchan Kim 	if (!bdev)
1776cd67e10aSMinchan Kim 		return -ENOMEM;
1777cd67e10aSMinchan Kim 
1778ba6b17d6SSergey Senozhatsky 	mutex_lock(&bdev->bd_mutex);
1779f405c445SSergey Senozhatsky 	/* Do not reset an active or claimed device */
1780f405c445SSergey Senozhatsky 	if (bdev->bd_openers || zram->claim) {
1781f405c445SSergey Senozhatsky 		mutex_unlock(&bdev->bd_mutex);
1782f405c445SSergey Senozhatsky 		bdput(bdev);
1783f405c445SSergey Senozhatsky 		return -EBUSY;
1784cd67e10aSMinchan Kim 	}
1785cd67e10aSMinchan Kim 
1786f405c445SSergey Senozhatsky 	/* From now on, no one can open /dev/zram[0-9] */
1787f405c445SSergey Senozhatsky 	zram->claim = true;
1788f405c445SSergey Senozhatsky 	mutex_unlock(&bdev->bd_mutex);
1789cd67e10aSMinchan Kim 
1790f405c445SSergey Senozhatsky 	/* Make sure all the pending I/O is finished */
1791cd67e10aSMinchan Kim 	fsync_bdev(bdev);
1792ba6b17d6SSergey Senozhatsky 	zram_reset_device(zram);
1793e447a015SMinchan Kim 	revalidate_disk(zram->disk);
1794cd67e10aSMinchan Kim 	bdput(bdev);
1795cd67e10aSMinchan Kim 
1796f405c445SSergey Senozhatsky 	mutex_lock(&bdev->bd_mutex);
1797f405c445SSergey Senozhatsky 	zram->claim = false;
1798ba6b17d6SSergey Senozhatsky 	mutex_unlock(&bdev->bd_mutex);
1799f405c445SSergey Senozhatsky 
1800f405c445SSergey Senozhatsky 	return len;
1801f405c445SSergey Senozhatsky }
1802f405c445SSergey Senozhatsky 
1803f405c445SSergey Senozhatsky static int zram_open(struct block_device *bdev, fmode_t mode)
1804f405c445SSergey Senozhatsky {
1805f405c445SSergey Senozhatsky 	int ret = 0;
1806f405c445SSergey Senozhatsky 	struct zram *zram;
1807f405c445SSergey Senozhatsky 
1808f405c445SSergey Senozhatsky 	WARN_ON(!mutex_is_locked(&bdev->bd_mutex));
1809f405c445SSergey Senozhatsky 
1810f405c445SSergey Senozhatsky 	zram = bdev->bd_disk->private_data;
1811f405c445SSergey Senozhatsky 	/* zram was claimed for reset, so fail the open request */
1812f405c445SSergey Senozhatsky 	if (zram->claim)
1813f405c445SSergey Senozhatsky 		ret = -EBUSY;
1814f405c445SSergey Senozhatsky 
1815cd67e10aSMinchan Kim 	return ret;
1816cd67e10aSMinchan Kim }
1817cd67e10aSMinchan Kim 
1818cd67e10aSMinchan Kim static const struct block_device_operations zram_devops = {
1819f405c445SSergey Senozhatsky 	.open = zram_open,
1820cd67e10aSMinchan Kim 	.swap_slot_free_notify = zram_slot_free_notify,
18218c7f0102Skaram.lee 	.rw_page = zram_rw_page,
1822cd67e10aSMinchan Kim 	.owner = THIS_MODULE
1823cd67e10aSMinchan Kim };
1824cd67e10aSMinchan Kim 
182599ebbd30SAndrew Morton static DEVICE_ATTR_WO(compact);
1826083914eaSGanesh Mahendran static DEVICE_ATTR_RW(disksize);
1827083914eaSGanesh Mahendran static DEVICE_ATTR_RO(initstate);
1828083914eaSGanesh Mahendran static DEVICE_ATTR_WO(reset);
1829c87d1655SSergey Senozhatsky static DEVICE_ATTR_WO(mem_limit);
1830c87d1655SSergey Senozhatsky static DEVICE_ATTR_WO(mem_used_max);
1831e82592c4SMinchan Kim static DEVICE_ATTR_WO(idle);
1832083914eaSGanesh Mahendran static DEVICE_ATTR_RW(max_comp_streams);
1833083914eaSGanesh Mahendran static DEVICE_ATTR_RW(comp_algorithm);
1834013bf95aSMinchan Kim #ifdef CONFIG_ZRAM_WRITEBACK
1835013bf95aSMinchan Kim static DEVICE_ATTR_RW(backing_dev);
1836a939888eSMinchan Kim static DEVICE_ATTR_WO(writeback);
1837bb416d18SMinchan Kim static DEVICE_ATTR_RW(writeback_limit);
18381d69a3f8SMinchan Kim static DEVICE_ATTR_RW(writeback_limit_enable);
1839013bf95aSMinchan Kim #endif
1840cd67e10aSMinchan Kim 
1841cd67e10aSMinchan Kim static struct attribute *zram_disk_attrs[] = {
1842cd67e10aSMinchan Kim 	&dev_attr_disksize.attr,
1843cd67e10aSMinchan Kim 	&dev_attr_initstate.attr,
1844cd67e10aSMinchan Kim 	&dev_attr_reset.attr,
184599ebbd30SAndrew Morton 	&dev_attr_compact.attr,
18469ada9da9SMinchan Kim 	&dev_attr_mem_limit.attr,
1847461a8eeeSMinchan Kim 	&dev_attr_mem_used_max.attr,
1848e82592c4SMinchan Kim 	&dev_attr_idle.attr,
1849beca3ec7SSergey Senozhatsky 	&dev_attr_max_comp_streams.attr,
1850e46b8a03SSergey Senozhatsky 	&dev_attr_comp_algorithm.attr,
1851013bf95aSMinchan Kim #ifdef CONFIG_ZRAM_WRITEBACK
1852013bf95aSMinchan Kim 	&dev_attr_backing_dev.attr,
1853a939888eSMinchan Kim 	&dev_attr_writeback.attr,
1854bb416d18SMinchan Kim 	&dev_attr_writeback_limit.attr,
18551d69a3f8SMinchan Kim 	&dev_attr_writeback_limit_enable.attr,
1856013bf95aSMinchan Kim #endif
18572f6a3bedSSergey Senozhatsky 	&dev_attr_io_stat.attr,
18584f2109f6SSergey Senozhatsky 	&dev_attr_mm_stat.attr,
185923eddf39SMinchan Kim #ifdef CONFIG_ZRAM_WRITEBACK
186023eddf39SMinchan Kim 	&dev_attr_bd_stat.attr,
186123eddf39SMinchan Kim #endif
1862623e47fcSSergey Senozhatsky 	&dev_attr_debug_stat.attr,
1863cd67e10aSMinchan Kim 	NULL,
1864cd67e10aSMinchan Kim };
1865cd67e10aSMinchan Kim 
1866bc1bb362SArvind Yadav static const struct attribute_group zram_disk_attr_group = {
1867cd67e10aSMinchan Kim 	.attrs = zram_disk_attrs,
1868cd67e10aSMinchan Kim };
1869cd67e10aSMinchan Kim 
187098af4d4dSHannes Reinecke static const struct attribute_group *zram_disk_attr_groups[] = {
187198af4d4dSHannes Reinecke 	&zram_disk_attr_group,
187298af4d4dSHannes Reinecke 	NULL,
187398af4d4dSHannes Reinecke };
187498af4d4dSHannes Reinecke 
187592ff1528SSergey Senozhatsky /*
187692ff1528SSergey Senozhatsky  * Allocate and initialize a new zram device. The function returns
187792ff1528SSergey Senozhatsky  * a '>= 0' device_id upon success, and a negative value otherwise.
187892ff1528SSergey Senozhatsky  */
187992ff1528SSergey Senozhatsky static int zram_add(void)
1880cd67e10aSMinchan Kim {
188185508ec6SSergey Senozhatsky 	struct zram *zram;
1882ee980160SSergey Senozhatsky 	struct request_queue *queue;
188392ff1528SSergey Senozhatsky 	int ret, device_id;
188485508ec6SSergey Senozhatsky 
188585508ec6SSergey Senozhatsky 	zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
188685508ec6SSergey Senozhatsky 	if (!zram)
188785508ec6SSergey Senozhatsky 		return -ENOMEM;
188885508ec6SSergey Senozhatsky 
188992ff1528SSergey Senozhatsky 	ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
189085508ec6SSergey Senozhatsky 	if (ret < 0)
189185508ec6SSergey Senozhatsky 		goto out_free_dev;
189292ff1528SSergey Senozhatsky 	device_id = ret;
1893cd67e10aSMinchan Kim 
1894cd67e10aSMinchan Kim 	init_rwsem(&zram->init_lock);
18951d69a3f8SMinchan Kim #ifdef CONFIG_ZRAM_WRITEBACK
18961d69a3f8SMinchan Kim 	spin_lock_init(&zram->wb_limit_lock);
18971d69a3f8SMinchan Kim #endif
1898ee980160SSergey Senozhatsky 	queue = blk_alloc_queue(GFP_KERNEL);
1899ee980160SSergey Senozhatsky 	if (!queue) {
1900cd67e10aSMinchan Kim 		pr_err("Error allocating disk queue for device %d\n",
1901cd67e10aSMinchan Kim 			device_id);
190285508ec6SSergey Senozhatsky 		ret = -ENOMEM;
190385508ec6SSergey Senozhatsky 		goto out_free_idr;
1904cd67e10aSMinchan Kim 	}
1905cd67e10aSMinchan Kim 
1906ee980160SSergey Senozhatsky 	blk_queue_make_request(queue, zram_make_request);
1907cd67e10aSMinchan Kim 
1908cd67e10aSMinchan Kim 	/* gendisk structure */
1909cd67e10aSMinchan Kim 	zram->disk = alloc_disk(1);
1910cd67e10aSMinchan Kim 	if (!zram->disk) {
191170864969SSergey Senozhatsky 		pr_err("Error allocating disk structure for device %d\n",
1912cd67e10aSMinchan Kim 			device_id);
1913201c7b72SJulia Lawall 		ret = -ENOMEM;
1914cd67e10aSMinchan Kim 		goto out_free_queue;
1915cd67e10aSMinchan Kim 	}
1916cd67e10aSMinchan Kim 
1917cd67e10aSMinchan Kim 	zram->disk->major = zram_major;
1918cd67e10aSMinchan Kim 	zram->disk->first_minor = device_id;
1919cd67e10aSMinchan Kim 	zram->disk->fops = &zram_devops;
1920ee980160SSergey Senozhatsky 	zram->disk->queue = queue;
1921ee980160SSergey Senozhatsky 	zram->disk->queue->queuedata = zram;
1922cd67e10aSMinchan Kim 	zram->disk->private_data = zram;
1923cd67e10aSMinchan Kim 	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
1924cd67e10aSMinchan Kim 
1925cd67e10aSMinchan Kim 	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
1926cd67e10aSMinchan Kim 	set_capacity(zram->disk, 0);
1927b67d1ec1SSergey Senozhatsky 	/* zram devices sort of resemble non-rotational disks */
19288b904b5bSBart Van Assche 	blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
19298b904b5bSBart Van Assche 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
1930e447a015SMinchan Kim 
1931cd67e10aSMinchan Kim 	/*
1932cd67e10aSMinchan Kim 	 * To ensure that we always get PAGE_SIZE-aligned
1933cd67e10aSMinchan Kim 	 * and n*PAGE_SIZE-sized I/O requests.
1934cd67e10aSMinchan Kim 	 */
1935cd67e10aSMinchan Kim 	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
1936cd67e10aSMinchan Kim 	blk_queue_logical_block_size(zram->disk->queue,
1937cd67e10aSMinchan Kim 					ZRAM_LOGICAL_BLOCK_SIZE);
1938cd67e10aSMinchan Kim 	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
1939cd67e10aSMinchan Kim 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
1940f4659d8eSJoonsoo Kim 	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
19412bb4cd5cSJens Axboe 	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
19428b904b5bSBart Van Assche 	blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue);
194331edeacdSChristoph Hellwig 
1944f4659d8eSJoonsoo Kim 	/*
1945f4659d8eSJoonsoo Kim 	 * zram_bio_discard() will clear all logical blocks if the logical block
1946f4659d8eSJoonsoo Kim 	 * size is identical to the physical block size (PAGE_SIZE). But if it is
1947f4659d8eSJoonsoo Kim 	 * different, we will skip discarding some parts of logical blocks in
1948f4659d8eSJoonsoo Kim 	 * the part of the request range which isn't aligned to physical block
1949f4659d8eSJoonsoo Kim 	 * size.  So we can't ensure that all discarded logical blocks are
1950f4659d8eSJoonsoo Kim 	 * zeroed.
1951f4659d8eSJoonsoo Kim 	 */
1952f4659d8eSJoonsoo Kim 	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
195331edeacdSChristoph Hellwig 		blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
1954cd67e10aSMinchan Kim 
1955e447a015SMinchan Kim 	zram->disk->queue->backing_dev_info->capabilities |=
195623c47d2aSMinchan Kim 			(BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
195798af4d4dSHannes Reinecke 	device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
1958cd67e10aSMinchan Kim 
1959e46b8a03SSergey Senozhatsky 	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
1960d12b63c9SSergey Senozhatsky 
1961c0265342SMinchan Kim 	zram_debugfs_register(zram);
1962d12b63c9SSergey Senozhatsky 	pr_info("Added device: %s\n", zram->disk->disk_name);
196392ff1528SSergey Senozhatsky 	return device_id;
1964cd67e10aSMinchan Kim 
1965cd67e10aSMinchan Kim out_free_queue:
1966ee980160SSergey Senozhatsky 	blk_cleanup_queue(queue);
196785508ec6SSergey Senozhatsky out_free_idr:
196885508ec6SSergey Senozhatsky 	idr_remove(&zram_index_idr, device_id);
196985508ec6SSergey Senozhatsky out_free_dev:
197085508ec6SSergey Senozhatsky 	kfree(zram);
1971cd67e10aSMinchan Kim 	return ret;
1972cd67e10aSMinchan Kim }
1973cd67e10aSMinchan Kim 
19746566d1a3SSergey Senozhatsky static int zram_remove(struct zram *zram)
1975cd67e10aSMinchan Kim {
19766566d1a3SSergey Senozhatsky 	struct block_device *bdev;
19776566d1a3SSergey Senozhatsky 
19786566d1a3SSergey Senozhatsky 	bdev = bdget_disk(zram->disk, 0);
19796566d1a3SSergey Senozhatsky 	if (!bdev)
19806566d1a3SSergey Senozhatsky 		return -ENOMEM;
19816566d1a3SSergey Senozhatsky 
19826566d1a3SSergey Senozhatsky 	mutex_lock(&bdev->bd_mutex);
19836566d1a3SSergey Senozhatsky 	if (bdev->bd_openers || zram->claim) {
19846566d1a3SSergey Senozhatsky 		mutex_unlock(&bdev->bd_mutex);
19856566d1a3SSergey Senozhatsky 		bdput(bdev);
19866566d1a3SSergey Senozhatsky 		return -EBUSY;
19876566d1a3SSergey Senozhatsky 	}
19886566d1a3SSergey Senozhatsky 
19896566d1a3SSergey Senozhatsky 	zram->claim = true;
19906566d1a3SSergey Senozhatsky 	mutex_unlock(&bdev->bd_mutex);
19916566d1a3SSergey Senozhatsky 
1992c0265342SMinchan Kim 	zram_debugfs_unregister(zram);
1993cd67e10aSMinchan Kim 
19946566d1a3SSergey Senozhatsky 	/* Make sure all the pending I/O is finished */
19956566d1a3SSergey Senozhatsky 	fsync_bdev(bdev);
1996a096cafcSSergey Senozhatsky 	zram_reset_device(zram);
19976566d1a3SSergey Senozhatsky 	bdput(bdev);
19986566d1a3SSergey Senozhatsky 
19996566d1a3SSergey Senozhatsky 	pr_info("Removed device: %s\n", zram->disk->disk_name);
20006566d1a3SSergey Senozhatsky 
2001cd67e10aSMinchan Kim 	del_gendisk(zram->disk);
2002392db380SBart Van Assche 	blk_cleanup_queue(zram->disk->queue);
2003cd67e10aSMinchan Kim 	put_disk(zram->disk);
200485508ec6SSergey Senozhatsky 	kfree(zram);
20056566d1a3SSergey Senozhatsky 	return 0;
2006cd67e10aSMinchan Kim }
2007cd67e10aSMinchan Kim 
20086566d1a3SSergey Senozhatsky /* zram-control sysfs attributes */
200927104a53SGreg Kroah-Hartman 
201027104a53SGreg Kroah-Hartman /*
201127104a53SGreg Kroah-Hartman  * NOTE: hot_add attribute is not the usual read-only sysfs attribute, in
201227104a53SGreg Kroah-Hartman  * the sense that reading from this file does alter the state of your
201327104a53SGreg Kroah-Hartman  * system -- it creates a new un-initialized zram device and returns this
201427104a53SGreg Kroah-Hartman  * device's device_id (or an error code if it fails to create a new device).
201527104a53SGreg Kroah-Hartman  */
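/*
 * Usage sketch (illustrative):
 *
 *	cat /sys/class/zram-control/hot_add
 *	2
 *
 * creates an un-initialized /dev/zram2 and prints its device_id.
 */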
20166566d1a3SSergey Senozhatsky static ssize_t hot_add_show(struct class *class,
20176566d1a3SSergey Senozhatsky 			struct class_attribute *attr,
20186566d1a3SSergey Senozhatsky 			char *buf)
20196566d1a3SSergey Senozhatsky {
20206566d1a3SSergey Senozhatsky 	int ret;
20216566d1a3SSergey Senozhatsky 
20226566d1a3SSergey Senozhatsky 	mutex_lock(&zram_index_mutex);
20236566d1a3SSergey Senozhatsky 	ret = zram_add();
20246566d1a3SSergey Senozhatsky 	mutex_unlock(&zram_index_mutex);
20256566d1a3SSergey Senozhatsky 
20266566d1a3SSergey Senozhatsky 	if (ret < 0)
20276566d1a3SSergey Senozhatsky 		return ret;
20286566d1a3SSergey Senozhatsky 	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
20296566d1a3SSergey Senozhatsky }
2030f40609d1SGreg Kroah-Hartman static CLASS_ATTR_RO(hot_add);
20316566d1a3SSergey Senozhatsky 
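/*
 * Usage sketch (illustrative): writing a device_id removes that device,
 * provided it is not claimed or still open, e.g.
 *
 *	echo 2 > /sys/class/zram-control/hot_remove
 */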
20326566d1a3SSergey Senozhatsky static ssize_t hot_remove_store(struct class *class,
20336566d1a3SSergey Senozhatsky 			struct class_attribute *attr,
20346566d1a3SSergey Senozhatsky 			const char *buf,
20356566d1a3SSergey Senozhatsky 			size_t count)
20366566d1a3SSergey Senozhatsky {
20376566d1a3SSergey Senozhatsky 	struct zram *zram;
20386566d1a3SSergey Senozhatsky 	int ret, dev_id;
20396566d1a3SSergey Senozhatsky 
20406566d1a3SSergey Senozhatsky 	/* dev_id is gendisk->first_minor, which is `int' */
20416566d1a3SSergey Senozhatsky 	ret = kstrtoint(buf, 10, &dev_id);
20426566d1a3SSergey Senozhatsky 	if (ret)
20436566d1a3SSergey Senozhatsky 		return ret;
20446566d1a3SSergey Senozhatsky 	if (dev_id < 0)
20456566d1a3SSergey Senozhatsky 		return -EINVAL;
20466566d1a3SSergey Senozhatsky 
20476566d1a3SSergey Senozhatsky 	mutex_lock(&zram_index_mutex);
20486566d1a3SSergey Senozhatsky 
20496566d1a3SSergey Senozhatsky 	zram = idr_find(&zram_index_idr, dev_id);
205017ec4cd9SJerome Marchand 	if (zram) {
20516566d1a3SSergey Senozhatsky 		ret = zram_remove(zram);
2052529e71e1STakashi Iwai 		if (!ret)
205317ec4cd9SJerome Marchand 			idr_remove(&zram_index_idr, dev_id);
205417ec4cd9SJerome Marchand 	} else {
20556566d1a3SSergey Senozhatsky 		ret = -ENODEV;
205617ec4cd9SJerome Marchand 	}
20576566d1a3SSergey Senozhatsky 
20586566d1a3SSergey Senozhatsky 	mutex_unlock(&zram_index_mutex);
20596566d1a3SSergey Senozhatsky 	return ret ? ret : count;
20606566d1a3SSergey Senozhatsky }
206127104a53SGreg Kroah-Hartman static CLASS_ATTR_WO(hot_remove);
20626566d1a3SSergey Senozhatsky 
206327104a53SGreg Kroah-Hartman static struct attribute *zram_control_class_attrs[] = {
206427104a53SGreg Kroah-Hartman 	&class_attr_hot_add.attr,
206527104a53SGreg Kroah-Hartman 	&class_attr_hot_remove.attr,
206627104a53SGreg Kroah-Hartman 	NULL,
20676566d1a3SSergey Senozhatsky };
206827104a53SGreg Kroah-Hartman ATTRIBUTE_GROUPS(zram_control_class);
20696566d1a3SSergey Senozhatsky 
20706566d1a3SSergey Senozhatsky static struct class zram_control_class = {
20716566d1a3SSergey Senozhatsky 	.name		= "zram-control",
20726566d1a3SSergey Senozhatsky 	.owner		= THIS_MODULE,
207327104a53SGreg Kroah-Hartman 	.class_groups	= zram_control_class_groups,
20746566d1a3SSergey Senozhatsky };
20756566d1a3SSergey Senozhatsky 
207685508ec6SSergey Senozhatsky static int zram_remove_cb(int id, void *ptr, void *data)
207785508ec6SSergey Senozhatsky {
207885508ec6SSergey Senozhatsky 	zram_remove(ptr);
207985508ec6SSergey Senozhatsky 	return 0;
208085508ec6SSergey Senozhatsky }
208185508ec6SSergey Senozhatsky 
208285508ec6SSergey Senozhatsky static void destroy_devices(void)
208385508ec6SSergey Senozhatsky {
20846566d1a3SSergey Senozhatsky 	class_unregister(&zram_control_class);
208585508ec6SSergey Senozhatsky 	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
2086c0265342SMinchan Kim 	zram_debugfs_destroy();
208785508ec6SSergey Senozhatsky 	idr_destroy(&zram_index_idr);
2088a096cafcSSergey Senozhatsky 	unregister_blkdev(zram_major, "zram");
20891dd6c834SAnna-Maria Gleixner 	cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
2090a096cafcSSergey Senozhatsky }
2091a096cafcSSergey Senozhatsky 
2092cd67e10aSMinchan Kim static int __init zram_init(void)
2093cd67e10aSMinchan Kim {
209492ff1528SSergey Senozhatsky 	int ret;
2095cd67e10aSMinchan Kim 
20961dd6c834SAnna-Maria Gleixner 	ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
20971dd6c834SAnna-Maria Gleixner 				      zcomp_cpu_up_prepare, zcomp_cpu_dead);
20981dd6c834SAnna-Maria Gleixner 	if (ret < 0)
20991dd6c834SAnna-Maria Gleixner 		return ret;
21001dd6c834SAnna-Maria Gleixner 
21016566d1a3SSergey Senozhatsky 	ret = class_register(&zram_control_class);
21026566d1a3SSergey Senozhatsky 	if (ret) {
210370864969SSergey Senozhatsky 		pr_err("Unable to register zram-control class\n");
21041dd6c834SAnna-Maria Gleixner 		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
21056566d1a3SSergey Senozhatsky 		return ret;
21066566d1a3SSergey Senozhatsky 	}
21076566d1a3SSergey Senozhatsky 
2108c0265342SMinchan Kim 	zram_debugfs_create();
2109cd67e10aSMinchan Kim 	zram_major = register_blkdev(0, "zram");
2110cd67e10aSMinchan Kim 	if (zram_major <= 0) {
211170864969SSergey Senozhatsky 		pr_err("Unable to get major number\n");
21126566d1a3SSergey Senozhatsky 		class_unregister(&zram_control_class);
21131dd6c834SAnna-Maria Gleixner 		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
2114a096cafcSSergey Senozhatsky 		return -EBUSY;
2115cd67e10aSMinchan Kim 	}
2116cd67e10aSMinchan Kim 
211792ff1528SSergey Senozhatsky 	while (num_devices != 0) {
21186566d1a3SSergey Senozhatsky 		mutex_lock(&zram_index_mutex);
211992ff1528SSergey Senozhatsky 		ret = zram_add();
21206566d1a3SSergey Senozhatsky 		mutex_unlock(&zram_index_mutex);
212192ff1528SSergey Senozhatsky 		if (ret < 0)
2122a096cafcSSergey Senozhatsky 			goto out_error;
212392ff1528SSergey Senozhatsky 		num_devices--;
2124cd67e10aSMinchan Kim 	}
2125cd67e10aSMinchan Kim 
2126cd67e10aSMinchan Kim 	return 0;
2127cd67e10aSMinchan Kim 
2128a096cafcSSergey Senozhatsky out_error:
212985508ec6SSergey Senozhatsky 	destroy_devices();
2130cd67e10aSMinchan Kim 	return ret;
2131cd67e10aSMinchan Kim }
2132cd67e10aSMinchan Kim 
2133cd67e10aSMinchan Kim static void __exit zram_exit(void)
2134cd67e10aSMinchan Kim {
213585508ec6SSergey Senozhatsky 	destroy_devices();
2136cd67e10aSMinchan Kim }
2137cd67e10aSMinchan Kim 
2138cd67e10aSMinchan Kim module_init(zram_init);
2139cd67e10aSMinchan Kim module_exit(zram_exit);
2140cd67e10aSMinchan Kim 
2141cd67e10aSMinchan Kim module_param(num_devices, uint, 0);
2142c3cdb40eSSergey Senozhatsky MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
2143cd67e10aSMinchan Kim 
2144cd67e10aSMinchan Kim MODULE_LICENSE("Dual BSD/GPL");
2145cd67e10aSMinchan Kim MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
2146cd67e10aSMinchan Kim MODULE_DESCRIPTION("Compressed RAM Block Device");
2147