xref: /openbmc/linux/drivers/block/zram/zram_drv.c (revision 43209ea2d17aae1540d4e28274e36404f72702f2)
1cd67e10aSMinchan Kim /*
2cd67e10aSMinchan Kim  * Compressed RAM block device
3cd67e10aSMinchan Kim  *
4cd67e10aSMinchan Kim  * Copyright (C) 2008, 2009, 2010  Nitin Gupta
57bfb3de8SMinchan Kim  *               2012, 2013 Minchan Kim
6cd67e10aSMinchan Kim  *
7cd67e10aSMinchan Kim  * This code is released using a dual license strategy: BSD/GPL
8cd67e10aSMinchan Kim  * You can choose the licence that better fits your requirements.
9cd67e10aSMinchan Kim  *
10cd67e10aSMinchan Kim  * Released under the terms of 3-clause BSD License
11cd67e10aSMinchan Kim  * Released under the terms of GNU General Public License Version 2.0
12cd67e10aSMinchan Kim  *
13cd67e10aSMinchan Kim  */
14cd67e10aSMinchan Kim 
15cd67e10aSMinchan Kim #define KMSG_COMPONENT "zram"
16cd67e10aSMinchan Kim #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17cd67e10aSMinchan Kim 
18cd67e10aSMinchan Kim #include <linux/module.h>
19cd67e10aSMinchan Kim #include <linux/kernel.h>
20cd67e10aSMinchan Kim #include <linux/bio.h>
21cd67e10aSMinchan Kim #include <linux/bitops.h>
22cd67e10aSMinchan Kim #include <linux/blkdev.h>
23cd67e10aSMinchan Kim #include <linux/buffer_head.h>
24cd67e10aSMinchan Kim #include <linux/device.h>
25cd67e10aSMinchan Kim #include <linux/genhd.h>
26cd67e10aSMinchan Kim #include <linux/highmem.h>
27cd67e10aSMinchan Kim #include <linux/slab.h>
28cd67e10aSMinchan Kim #include <linux/string.h>
29cd67e10aSMinchan Kim #include <linux/vmalloc.h>
30fcfa8d95SSergey Senozhatsky #include <linux/err.h>
3185508ec6SSergey Senozhatsky #include <linux/idr.h>
326566d1a3SSergey Senozhatsky #include <linux/sysfs.h>
33cd67e10aSMinchan Kim 
34cd67e10aSMinchan Kim #include "zram_drv.h"
35cd67e10aSMinchan Kim 
3685508ec6SSergey Senozhatsky static DEFINE_IDR(zram_index_idr);
376566d1a3SSergey Senozhatsky /* idr index must be protected */
386566d1a3SSergey Senozhatsky static DEFINE_MUTEX(zram_index_mutex);
396566d1a3SSergey Senozhatsky 
40cd67e10aSMinchan Kim static int zram_major;
41b7ca232eSSergey Senozhatsky static const char *default_compressor = "lzo";
42cd67e10aSMinchan Kim 
43cd67e10aSMinchan Kim /* Module params (documentation at end) */
44cd67e10aSMinchan Kim static unsigned int num_devices = 1;
45cd67e10aSMinchan Kim 
468f7d282cSSergey Senozhatsky static inline void deprecated_attr_warn(const char *name)
478f7d282cSSergey Senozhatsky {
488f7d282cSSergey Senozhatsky 	pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
498f7d282cSSergey Senozhatsky 			task_pid_nr(current),
508f7d282cSSergey Senozhatsky 			current->comm,
518f7d282cSSergey Senozhatsky 			name,
528f7d282cSSergey Senozhatsky 			"See zram documentation.");
538f7d282cSSergey Senozhatsky }
548f7d282cSSergey Senozhatsky 
55a68eb3b6SSergey Senozhatsky #define ZRAM_ATTR_RO(name)						\
56083914eaSGanesh Mahendran static ssize_t name##_show(struct device *d,				\
57a68eb3b6SSergey Senozhatsky 				struct device_attribute *attr, char *b)	\
58a68eb3b6SSergey Senozhatsky {									\
59a68eb3b6SSergey Senozhatsky 	struct zram *zram = dev_to_zram(d);				\
608f7d282cSSergey Senozhatsky 									\
618f7d282cSSergey Senozhatsky 	deprecated_attr_warn(__stringify(name));			\
6256b4e8cbSSergey Senozhatsky 	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
63a68eb3b6SSergey Senozhatsky 		(u64)atomic64_read(&zram->stats.name));			\
64a68eb3b6SSergey Senozhatsky }									\
65083914eaSGanesh Mahendran static DEVICE_ATTR_RO(name);
66a68eb3b6SSergey Senozhatsky 
6708eee69fSMinchan Kim static inline bool init_done(struct zram *zram)
68be2d1d56SSergey Senozhatsky {
6908eee69fSMinchan Kim 	return zram->disksize;
70be2d1d56SSergey Senozhatsky }
71be2d1d56SSergey Senozhatsky 
72cd67e10aSMinchan Kim static inline struct zram *dev_to_zram(struct device *dev)
73cd67e10aSMinchan Kim {
74cd67e10aSMinchan Kim 	return (struct zram *)dev_to_disk(dev)->private_data;
75cd67e10aSMinchan Kim }
76cd67e10aSMinchan Kim 
77b31177f2SSergey Senozhatsky /* flag operations require table entry bit_spin_lock() being held */
78522698d7SSergey Senozhatsky static int zram_test_flag(struct zram_meta *meta, u32 index,
79522698d7SSergey Senozhatsky 			enum zram_pageflags flag)
8099ebbd30SAndrew Morton {
81522698d7SSergey Senozhatsky 	return meta->table[index].value & BIT(flag);
8299ebbd30SAndrew Morton }
8399ebbd30SAndrew Morton 
84522698d7SSergey Senozhatsky static void zram_set_flag(struct zram_meta *meta, u32 index,
85522698d7SSergey Senozhatsky 			enum zram_pageflags flag)
86522698d7SSergey Senozhatsky {
87522698d7SSergey Senozhatsky 	meta->table[index].value |= BIT(flag);
8899ebbd30SAndrew Morton }
8999ebbd30SAndrew Morton 
90522698d7SSergey Senozhatsky static void zram_clear_flag(struct zram_meta *meta, u32 index,
91522698d7SSergey Senozhatsky 			enum zram_pageflags flag)
92cd67e10aSMinchan Kim {
93522698d7SSergey Senozhatsky 	meta->table[index].value &= ~BIT(flag);
94522698d7SSergey Senozhatsky }
95cd67e10aSMinchan Kim 
96522698d7SSergey Senozhatsky static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
97522698d7SSergey Senozhatsky {
98522698d7SSergey Senozhatsky 	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
99522698d7SSergey Senozhatsky }
100522698d7SSergey Senozhatsky 
101522698d7SSergey Senozhatsky static void zram_set_obj_size(struct zram_meta *meta,
102522698d7SSergey Senozhatsky 					u32 index, size_t size)
103522698d7SSergey Senozhatsky {
104522698d7SSergey Senozhatsky 	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;
105522698d7SSergey Senozhatsky 
106522698d7SSergey Senozhatsky 	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
107522698d7SSergey Senozhatsky }
108522698d7SSergey Senozhatsky 
1091c53e0d2SGeliang Tang static inline bool is_partial_io(struct bio_vec *bvec)
110522698d7SSergey Senozhatsky {
111522698d7SSergey Senozhatsky 	return bvec->bv_len != PAGE_SIZE;
112522698d7SSergey Senozhatsky }
113522698d7SSergey Senozhatsky 
114522698d7SSergey Senozhatsky /*
115522698d7SSergey Senozhatsky  * Check if request is within bounds and aligned on zram logical blocks.
116522698d7SSergey Senozhatsky  */
1171c53e0d2SGeliang Tang static inline bool valid_io_request(struct zram *zram,
118522698d7SSergey Senozhatsky 		sector_t start, unsigned int size)
119522698d7SSergey Senozhatsky {
120522698d7SSergey Senozhatsky 	u64 end, bound;
121522698d7SSergey Senozhatsky 
122522698d7SSergey Senozhatsky 	/* unaligned request */
123522698d7SSergey Senozhatsky 	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
1241c53e0d2SGeliang Tang 		return false;
125522698d7SSergey Senozhatsky 	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
1261c53e0d2SGeliang Tang 		return false;
127522698d7SSergey Senozhatsky 
128522698d7SSergey Senozhatsky 	end = start + (size >> SECTOR_SHIFT);
129522698d7SSergey Senozhatsky 	bound = zram->disksize >> SECTOR_SHIFT;
130522698d7SSergey Senozhatsky 	/* out of range range */
131522698d7SSergey Senozhatsky 	if (unlikely(start >= bound || end > bound || start > end))
1321c53e0d2SGeliang Tang 		return false;
133522698d7SSergey Senozhatsky 
134522698d7SSergey Senozhatsky 	/* I/O request is valid */
1351c53e0d2SGeliang Tang 	return true;
136522698d7SSergey Senozhatsky }
137522698d7SSergey Senozhatsky 
138522698d7SSergey Senozhatsky static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
139522698d7SSergey Senozhatsky {
140522698d7SSergey Senozhatsky 	if (*offset + bvec->bv_len >= PAGE_SIZE)
141522698d7SSergey Senozhatsky 		(*index)++;
142522698d7SSergey Senozhatsky 	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
143522698d7SSergey Senozhatsky }
144522698d7SSergey Senozhatsky 
145522698d7SSergey Senozhatsky static inline void update_used_max(struct zram *zram,
146522698d7SSergey Senozhatsky 					const unsigned long pages)
147522698d7SSergey Senozhatsky {
148522698d7SSergey Senozhatsky 	unsigned long old_max, cur_max;
149522698d7SSergey Senozhatsky 
150522698d7SSergey Senozhatsky 	old_max = atomic_long_read(&zram->stats.max_used_pages);
151522698d7SSergey Senozhatsky 
152522698d7SSergey Senozhatsky 	do {
153522698d7SSergey Senozhatsky 		cur_max = old_max;
154522698d7SSergey Senozhatsky 		if (pages > cur_max)
155522698d7SSergey Senozhatsky 			old_max = atomic_long_cmpxchg(
156522698d7SSergey Senozhatsky 				&zram->stats.max_used_pages, cur_max, pages);
157522698d7SSergey Senozhatsky 	} while (old_max != cur_max);
158522698d7SSergey Senozhatsky }
159522698d7SSergey Senozhatsky 
1601c53e0d2SGeliang Tang static bool page_zero_filled(void *ptr)
161522698d7SSergey Senozhatsky {
162522698d7SSergey Senozhatsky 	unsigned int pos;
163522698d7SSergey Senozhatsky 	unsigned long *page;
164522698d7SSergey Senozhatsky 
165522698d7SSergey Senozhatsky 	page = (unsigned long *)ptr;
166522698d7SSergey Senozhatsky 
167522698d7SSergey Senozhatsky 	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
168522698d7SSergey Senozhatsky 		if (page[pos])
1691c53e0d2SGeliang Tang 			return false;
170522698d7SSergey Senozhatsky 	}
171522698d7SSergey Senozhatsky 
1721c53e0d2SGeliang Tang 	return true;
173522698d7SSergey Senozhatsky }
174522698d7SSergey Senozhatsky 
175522698d7SSergey Senozhatsky static void handle_zero_page(struct bio_vec *bvec)
176522698d7SSergey Senozhatsky {
177522698d7SSergey Senozhatsky 	struct page *page = bvec->bv_page;
178522698d7SSergey Senozhatsky 	void *user_mem;
179522698d7SSergey Senozhatsky 
180522698d7SSergey Senozhatsky 	user_mem = kmap_atomic(page);
181522698d7SSergey Senozhatsky 	if (is_partial_io(bvec))
182522698d7SSergey Senozhatsky 		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
183522698d7SSergey Senozhatsky 	else
184522698d7SSergey Senozhatsky 		clear_page(user_mem);
185522698d7SSergey Senozhatsky 	kunmap_atomic(user_mem);
186522698d7SSergey Senozhatsky 
187522698d7SSergey Senozhatsky 	flush_dcache_page(page);
188cd67e10aSMinchan Kim }
189cd67e10aSMinchan Kim 
190cd67e10aSMinchan Kim static ssize_t initstate_show(struct device *dev,
191cd67e10aSMinchan Kim 		struct device_attribute *attr, char *buf)
192cd67e10aSMinchan Kim {
193a68eb3b6SSergey Senozhatsky 	u32 val;
194cd67e10aSMinchan Kim 	struct zram *zram = dev_to_zram(dev);
195cd67e10aSMinchan Kim 
196a68eb3b6SSergey Senozhatsky 	down_read(&zram->init_lock);
197a68eb3b6SSergey Senozhatsky 	val = init_done(zram);
198a68eb3b6SSergey Senozhatsky 	up_read(&zram->init_lock);
199cd67e10aSMinchan Kim 
20056b4e8cbSSergey Senozhatsky 	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
201cd67e10aSMinchan Kim }
202cd67e10aSMinchan Kim 
203522698d7SSergey Senozhatsky static ssize_t disksize_show(struct device *dev,
204522698d7SSergey Senozhatsky 		struct device_attribute *attr, char *buf)
205522698d7SSergey Senozhatsky {
206522698d7SSergey Senozhatsky 	struct zram *zram = dev_to_zram(dev);
207522698d7SSergey Senozhatsky 
208522698d7SSergey Senozhatsky 	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
209522698d7SSergey Senozhatsky }
210522698d7SSergey Senozhatsky 
211cd67e10aSMinchan Kim static ssize_t orig_data_size_show(struct device *dev,
212cd67e10aSMinchan Kim 		struct device_attribute *attr, char *buf)
213cd67e10aSMinchan Kim {
214cd67e10aSMinchan Kim 	struct zram *zram = dev_to_zram(dev);
215cd67e10aSMinchan Kim 
2168f7d282cSSergey Senozhatsky 	deprecated_attr_warn("orig_data_size");
21756b4e8cbSSergey Senozhatsky 	return scnprintf(buf, PAGE_SIZE, "%llu\n",
21890a7806eSSergey Senozhatsky 		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
219cd67e10aSMinchan Kim }
220cd67e10aSMinchan Kim 
221cd67e10aSMinchan Kim static ssize_t mem_used_total_show(struct device *dev,
222cd67e10aSMinchan Kim 		struct device_attribute *attr, char *buf)
223cd67e10aSMinchan Kim {
224cd67e10aSMinchan Kim 	u64 val = 0;
225cd67e10aSMinchan Kim 	struct zram *zram = dev_to_zram(dev);
226cd67e10aSMinchan Kim 
2278f7d282cSSergey Senozhatsky 	deprecated_attr_warn("mem_used_total");
228cd67e10aSMinchan Kim 	down_read(&zram->init_lock);
2295a99e95bSWeijie Yang 	if (init_done(zram)) {
2305a99e95bSWeijie Yang 		struct zram_meta *meta = zram->meta;
231722cdc17SMinchan Kim 		val = zs_get_total_pages(meta->mem_pool);
2325a99e95bSWeijie Yang 	}
233cd67e10aSMinchan Kim 	up_read(&zram->init_lock);
234cd67e10aSMinchan Kim 
235722cdc17SMinchan Kim 	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
236cd67e10aSMinchan Kim }
237cd67e10aSMinchan Kim 
2389ada9da9SMinchan Kim static ssize_t mem_limit_show(struct device *dev,
2399ada9da9SMinchan Kim 		struct device_attribute *attr, char *buf)
2409ada9da9SMinchan Kim {
2419ada9da9SMinchan Kim 	u64 val;
2429ada9da9SMinchan Kim 	struct zram *zram = dev_to_zram(dev);
2439ada9da9SMinchan Kim 
2448f7d282cSSergey Senozhatsky 	deprecated_attr_warn("mem_limit");
2459ada9da9SMinchan Kim 	down_read(&zram->init_lock);
2469ada9da9SMinchan Kim 	val = zram->limit_pages;
2479ada9da9SMinchan Kim 	up_read(&zram->init_lock);
2489ada9da9SMinchan Kim 
2499ada9da9SMinchan Kim 	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
2509ada9da9SMinchan Kim }
2519ada9da9SMinchan Kim 
2529ada9da9SMinchan Kim static ssize_t mem_limit_store(struct device *dev,
2539ada9da9SMinchan Kim 		struct device_attribute *attr, const char *buf, size_t len)
2549ada9da9SMinchan Kim {
2559ada9da9SMinchan Kim 	u64 limit;
2569ada9da9SMinchan Kim 	char *tmp;
2579ada9da9SMinchan Kim 	struct zram *zram = dev_to_zram(dev);
2589ada9da9SMinchan Kim 
2599ada9da9SMinchan Kim 	limit = memparse(buf, &tmp);
2609ada9da9SMinchan Kim 	if (buf == tmp) /* no chars parsed, invalid input */
2619ada9da9SMinchan Kim 		return -EINVAL;
2629ada9da9SMinchan Kim 
2639ada9da9SMinchan Kim 	down_write(&zram->init_lock);
2649ada9da9SMinchan Kim 	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
2659ada9da9SMinchan Kim 	up_write(&zram->init_lock);
2669ada9da9SMinchan Kim 
2679ada9da9SMinchan Kim 	return len;
2689ada9da9SMinchan Kim }
2699ada9da9SMinchan Kim 
270461a8eeeSMinchan Kim static ssize_t mem_used_max_show(struct device *dev,
271461a8eeeSMinchan Kim 		struct device_attribute *attr, char *buf)
272461a8eeeSMinchan Kim {
273461a8eeeSMinchan Kim 	u64 val = 0;
274461a8eeeSMinchan Kim 	struct zram *zram = dev_to_zram(dev);
275461a8eeeSMinchan Kim 
2768f7d282cSSergey Senozhatsky 	deprecated_attr_warn("mem_used_max");
277461a8eeeSMinchan Kim 	down_read(&zram->init_lock);
278461a8eeeSMinchan Kim 	if (init_done(zram))
279461a8eeeSMinchan Kim 		val = atomic_long_read(&zram->stats.max_used_pages);
280461a8eeeSMinchan Kim 	up_read(&zram->init_lock);
281461a8eeeSMinchan Kim 
282461a8eeeSMinchan Kim 	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
283461a8eeeSMinchan Kim }
284461a8eeeSMinchan Kim 
285461a8eeeSMinchan Kim static ssize_t mem_used_max_store(struct device *dev,
286461a8eeeSMinchan Kim 		struct device_attribute *attr, const char *buf, size_t len)
287461a8eeeSMinchan Kim {
288461a8eeeSMinchan Kim 	int err;
289461a8eeeSMinchan Kim 	unsigned long val;
290461a8eeeSMinchan Kim 	struct zram *zram = dev_to_zram(dev);
291461a8eeeSMinchan Kim 
292461a8eeeSMinchan Kim 	err = kstrtoul(buf, 10, &val);
293461a8eeeSMinchan Kim 	if (err || val != 0)
294461a8eeeSMinchan Kim 		return -EINVAL;
295461a8eeeSMinchan Kim 
296461a8eeeSMinchan Kim 	down_read(&zram->init_lock);
2975a99e95bSWeijie Yang 	if (init_done(zram)) {
2985a99e95bSWeijie Yang 		struct zram_meta *meta = zram->meta;
299461a8eeeSMinchan Kim 		atomic_long_set(&zram->stats.max_used_pages,
300461a8eeeSMinchan Kim 				zs_get_total_pages(meta->mem_pool));
3015a99e95bSWeijie Yang 	}
302461a8eeeSMinchan Kim 	up_read(&zram->init_lock);
303461a8eeeSMinchan Kim 
304461a8eeeSMinchan Kim 	return len;
305461a8eeeSMinchan Kim }
306461a8eeeSMinchan Kim 
307*43209ea2SSergey Senozhatsky /*
308*43209ea2SSergey Senozhatsky  * We switched to per-cpu streams and this attr is not needed anymore.
309*43209ea2SSergey Senozhatsky  * However, we will keep it around for some time, because:
310*43209ea2SSergey Senozhatsky  * a) we may revert per-cpu streams in the future
311*43209ea2SSergey Senozhatsky  * b) it's visible to user space and we need to follow our 2 years
312*43209ea2SSergey Senozhatsky  *    retirement rule; but we already have a number of 'soon to be
313*43209ea2SSergey Senozhatsky  *    altered' attrs, so max_comp_streams need to wait for the next
314*43209ea2SSergey Senozhatsky  *    layoff cycle.
315*43209ea2SSergey Senozhatsky  */
316522698d7SSergey Senozhatsky static ssize_t max_comp_streams_show(struct device *dev,
317522698d7SSergey Senozhatsky 		struct device_attribute *attr, char *buf)
318522698d7SSergey Senozhatsky {
319*43209ea2SSergey Senozhatsky 	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
320522698d7SSergey Senozhatsky }
321522698d7SSergey Senozhatsky 
322beca3ec7SSergey Senozhatsky static ssize_t max_comp_streams_store(struct device *dev,
323beca3ec7SSergey Senozhatsky 		struct device_attribute *attr, const char *buf, size_t len)
324beca3ec7SSergey Senozhatsky {
325*43209ea2SSergey Senozhatsky 	return len;
326beca3ec7SSergey Senozhatsky }
327beca3ec7SSergey Senozhatsky 
328e46b8a03SSergey Senozhatsky static ssize_t comp_algorithm_show(struct device *dev,
329e46b8a03SSergey Senozhatsky 		struct device_attribute *attr, char *buf)
330e46b8a03SSergey Senozhatsky {
331e46b8a03SSergey Senozhatsky 	size_t sz;
332e46b8a03SSergey Senozhatsky 	struct zram *zram = dev_to_zram(dev);
333e46b8a03SSergey Senozhatsky 
334e46b8a03SSergey Senozhatsky 	down_read(&zram->init_lock);
335e46b8a03SSergey Senozhatsky 	sz = zcomp_available_show(zram->compressor, buf);
336e46b8a03SSergey Senozhatsky 	up_read(&zram->init_lock);
337e46b8a03SSergey Senozhatsky 
338e46b8a03SSergey Senozhatsky 	return sz;
339e46b8a03SSergey Senozhatsky }
340e46b8a03SSergey Senozhatsky 
341e46b8a03SSergey Senozhatsky static ssize_t comp_algorithm_store(struct device *dev,
342e46b8a03SSergey Senozhatsky 		struct device_attribute *attr, const char *buf, size_t len)
343e46b8a03SSergey Senozhatsky {
344e46b8a03SSergey Senozhatsky 	struct zram *zram = dev_to_zram(dev);
3454bbacd51SSergey Senozhatsky 	size_t sz;
3464bbacd51SSergey Senozhatsky 
3471d5b43bfSLuis Henriques 	if (!zcomp_available_algorithm(buf))
3481d5b43bfSLuis Henriques 		return -EINVAL;
3491d5b43bfSLuis Henriques 
350e46b8a03SSergey Senozhatsky 	down_write(&zram->init_lock);
351e46b8a03SSergey Senozhatsky 	if (init_done(zram)) {
352e46b8a03SSergey Senozhatsky 		up_write(&zram->init_lock);
353e46b8a03SSergey Senozhatsky 		pr_info("Can't change algorithm for initialized device\n");
354e46b8a03SSergey Senozhatsky 		return -EBUSY;
355e46b8a03SSergey Senozhatsky 	}
356e46b8a03SSergey Senozhatsky 	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
3574bbacd51SSergey Senozhatsky 
3584bbacd51SSergey Senozhatsky 	/* ignore trailing newline */
3594bbacd51SSergey Senozhatsky 	sz = strlen(zram->compressor);
3604bbacd51SSergey Senozhatsky 	if (sz > 0 && zram->compressor[sz - 1] == '\n')
3614bbacd51SSergey Senozhatsky 		zram->compressor[sz - 1] = 0x00;
3624bbacd51SSergey Senozhatsky 
363e46b8a03SSergey Senozhatsky 	up_write(&zram->init_lock);
364e46b8a03SSergey Senozhatsky 	return len;
365e46b8a03SSergey Senozhatsky }
366e46b8a03SSergey Senozhatsky 
367522698d7SSergey Senozhatsky static ssize_t compact_store(struct device *dev,
368522698d7SSergey Senozhatsky 		struct device_attribute *attr, const char *buf, size_t len)
369cd67e10aSMinchan Kim {
370522698d7SSergey Senozhatsky 	struct zram *zram = dev_to_zram(dev);
371522698d7SSergey Senozhatsky 	struct zram_meta *meta;
372522698d7SSergey Senozhatsky 
373522698d7SSergey Senozhatsky 	down_read(&zram->init_lock);
374522698d7SSergey Senozhatsky 	if (!init_done(zram)) {
375522698d7SSergey Senozhatsky 		up_read(&zram->init_lock);
376522698d7SSergey Senozhatsky 		return -EINVAL;
377cd67e10aSMinchan Kim 	}
378cd67e10aSMinchan Kim 
379522698d7SSergey Senozhatsky 	meta = zram->meta;
3807d3f3938SSergey Senozhatsky 	zs_compact(meta->mem_pool);
381522698d7SSergey Senozhatsky 	up_read(&zram->init_lock);
382522698d7SSergey Senozhatsky 
383522698d7SSergey Senozhatsky 	return len;
384cd67e10aSMinchan Kim }
385cd67e10aSMinchan Kim 
386522698d7SSergey Senozhatsky static ssize_t io_stat_show(struct device *dev,
387522698d7SSergey Senozhatsky 		struct device_attribute *attr, char *buf)
388cd67e10aSMinchan Kim {
389522698d7SSergey Senozhatsky 	struct zram *zram = dev_to_zram(dev);
390522698d7SSergey Senozhatsky 	ssize_t ret;
391522698d7SSergey Senozhatsky 
392522698d7SSergey Senozhatsky 	down_read(&zram->init_lock);
393522698d7SSergey Senozhatsky 	ret = scnprintf(buf, PAGE_SIZE,
394522698d7SSergey Senozhatsky 			"%8llu %8llu %8llu %8llu\n",
395522698d7SSergey Senozhatsky 			(u64)atomic64_read(&zram->stats.failed_reads),
396522698d7SSergey Senozhatsky 			(u64)atomic64_read(&zram->stats.failed_writes),
397522698d7SSergey Senozhatsky 			(u64)atomic64_read(&zram->stats.invalid_io),
398522698d7SSergey Senozhatsky 			(u64)atomic64_read(&zram->stats.notify_free));
399522698d7SSergey Senozhatsky 	up_read(&zram->init_lock);
400522698d7SSergey Senozhatsky 
401522698d7SSergey Senozhatsky 	return ret;
402d2d5e762SWeijie Yang }
403d2d5e762SWeijie Yang 
404522698d7SSergey Senozhatsky static ssize_t mm_stat_show(struct device *dev,
405522698d7SSergey Senozhatsky 		struct device_attribute *attr, char *buf)
406d2d5e762SWeijie Yang {
407522698d7SSergey Senozhatsky 	struct zram *zram = dev_to_zram(dev);
4087d3f3938SSergey Senozhatsky 	struct zs_pool_stats pool_stats;
409522698d7SSergey Senozhatsky 	u64 orig_size, mem_used = 0;
410522698d7SSergey Senozhatsky 	long max_used;
411522698d7SSergey Senozhatsky 	ssize_t ret;
412522698d7SSergey Senozhatsky 
4137d3f3938SSergey Senozhatsky 	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));
4147d3f3938SSergey Senozhatsky 
415522698d7SSergey Senozhatsky 	down_read(&zram->init_lock);
4167d3f3938SSergey Senozhatsky 	if (init_done(zram)) {
417522698d7SSergey Senozhatsky 		mem_used = zs_get_total_pages(zram->meta->mem_pool);
4187d3f3938SSergey Senozhatsky 		zs_pool_stats(zram->meta->mem_pool, &pool_stats);
4197d3f3938SSergey Senozhatsky 	}
420522698d7SSergey Senozhatsky 
421522698d7SSergey Senozhatsky 	orig_size = atomic64_read(&zram->stats.pages_stored);
422522698d7SSergey Senozhatsky 	max_used = atomic_long_read(&zram->stats.max_used_pages);
423522698d7SSergey Senozhatsky 
424522698d7SSergey Senozhatsky 	ret = scnprintf(buf, PAGE_SIZE,
4257d3f3938SSergey Senozhatsky 			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
426522698d7SSergey Senozhatsky 			orig_size << PAGE_SHIFT,
427522698d7SSergey Senozhatsky 			(u64)atomic64_read(&zram->stats.compr_data_size),
428522698d7SSergey Senozhatsky 			mem_used << PAGE_SHIFT,
429522698d7SSergey Senozhatsky 			zram->limit_pages << PAGE_SHIFT,
430522698d7SSergey Senozhatsky 			max_used << PAGE_SHIFT,
431522698d7SSergey Senozhatsky 			(u64)atomic64_read(&zram->stats.zero_pages),
432860c707dSSergey Senozhatsky 			pool_stats.pages_compacted);
433522698d7SSergey Senozhatsky 	up_read(&zram->init_lock);
434522698d7SSergey Senozhatsky 
435522698d7SSergey Senozhatsky 	return ret;
436d2d5e762SWeijie Yang }
437d2d5e762SWeijie Yang 
438522698d7SSergey Senozhatsky static DEVICE_ATTR_RO(io_stat);
439522698d7SSergey Senozhatsky static DEVICE_ATTR_RO(mm_stat);
440522698d7SSergey Senozhatsky ZRAM_ATTR_RO(num_reads);
441522698d7SSergey Senozhatsky ZRAM_ATTR_RO(num_writes);
442522698d7SSergey Senozhatsky ZRAM_ATTR_RO(failed_reads);
443522698d7SSergey Senozhatsky ZRAM_ATTR_RO(failed_writes);
444522698d7SSergey Senozhatsky ZRAM_ATTR_RO(invalid_io);
445522698d7SSergey Senozhatsky ZRAM_ATTR_RO(notify_free);
446522698d7SSergey Senozhatsky ZRAM_ATTR_RO(zero_pages);
447522698d7SSergey Senozhatsky ZRAM_ATTR_RO(compr_data_size);
448d2d5e762SWeijie Yang 
449522698d7SSergey Senozhatsky static inline bool zram_meta_get(struct zram *zram)
450522698d7SSergey Senozhatsky {
451522698d7SSergey Senozhatsky 	if (atomic_inc_not_zero(&zram->refcount))
452522698d7SSergey Senozhatsky 		return true;
453522698d7SSergey Senozhatsky 	return false;
454cd67e10aSMinchan Kim }
455cd67e10aSMinchan Kim 
456522698d7SSergey Senozhatsky static inline void zram_meta_put(struct zram *zram)
457cd67e10aSMinchan Kim {
458522698d7SSergey Senozhatsky 	atomic_dec(&zram->refcount);
459cd67e10aSMinchan Kim }
460cd67e10aSMinchan Kim 
4611fec1172SGanesh Mahendran static void zram_meta_free(struct zram_meta *meta, u64 disksize)
462cd67e10aSMinchan Kim {
4631fec1172SGanesh Mahendran 	size_t num_pages = disksize >> PAGE_SHIFT;
4641fec1172SGanesh Mahendran 	size_t index;
4651fec1172SGanesh Mahendran 
4661fec1172SGanesh Mahendran 	/* Free all pages that are still in this zram device */
4671fec1172SGanesh Mahendran 	for (index = 0; index < num_pages; index++) {
4681fec1172SGanesh Mahendran 		unsigned long handle = meta->table[index].handle;
4691fec1172SGanesh Mahendran 
4701fec1172SGanesh Mahendran 		if (!handle)
4711fec1172SGanesh Mahendran 			continue;
4721fec1172SGanesh Mahendran 
4731fec1172SGanesh Mahendran 		zs_free(meta->mem_pool, handle);
4741fec1172SGanesh Mahendran 	}
4751fec1172SGanesh Mahendran 
476cd67e10aSMinchan Kim 	zs_destroy_pool(meta->mem_pool);
477cd67e10aSMinchan Kim 	vfree(meta->table);
478cd67e10aSMinchan Kim 	kfree(meta);
479cd67e10aSMinchan Kim }
480cd67e10aSMinchan Kim 
4814ce321f5SSergey Senozhatsky static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
482cd67e10aSMinchan Kim {
483cd67e10aSMinchan Kim 	size_t num_pages;
484cd67e10aSMinchan Kim 	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
485b8179958SSergey Senozhatsky 
486cd67e10aSMinchan Kim 	if (!meta)
487b8179958SSergey Senozhatsky 		return NULL;
488cd67e10aSMinchan Kim 
489cd67e10aSMinchan Kim 	num_pages = disksize >> PAGE_SHIFT;
490cd67e10aSMinchan Kim 	meta->table = vzalloc(num_pages * sizeof(*meta->table));
491cd67e10aSMinchan Kim 	if (!meta->table) {
492cd67e10aSMinchan Kim 		pr_err("Error allocating zram address table\n");
493b8179958SSergey Senozhatsky 		goto out_error;
494cd67e10aSMinchan Kim 	}
495cd67e10aSMinchan Kim 
496d0d8da2dSSergey Senozhatsky 	meta->mem_pool = zs_create_pool(pool_name);
497cd67e10aSMinchan Kim 	if (!meta->mem_pool) {
498cd67e10aSMinchan Kim 		pr_err("Error creating memory pool\n");
499b8179958SSergey Senozhatsky 		goto out_error;
500cd67e10aSMinchan Kim 	}
501cd67e10aSMinchan Kim 
502cd67e10aSMinchan Kim 	return meta;
503cd67e10aSMinchan Kim 
504b8179958SSergey Senozhatsky out_error:
505cd67e10aSMinchan Kim 	vfree(meta->table);
506cd67e10aSMinchan Kim 	kfree(meta);
507b8179958SSergey Senozhatsky 	return NULL;
508cd67e10aSMinchan Kim }
509cd67e10aSMinchan Kim 
510d2d5e762SWeijie Yang /*
511d2d5e762SWeijie Yang  * To protect concurrent access to the same index entry,
512d2d5e762SWeijie Yang  * caller should hold this table index entry's bit_spinlock to
513d2d5e762SWeijie Yang  * indicate this index entry is accessing.
514d2d5e762SWeijie Yang  */
515cd67e10aSMinchan Kim static void zram_free_page(struct zram *zram, size_t index)
516cd67e10aSMinchan Kim {
517cd67e10aSMinchan Kim 	struct zram_meta *meta = zram->meta;
518cd67e10aSMinchan Kim 	unsigned long handle = meta->table[index].handle;
519cd67e10aSMinchan Kim 
520cd67e10aSMinchan Kim 	if (unlikely(!handle)) {
521cd67e10aSMinchan Kim 		/*
522cd67e10aSMinchan Kim 		 * No memory is allocated for zero filled pages.
523cd67e10aSMinchan Kim 		 * Simply clear zero page flag.
524cd67e10aSMinchan Kim 		 */
525cd67e10aSMinchan Kim 		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
526cd67e10aSMinchan Kim 			zram_clear_flag(meta, index, ZRAM_ZERO);
52790a7806eSSergey Senozhatsky 			atomic64_dec(&zram->stats.zero_pages);
528cd67e10aSMinchan Kim 		}
529cd67e10aSMinchan Kim 		return;
530cd67e10aSMinchan Kim 	}
531cd67e10aSMinchan Kim 
532cd67e10aSMinchan Kim 	zs_free(meta->mem_pool, handle);
533cd67e10aSMinchan Kim 
534d2d5e762SWeijie Yang 	atomic64_sub(zram_get_obj_size(meta, index),
535d2d5e762SWeijie Yang 			&zram->stats.compr_data_size);
53690a7806eSSergey Senozhatsky 	atomic64_dec(&zram->stats.pages_stored);
537cd67e10aSMinchan Kim 
538cd67e10aSMinchan Kim 	meta->table[index].handle = 0;
539d2d5e762SWeijie Yang 	zram_set_obj_size(meta, index, 0);
540cd67e10aSMinchan Kim }
541cd67e10aSMinchan Kim 
542cd67e10aSMinchan Kim static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
543cd67e10aSMinchan Kim {
544b7ca232eSSergey Senozhatsky 	int ret = 0;
545cd67e10aSMinchan Kim 	unsigned char *cmem;
546cd67e10aSMinchan Kim 	struct zram_meta *meta = zram->meta;
54792967471SMinchan Kim 	unsigned long handle;
548023b409fSMinchan Kim 	size_t size;
54992967471SMinchan Kim 
550d2d5e762SWeijie Yang 	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
55192967471SMinchan Kim 	handle = meta->table[index].handle;
552d2d5e762SWeijie Yang 	size = zram_get_obj_size(meta, index);
553cd67e10aSMinchan Kim 
554cd67e10aSMinchan Kim 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
555d2d5e762SWeijie Yang 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
556cd67e10aSMinchan Kim 		clear_page(mem);
557cd67e10aSMinchan Kim 		return 0;
558cd67e10aSMinchan Kim 	}
559cd67e10aSMinchan Kim 
560cd67e10aSMinchan Kim 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
56192967471SMinchan Kim 	if (size == PAGE_SIZE)
562cd67e10aSMinchan Kim 		copy_page(mem, cmem);
563cd67e10aSMinchan Kim 	else
564b7ca232eSSergey Senozhatsky 		ret = zcomp_decompress(zram->comp, cmem, size, mem);
565cd67e10aSMinchan Kim 	zs_unmap_object(meta->mem_pool, handle);
566d2d5e762SWeijie Yang 	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
567cd67e10aSMinchan Kim 
568cd67e10aSMinchan Kim 	/* Should NEVER happen. Return bio error if it does. */
569b7ca232eSSergey Senozhatsky 	if (unlikely(ret)) {
570cd67e10aSMinchan Kim 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
571cd67e10aSMinchan Kim 		return ret;
572cd67e10aSMinchan Kim 	}
573cd67e10aSMinchan Kim 
574cd67e10aSMinchan Kim 	return 0;
575cd67e10aSMinchan Kim }
576cd67e10aSMinchan Kim 
577cd67e10aSMinchan Kim static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
578b627cff3Skaram.lee 			  u32 index, int offset)
579cd67e10aSMinchan Kim {
580cd67e10aSMinchan Kim 	int ret;
581cd67e10aSMinchan Kim 	struct page *page;
582cd67e10aSMinchan Kim 	unsigned char *user_mem, *uncmem = NULL;
583cd67e10aSMinchan Kim 	struct zram_meta *meta = zram->meta;
584cd67e10aSMinchan Kim 	page = bvec->bv_page;
585cd67e10aSMinchan Kim 
586d2d5e762SWeijie Yang 	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
587cd67e10aSMinchan Kim 	if (unlikely(!meta->table[index].handle) ||
588cd67e10aSMinchan Kim 			zram_test_flag(meta, index, ZRAM_ZERO)) {
589d2d5e762SWeijie Yang 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
590cd67e10aSMinchan Kim 		handle_zero_page(bvec);
591cd67e10aSMinchan Kim 		return 0;
592cd67e10aSMinchan Kim 	}
593d2d5e762SWeijie Yang 	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
594cd67e10aSMinchan Kim 
595cd67e10aSMinchan Kim 	if (is_partial_io(bvec))
596cd67e10aSMinchan Kim 		/* Use  a temporary buffer to decompress the page */
597cd67e10aSMinchan Kim 		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
598cd67e10aSMinchan Kim 
599cd67e10aSMinchan Kim 	user_mem = kmap_atomic(page);
600cd67e10aSMinchan Kim 	if (!is_partial_io(bvec))
601cd67e10aSMinchan Kim 		uncmem = user_mem;
602cd67e10aSMinchan Kim 
603cd67e10aSMinchan Kim 	if (!uncmem) {
60470864969SSergey Senozhatsky 		pr_err("Unable to allocate temp memory\n");
605cd67e10aSMinchan Kim 		ret = -ENOMEM;
606cd67e10aSMinchan Kim 		goto out_cleanup;
607cd67e10aSMinchan Kim 	}
608cd67e10aSMinchan Kim 
609cd67e10aSMinchan Kim 	ret = zram_decompress_page(zram, uncmem, index);
610cd67e10aSMinchan Kim 	/* Should NEVER happen. Return bio error if it does. */
611b7ca232eSSergey Senozhatsky 	if (unlikely(ret))
612cd67e10aSMinchan Kim 		goto out_cleanup;
613cd67e10aSMinchan Kim 
614cd67e10aSMinchan Kim 	if (is_partial_io(bvec))
615cd67e10aSMinchan Kim 		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
616cd67e10aSMinchan Kim 				bvec->bv_len);
617cd67e10aSMinchan Kim 
618cd67e10aSMinchan Kim 	flush_dcache_page(page);
619cd67e10aSMinchan Kim 	ret = 0;
620cd67e10aSMinchan Kim out_cleanup:
621cd67e10aSMinchan Kim 	kunmap_atomic(user_mem);
622cd67e10aSMinchan Kim 	if (is_partial_io(bvec))
623cd67e10aSMinchan Kim 		kfree(uncmem);
624cd67e10aSMinchan Kim 	return ret;
625cd67e10aSMinchan Kim }
626cd67e10aSMinchan Kim 
627cd67e10aSMinchan Kim static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
628cd67e10aSMinchan Kim 			   int offset)
629cd67e10aSMinchan Kim {
630cd67e10aSMinchan Kim 	int ret = 0;
631cd67e10aSMinchan Kim 	size_t clen;
632da9556a2SSergey Senozhatsky 	unsigned long handle = 0;
633cd67e10aSMinchan Kim 	struct page *page;
634cd67e10aSMinchan Kim 	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
635cd67e10aSMinchan Kim 	struct zram_meta *meta = zram->meta;
63617162f41SSergey Senozhatsky 	struct zcomp_strm *zstrm = NULL;
637461a8eeeSMinchan Kim 	unsigned long alloced_pages;
638cd67e10aSMinchan Kim 
639cd67e10aSMinchan Kim 	page = bvec->bv_page;
640cd67e10aSMinchan Kim 	if (is_partial_io(bvec)) {
641cd67e10aSMinchan Kim 		/*
642cd67e10aSMinchan Kim 		 * This is a partial IO. We need to read the full page
643cd67e10aSMinchan Kim 		 * before to write the changes.
644cd67e10aSMinchan Kim 		 */
645cd67e10aSMinchan Kim 		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
646cd67e10aSMinchan Kim 		if (!uncmem) {
647cd67e10aSMinchan Kim 			ret = -ENOMEM;
648cd67e10aSMinchan Kim 			goto out;
649cd67e10aSMinchan Kim 		}
650cd67e10aSMinchan Kim 		ret = zram_decompress_page(zram, uncmem, index);
651cd67e10aSMinchan Kim 		if (ret)
652cd67e10aSMinchan Kim 			goto out;
653cd67e10aSMinchan Kim 	}
654cd67e10aSMinchan Kim 
655da9556a2SSergey Senozhatsky compress_again:
656cd67e10aSMinchan Kim 	user_mem = kmap_atomic(page);
657cd67e10aSMinchan Kim 	if (is_partial_io(bvec)) {
658cd67e10aSMinchan Kim 		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
659cd67e10aSMinchan Kim 		       bvec->bv_len);
660cd67e10aSMinchan Kim 		kunmap_atomic(user_mem);
661cd67e10aSMinchan Kim 		user_mem = NULL;
662cd67e10aSMinchan Kim 	} else {
663cd67e10aSMinchan Kim 		uncmem = user_mem;
664cd67e10aSMinchan Kim 	}
665cd67e10aSMinchan Kim 
666cd67e10aSMinchan Kim 	if (page_zero_filled(uncmem)) {
667c4065152SWeijie Yang 		if (user_mem)
668cd67e10aSMinchan Kim 			kunmap_atomic(user_mem);
669cd67e10aSMinchan Kim 		/* Free memory associated with this sector now. */
670d2d5e762SWeijie Yang 		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
671cd67e10aSMinchan Kim 		zram_free_page(zram, index);
67292967471SMinchan Kim 		zram_set_flag(meta, index, ZRAM_ZERO);
673d2d5e762SWeijie Yang 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
674cd67e10aSMinchan Kim 
67590a7806eSSergey Senozhatsky 		atomic64_inc(&zram->stats.zero_pages);
676cd67e10aSMinchan Kim 		ret = 0;
677cd67e10aSMinchan Kim 		goto out;
678cd67e10aSMinchan Kim 	}
679cd67e10aSMinchan Kim 
680da9556a2SSergey Senozhatsky 	zstrm = zcomp_strm_find(zram->comp);
681b7ca232eSSergey Senozhatsky 	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
682cd67e10aSMinchan Kim 	if (!is_partial_io(bvec)) {
683cd67e10aSMinchan Kim 		kunmap_atomic(user_mem);
684cd67e10aSMinchan Kim 		user_mem = NULL;
685cd67e10aSMinchan Kim 		uncmem = NULL;
686cd67e10aSMinchan Kim 	}
687cd67e10aSMinchan Kim 
688b7ca232eSSergey Senozhatsky 	if (unlikely(ret)) {
689cd67e10aSMinchan Kim 		pr_err("Compression failed! err=%d\n", ret);
690cd67e10aSMinchan Kim 		goto out;
691cd67e10aSMinchan Kim 	}
692da9556a2SSergey Senozhatsky 
693b7ca232eSSergey Senozhatsky 	src = zstrm->buffer;
694cd67e10aSMinchan Kim 	if (unlikely(clen > max_zpage_size)) {
695cd67e10aSMinchan Kim 		clen = PAGE_SIZE;
696cd67e10aSMinchan Kim 		if (is_partial_io(bvec))
697cd67e10aSMinchan Kim 			src = uncmem;
698cd67e10aSMinchan Kim 	}
699cd67e10aSMinchan Kim 
700da9556a2SSergey Senozhatsky 	/*
701da9556a2SSergey Senozhatsky 	 * handle allocation has 2 paths:
702da9556a2SSergey Senozhatsky 	 * a) fast path is executed with preemption disabled (for
703da9556a2SSergey Senozhatsky 	 *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
704da9556a2SSergey Senozhatsky 	 *  since we can't sleep;
705da9556a2SSergey Senozhatsky 	 * b) slow path enables preemption and attempts to allocate
706da9556a2SSergey Senozhatsky 	 *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
707da9556a2SSergey Senozhatsky 	 *  put per-cpu compression stream and, thus, to re-do
708da9556a2SSergey Senozhatsky 	 *  the compression once handle is allocated.
709da9556a2SSergey Senozhatsky 	 *
710da9556a2SSergey Senozhatsky 	 * if we have a 'non-null' handle here then we are coming
711da9556a2SSergey Senozhatsky 	 * from the slow path and handle has already been allocated.
712da9556a2SSergey Senozhatsky 	 */
713da9556a2SSergey Senozhatsky 	if (!handle)
714da9556a2SSergey Senozhatsky 		handle = zs_malloc(meta->mem_pool, clen,
715da9556a2SSergey Senozhatsky 				__GFP_KSWAPD_RECLAIM |
716da9556a2SSergey Senozhatsky 				__GFP_NOWARN |
717da9556a2SSergey Senozhatsky 				__GFP_HIGHMEM);
718cd67e10aSMinchan Kim 	if (!handle) {
719da9556a2SSergey Senozhatsky 		zcomp_strm_release(zram->comp, zstrm);
720da9556a2SSergey Senozhatsky 		zstrm = NULL;
721da9556a2SSergey Senozhatsky 
722da9556a2SSergey Senozhatsky 		handle = zs_malloc(meta->mem_pool, clen,
723da9556a2SSergey Senozhatsky 				GFP_NOIO | __GFP_HIGHMEM);
724da9556a2SSergey Senozhatsky 		if (handle)
725da9556a2SSergey Senozhatsky 			goto compress_again;
726da9556a2SSergey Senozhatsky 
72770864969SSergey Senozhatsky 		pr_err("Error allocating memory for compressed page: %u, size=%zu\n",
728cd67e10aSMinchan Kim 			index, clen);
729cd67e10aSMinchan Kim 		ret = -ENOMEM;
730cd67e10aSMinchan Kim 		goto out;
731cd67e10aSMinchan Kim 	}
7329ada9da9SMinchan Kim 
733461a8eeeSMinchan Kim 	alloced_pages = zs_get_total_pages(meta->mem_pool);
73412372755SSergey SENOZHATSKY 	update_used_max(zram, alloced_pages);
73512372755SSergey SENOZHATSKY 
736461a8eeeSMinchan Kim 	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
7379ada9da9SMinchan Kim 		zs_free(meta->mem_pool, handle);
7389ada9da9SMinchan Kim 		ret = -ENOMEM;
7399ada9da9SMinchan Kim 		goto out;
7409ada9da9SMinchan Kim 	}
7419ada9da9SMinchan Kim 
742cd67e10aSMinchan Kim 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
743cd67e10aSMinchan Kim 
744cd67e10aSMinchan Kim 	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
745cd67e10aSMinchan Kim 		src = kmap_atomic(page);
746cd67e10aSMinchan Kim 		copy_page(cmem, src);
747cd67e10aSMinchan Kim 		kunmap_atomic(src);
748cd67e10aSMinchan Kim 	} else {
749cd67e10aSMinchan Kim 		memcpy(cmem, src, clen);
750cd67e10aSMinchan Kim 	}
751cd67e10aSMinchan Kim 
752b7ca232eSSergey Senozhatsky 	zcomp_strm_release(zram->comp, zstrm);
75317162f41SSergey Senozhatsky 	zstrm = NULL;
754cd67e10aSMinchan Kim 	zs_unmap_object(meta->mem_pool, handle);
755cd67e10aSMinchan Kim 
756cd67e10aSMinchan Kim 	/*
757cd67e10aSMinchan Kim 	 * Free memory associated with this sector
758cd67e10aSMinchan Kim 	 * before overwriting unused sectors.
759cd67e10aSMinchan Kim 	 */
760d2d5e762SWeijie Yang 	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
761cd67e10aSMinchan Kim 	zram_free_page(zram, index);
762cd67e10aSMinchan Kim 
763cd67e10aSMinchan Kim 	meta->table[index].handle = handle;
764d2d5e762SWeijie Yang 	zram_set_obj_size(meta, index, clen);
765d2d5e762SWeijie Yang 	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
766cd67e10aSMinchan Kim 
767cd67e10aSMinchan Kim 	/* Update stats */
76890a7806eSSergey Senozhatsky 	atomic64_add(clen, &zram->stats.compr_data_size);
76990a7806eSSergey Senozhatsky 	atomic64_inc(&zram->stats.pages_stored);
770cd67e10aSMinchan Kim out:
77117162f41SSergey Senozhatsky 	if (zstrm)
772b7ca232eSSergey Senozhatsky 		zcomp_strm_release(zram->comp, zstrm);
773cd67e10aSMinchan Kim 	if (is_partial_io(bvec))
774cd67e10aSMinchan Kim 		kfree(uncmem);
775cd67e10aSMinchan Kim 	return ret;
776cd67e10aSMinchan Kim }
777cd67e10aSMinchan Kim 
778f4659d8eSJoonsoo Kim /*
779f4659d8eSJoonsoo Kim  * zram_bio_discard - handler on discard request
780f4659d8eSJoonsoo Kim  * @index: physical block index in PAGE_SIZE units
781f4659d8eSJoonsoo Kim  * @offset: byte offset within physical block
782f4659d8eSJoonsoo Kim  */
783f4659d8eSJoonsoo Kim static void zram_bio_discard(struct zram *zram, u32 index,
784f4659d8eSJoonsoo Kim 			     int offset, struct bio *bio)
785f4659d8eSJoonsoo Kim {
786f4659d8eSJoonsoo Kim 	size_t n = bio->bi_iter.bi_size;
787d2d5e762SWeijie Yang 	struct zram_meta *meta = zram->meta;
788f4659d8eSJoonsoo Kim 
789f4659d8eSJoonsoo Kim 	/*
790f4659d8eSJoonsoo Kim 	 * zram manages data in physical block size units. Because logical block
791f4659d8eSJoonsoo Kim 	 * size isn't identical with physical block size on some arch, we
792f4659d8eSJoonsoo Kim 	 * could get a discard request pointing to a specific offset within a
793f4659d8eSJoonsoo Kim 	 * certain physical block.  Although we can handle this request by
794f4659d8eSJoonsoo Kim 	 * reading that physiclal block and decompressing and partially zeroing
795f4659d8eSJoonsoo Kim 	 * and re-compressing and then re-storing it, this isn't reasonable
796f4659d8eSJoonsoo Kim 	 * because our intent with a discard request is to save memory.  So
797f4659d8eSJoonsoo Kim 	 * skipping this logical block is appropriate here.
798f4659d8eSJoonsoo Kim 	 */
799f4659d8eSJoonsoo Kim 	if (offset) {
80038515c73SWeijie Yang 		if (n <= (PAGE_SIZE - offset))
801f4659d8eSJoonsoo Kim 			return;
802f4659d8eSJoonsoo Kim 
80338515c73SWeijie Yang 		n -= (PAGE_SIZE - offset);
804f4659d8eSJoonsoo Kim 		index++;
805f4659d8eSJoonsoo Kim 	}
806f4659d8eSJoonsoo Kim 
807f4659d8eSJoonsoo Kim 	while (n >= PAGE_SIZE) {
808d2d5e762SWeijie Yang 		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
809f4659d8eSJoonsoo Kim 		zram_free_page(zram, index);
810d2d5e762SWeijie Yang 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
811015254daSSergey Senozhatsky 		atomic64_inc(&zram->stats.notify_free);
812f4659d8eSJoonsoo Kim 		index++;
813f4659d8eSJoonsoo Kim 		n -= PAGE_SIZE;
814f4659d8eSJoonsoo Kim 	}
815f4659d8eSJoonsoo Kim }
816f4659d8eSJoonsoo Kim 
817522698d7SSergey Senozhatsky static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
818522698d7SSergey Senozhatsky 			int offset, int rw)
819522698d7SSergey Senozhatsky {
820522698d7SSergey Senozhatsky 	unsigned long start_time = jiffies;
821522698d7SSergey Senozhatsky 	int ret;
822522698d7SSergey Senozhatsky 
823522698d7SSergey Senozhatsky 	generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
824522698d7SSergey Senozhatsky 			&zram->disk->part0);
825522698d7SSergey Senozhatsky 
826522698d7SSergey Senozhatsky 	if (rw == READ) {
827522698d7SSergey Senozhatsky 		atomic64_inc(&zram->stats.num_reads);
828522698d7SSergey Senozhatsky 		ret = zram_bvec_read(zram, bvec, index, offset);
829522698d7SSergey Senozhatsky 	} else {
830522698d7SSergey Senozhatsky 		atomic64_inc(&zram->stats.num_writes);
831522698d7SSergey Senozhatsky 		ret = zram_bvec_write(zram, bvec, index, offset);
832522698d7SSergey Senozhatsky 	}
833522698d7SSergey Senozhatsky 
834522698d7SSergey Senozhatsky 	generic_end_io_acct(rw, &zram->disk->part0, start_time);
835522698d7SSergey Senozhatsky 
836522698d7SSergey Senozhatsky 	if (unlikely(ret)) {
837522698d7SSergey Senozhatsky 		if (rw == READ)
838522698d7SSergey Senozhatsky 			atomic64_inc(&zram->stats.failed_reads);
839522698d7SSergey Senozhatsky 		else
840522698d7SSergey Senozhatsky 			atomic64_inc(&zram->stats.failed_writes);
841522698d7SSergey Senozhatsky 	}
842522698d7SSergey Senozhatsky 
843522698d7SSergey Senozhatsky 	return ret;
844522698d7SSergey Senozhatsky }
845522698d7SSergey Senozhatsky 
846522698d7SSergey Senozhatsky static void __zram_make_request(struct zram *zram, struct bio *bio)
847522698d7SSergey Senozhatsky {
848522698d7SSergey Senozhatsky 	int offset, rw;
849522698d7SSergey Senozhatsky 	u32 index;
850522698d7SSergey Senozhatsky 	struct bio_vec bvec;
851522698d7SSergey Senozhatsky 	struct bvec_iter iter;
852522698d7SSergey Senozhatsky 
853522698d7SSergey Senozhatsky 	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
854522698d7SSergey Senozhatsky 	offset = (bio->bi_iter.bi_sector &
855522698d7SSergey Senozhatsky 		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
856522698d7SSergey Senozhatsky 
857522698d7SSergey Senozhatsky 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
858522698d7SSergey Senozhatsky 		zram_bio_discard(zram, index, offset, bio);
8594246a0b6SChristoph Hellwig 		bio_endio(bio);
860522698d7SSergey Senozhatsky 		return;
861522698d7SSergey Senozhatsky 	}
862522698d7SSergey Senozhatsky 
863522698d7SSergey Senozhatsky 	rw = bio_data_dir(bio);
864522698d7SSergey Senozhatsky 	bio_for_each_segment(bvec, bio, iter) {
865522698d7SSergey Senozhatsky 		int max_transfer_size = PAGE_SIZE - offset;
866522698d7SSergey Senozhatsky 
867522698d7SSergey Senozhatsky 		if (bvec.bv_len > max_transfer_size) {
868522698d7SSergey Senozhatsky 			/*
869522698d7SSergey Senozhatsky 			 * zram_bvec_rw() can only make operation on a single
870522698d7SSergey Senozhatsky 			 * zram page. Split the bio vector.
871522698d7SSergey Senozhatsky 			 */
872522698d7SSergey Senozhatsky 			struct bio_vec bv;
873522698d7SSergey Senozhatsky 
874522698d7SSergey Senozhatsky 			bv.bv_page = bvec.bv_page;
875522698d7SSergey Senozhatsky 			bv.bv_len = max_transfer_size;
876522698d7SSergey Senozhatsky 			bv.bv_offset = bvec.bv_offset;
877522698d7SSergey Senozhatsky 
878522698d7SSergey Senozhatsky 			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
879522698d7SSergey Senozhatsky 				goto out;
880522698d7SSergey Senozhatsky 
881522698d7SSergey Senozhatsky 			bv.bv_len = bvec.bv_len - max_transfer_size;
882522698d7SSergey Senozhatsky 			bv.bv_offset += max_transfer_size;
883522698d7SSergey Senozhatsky 			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
884522698d7SSergey Senozhatsky 				goto out;
885522698d7SSergey Senozhatsky 		} else
886522698d7SSergey Senozhatsky 			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
887522698d7SSergey Senozhatsky 				goto out;
888522698d7SSergey Senozhatsky 
889522698d7SSergey Senozhatsky 		update_position(&index, &offset, &bvec);
890522698d7SSergey Senozhatsky 	}
891522698d7SSergey Senozhatsky 
8924246a0b6SChristoph Hellwig 	bio_endio(bio);
893522698d7SSergey Senozhatsky 	return;
894522698d7SSergey Senozhatsky 
895522698d7SSergey Senozhatsky out:
896522698d7SSergey Senozhatsky 	bio_io_error(bio);
897522698d7SSergey Senozhatsky }
898522698d7SSergey Senozhatsky 
899522698d7SSergey Senozhatsky /*
900522698d7SSergey Senozhatsky  * Handler function for all zram I/O requests.
901522698d7SSergey Senozhatsky  */
902dece1635SJens Axboe static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
903522698d7SSergey Senozhatsky {
904522698d7SSergey Senozhatsky 	struct zram *zram = queue->queuedata;
905522698d7SSergey Senozhatsky 
906522698d7SSergey Senozhatsky 	if (unlikely(!zram_meta_get(zram)))
907522698d7SSergey Senozhatsky 		goto error;
908522698d7SSergey Senozhatsky 
90954efd50bSKent Overstreet 	blk_queue_split(queue, &bio, queue->bio_split);
91054efd50bSKent Overstreet 
911522698d7SSergey Senozhatsky 	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
912522698d7SSergey Senozhatsky 					bio->bi_iter.bi_size)) {
913522698d7SSergey Senozhatsky 		atomic64_inc(&zram->stats.invalid_io);
914522698d7SSergey Senozhatsky 		goto put_zram;
915522698d7SSergey Senozhatsky 	}
916522698d7SSergey Senozhatsky 
917522698d7SSergey Senozhatsky 	__zram_make_request(zram, bio);
918522698d7SSergey Senozhatsky 	zram_meta_put(zram);
919dece1635SJens Axboe 	return BLK_QC_T_NONE;
920522698d7SSergey Senozhatsky put_zram:
921522698d7SSergey Senozhatsky 	zram_meta_put(zram);
922522698d7SSergey Senozhatsky error:
923522698d7SSergey Senozhatsky 	bio_io_error(bio);
924dece1635SJens Axboe 	return BLK_QC_T_NONE;
925522698d7SSergey Senozhatsky }
926522698d7SSergey Senozhatsky 
927522698d7SSergey Senozhatsky static void zram_slot_free_notify(struct block_device *bdev,
928522698d7SSergey Senozhatsky 				unsigned long index)
929522698d7SSergey Senozhatsky {
930522698d7SSergey Senozhatsky 	struct zram *zram;
931522698d7SSergey Senozhatsky 	struct zram_meta *meta;
932522698d7SSergey Senozhatsky 
933522698d7SSergey Senozhatsky 	zram = bdev->bd_disk->private_data;
934522698d7SSergey Senozhatsky 	meta = zram->meta;
935522698d7SSergey Senozhatsky 
936522698d7SSergey Senozhatsky 	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
937522698d7SSergey Senozhatsky 	zram_free_page(zram, index);
938522698d7SSergey Senozhatsky 	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
939522698d7SSergey Senozhatsky 	atomic64_inc(&zram->stats.notify_free);
940522698d7SSergey Senozhatsky }
941522698d7SSergey Senozhatsky 
942522698d7SSergey Senozhatsky static int zram_rw_page(struct block_device *bdev, sector_t sector,
943522698d7SSergey Senozhatsky 		       struct page *page, int rw)
944522698d7SSergey Senozhatsky {
945522698d7SSergey Senozhatsky 	int offset, err = -EIO;
946522698d7SSergey Senozhatsky 	u32 index;
947522698d7SSergey Senozhatsky 	struct zram *zram;
948522698d7SSergey Senozhatsky 	struct bio_vec bv;
949522698d7SSergey Senozhatsky 
950522698d7SSergey Senozhatsky 	zram = bdev->bd_disk->private_data;
951522698d7SSergey Senozhatsky 	if (unlikely(!zram_meta_get(zram)))
952522698d7SSergey Senozhatsky 		goto out;
953522698d7SSergey Senozhatsky 
954522698d7SSergey Senozhatsky 	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
955522698d7SSergey Senozhatsky 		atomic64_inc(&zram->stats.invalid_io);
956522698d7SSergey Senozhatsky 		err = -EINVAL;
957522698d7SSergey Senozhatsky 		goto put_zram;
958522698d7SSergey Senozhatsky 	}
959522698d7SSergey Senozhatsky 
960522698d7SSergey Senozhatsky 	index = sector >> SECTORS_PER_PAGE_SHIFT;
961522698d7SSergey Senozhatsky 	offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
962522698d7SSergey Senozhatsky 
963522698d7SSergey Senozhatsky 	bv.bv_page = page;
964522698d7SSergey Senozhatsky 	bv.bv_len = PAGE_SIZE;
965522698d7SSergey Senozhatsky 	bv.bv_offset = 0;
966522698d7SSergey Senozhatsky 
967522698d7SSergey Senozhatsky 	err = zram_bvec_rw(zram, &bv, index, offset, rw);
968522698d7SSergey Senozhatsky put_zram:
969522698d7SSergey Senozhatsky 	zram_meta_put(zram);
970522698d7SSergey Senozhatsky out:
971522698d7SSergey Senozhatsky 	/*
972522698d7SSergey Senozhatsky 	 * If I/O fails, just return error(ie, non-zero) without
973522698d7SSergey Senozhatsky 	 * calling page_endio.
974522698d7SSergey Senozhatsky 	 * It causes resubmit the I/O with bio request by upper functions
975522698d7SSergey Senozhatsky 	 * of rw_page(e.g., swap_readpage, __swap_writepage) and
976522698d7SSergey Senozhatsky 	 * bio->bi_end_io does things to handle the error
977522698d7SSergey Senozhatsky 	 * (e.g., SetPageError, set_page_dirty and extra works).
978522698d7SSergey Senozhatsky 	 */
979522698d7SSergey Senozhatsky 	if (err == 0)
980522698d7SSergey Senozhatsky 		page_endio(page, rw, 0);
981522698d7SSergey Senozhatsky 	return err;
982522698d7SSergey Senozhatsky }
983522698d7SSergey Senozhatsky 
984ba6b17d6SSergey Senozhatsky static void zram_reset_device(struct zram *zram)
985cd67e10aSMinchan Kim {
98608eee69fSMinchan Kim 	struct zram_meta *meta;
98708eee69fSMinchan Kim 	struct zcomp *comp;
98808eee69fSMinchan Kim 	u64 disksize;
98908eee69fSMinchan Kim 
990cd67e10aSMinchan Kim 	down_write(&zram->init_lock);
9919ada9da9SMinchan Kim 
9929ada9da9SMinchan Kim 	zram->limit_pages = 0;
9939ada9da9SMinchan Kim 
994be2d1d56SSergey Senozhatsky 	if (!init_done(zram)) {
995cd67e10aSMinchan Kim 		up_write(&zram->init_lock);
996cd67e10aSMinchan Kim 		return;
997cd67e10aSMinchan Kim 	}
998cd67e10aSMinchan Kim 
99908eee69fSMinchan Kim 	meta = zram->meta;
100008eee69fSMinchan Kim 	comp = zram->comp;
100108eee69fSMinchan Kim 	disksize = zram->disksize;
100208eee69fSMinchan Kim 	/*
100308eee69fSMinchan Kim 	 * Refcount will go down to 0 eventually and r/w handler
100408eee69fSMinchan Kim 	 * cannot handle further I/O so it will bail out by
100508eee69fSMinchan Kim 	 * check zram_meta_get.
100608eee69fSMinchan Kim 	 */
100708eee69fSMinchan Kim 	zram_meta_put(zram);
100808eee69fSMinchan Kim 	/*
100908eee69fSMinchan Kim 	 * We want to free zram_meta in process context to avoid
101008eee69fSMinchan Kim 	 * deadlock between reclaim path and any other locks.
101108eee69fSMinchan Kim 	 */
101208eee69fSMinchan Kim 	wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);
101308eee69fSMinchan Kim 
1014cd67e10aSMinchan Kim 	/* Reset stats */
1015cd67e10aSMinchan Kim 	memset(&zram->stats, 0, sizeof(zram->stats));
1016cd67e10aSMinchan Kim 	zram->disksize = 0;
1017d7ad41a1SWeijie Yang 
1018a096cafcSSergey Senozhatsky 	set_capacity(zram->disk, 0);
1019d7ad41a1SWeijie Yang 	part_stat_set_all(&zram->disk->part0, 0);
1020a096cafcSSergey Senozhatsky 
1021cd67e10aSMinchan Kim 	up_write(&zram->init_lock);
102208eee69fSMinchan Kim 	/* I/O operation under all of CPU are done so let's free */
102308eee69fSMinchan Kim 	zram_meta_free(meta, disksize);
102408eee69fSMinchan Kim 	zcomp_destroy(comp);
1025cd67e10aSMinchan Kim }
1026cd67e10aSMinchan Kim 
1027cd67e10aSMinchan Kim static ssize_t disksize_store(struct device *dev,
1028cd67e10aSMinchan Kim 		struct device_attribute *attr, const char *buf, size_t len)
1029cd67e10aSMinchan Kim {
1030cd67e10aSMinchan Kim 	u64 disksize;
1031d61f98c7SSergey Senozhatsky 	struct zcomp *comp;
1032cd67e10aSMinchan Kim 	struct zram_meta *meta;
1033cd67e10aSMinchan Kim 	struct zram *zram = dev_to_zram(dev);
1034fcfa8d95SSergey Senozhatsky 	int err;
1035cd67e10aSMinchan Kim 
1036cd67e10aSMinchan Kim 	disksize = memparse(buf, NULL);
1037cd67e10aSMinchan Kim 	if (!disksize)
1038cd67e10aSMinchan Kim 		return -EINVAL;
1039cd67e10aSMinchan Kim 
1040cd67e10aSMinchan Kim 	disksize = PAGE_ALIGN(disksize);
10414ce321f5SSergey Senozhatsky 	meta = zram_meta_alloc(zram->disk->disk_name, disksize);
1042db5d711eSMinchan Kim 	if (!meta)
1043db5d711eSMinchan Kim 		return -ENOMEM;
1044b67d1ec1SSergey Senozhatsky 
1045da9556a2SSergey Senozhatsky 	comp = zcomp_create(zram->compressor);
1046fcfa8d95SSergey Senozhatsky 	if (IS_ERR(comp)) {
104770864969SSergey Senozhatsky 		pr_err("Cannot initialise %s compressing backend\n",
1048e46b8a03SSergey Senozhatsky 				zram->compressor);
1049fcfa8d95SSergey Senozhatsky 		err = PTR_ERR(comp);
1050fcfa8d95SSergey Senozhatsky 		goto out_free_meta;
1051d61f98c7SSergey Senozhatsky 	}
1052d61f98c7SSergey Senozhatsky 
1053d61f98c7SSergey Senozhatsky 	down_write(&zram->init_lock);
1054d61f98c7SSergey Senozhatsky 	if (init_done(zram)) {
1055d61f98c7SSergey Senozhatsky 		pr_info("Cannot change disksize for initialized device\n");
1056d61f98c7SSergey Senozhatsky 		err = -EBUSY;
1057fcfa8d95SSergey Senozhatsky 		goto out_destroy_comp;
1058cd67e10aSMinchan Kim 	}
1059cd67e10aSMinchan Kim 
106008eee69fSMinchan Kim 	init_waitqueue_head(&zram->io_done);
106108eee69fSMinchan Kim 	atomic_set(&zram->refcount, 1);
1062b67d1ec1SSergey Senozhatsky 	zram->meta = meta;
1063d61f98c7SSergey Senozhatsky 	zram->comp = comp;
1064cd67e10aSMinchan Kim 	zram->disksize = disksize;
1065cd67e10aSMinchan Kim 	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
1066cd67e10aSMinchan Kim 	up_write(&zram->init_lock);
1067b4c5c609SMinchan Kim 
1068b4c5c609SMinchan Kim 	/*
1069b4c5c609SMinchan Kim 	 * Revalidate disk out of the init_lock to avoid lockdep splat.
1070b4c5c609SMinchan Kim 	 * It's okay because disk's capacity is protected by init_lock
1071b4c5c609SMinchan Kim 	 * so that revalidate_disk always sees up-to-date capacity.
1072b4c5c609SMinchan Kim 	 */
1073b4c5c609SMinchan Kim 	revalidate_disk(zram->disk);
1074b4c5c609SMinchan Kim 
1075cd67e10aSMinchan Kim 	return len;
1076b7ca232eSSergey Senozhatsky 
1077fcfa8d95SSergey Senozhatsky out_destroy_comp:
1078fcfa8d95SSergey Senozhatsky 	up_write(&zram->init_lock);
1079d61f98c7SSergey Senozhatsky 	zcomp_destroy(comp);
1080fcfa8d95SSergey Senozhatsky out_free_meta:
10811fec1172SGanesh Mahendran 	zram_meta_free(meta, disksize);
1082b7ca232eSSergey Senozhatsky 	return err;
1083cd67e10aSMinchan Kim }
1084cd67e10aSMinchan Kim 
1085cd67e10aSMinchan Kim static ssize_t reset_store(struct device *dev,
1086cd67e10aSMinchan Kim 		struct device_attribute *attr, const char *buf, size_t len)
1087cd67e10aSMinchan Kim {
1088cd67e10aSMinchan Kim 	int ret;
1089cd67e10aSMinchan Kim 	unsigned short do_reset;
1090cd67e10aSMinchan Kim 	struct zram *zram;
1091cd67e10aSMinchan Kim 	struct block_device *bdev;
1092cd67e10aSMinchan Kim 
1093f405c445SSergey Senozhatsky 	ret = kstrtou16(buf, 10, &do_reset);
1094f405c445SSergey Senozhatsky 	if (ret)
1095f405c445SSergey Senozhatsky 		return ret;
1096f405c445SSergey Senozhatsky 
1097f405c445SSergey Senozhatsky 	if (!do_reset)
1098f405c445SSergey Senozhatsky 		return -EINVAL;
1099f405c445SSergey Senozhatsky 
1100cd67e10aSMinchan Kim 	zram = dev_to_zram(dev);
1101cd67e10aSMinchan Kim 	bdev = bdget_disk(zram->disk, 0);
1102cd67e10aSMinchan Kim 	if (!bdev)
1103cd67e10aSMinchan Kim 		return -ENOMEM;
1104cd67e10aSMinchan Kim 
1105ba6b17d6SSergey Senozhatsky 	mutex_lock(&bdev->bd_mutex);
1106f405c445SSergey Senozhatsky 	/* Do not reset an active device or claimed device */
1107f405c445SSergey Senozhatsky 	if (bdev->bd_openers || zram->claim) {
1108f405c445SSergey Senozhatsky 		mutex_unlock(&bdev->bd_mutex);
1109f405c445SSergey Senozhatsky 		bdput(bdev);
1110f405c445SSergey Senozhatsky 		return -EBUSY;
1111cd67e10aSMinchan Kim 	}
1112cd67e10aSMinchan Kim 
1113f405c445SSergey Senozhatsky 	/* From now on, anyone can't open /dev/zram[0-9] */
1114f405c445SSergey Senozhatsky 	zram->claim = true;
1115f405c445SSergey Senozhatsky 	mutex_unlock(&bdev->bd_mutex);
1116cd67e10aSMinchan Kim 
1117f405c445SSergey Senozhatsky 	/* Make sure all the pending I/O are finished */
1118cd67e10aSMinchan Kim 	fsync_bdev(bdev);
1119ba6b17d6SSergey Senozhatsky 	zram_reset_device(zram);
1120ba6b17d6SSergey Senozhatsky 	revalidate_disk(zram->disk);
1121cd67e10aSMinchan Kim 	bdput(bdev);
1122cd67e10aSMinchan Kim 
1123f405c445SSergey Senozhatsky 	mutex_lock(&bdev->bd_mutex);
1124f405c445SSergey Senozhatsky 	zram->claim = false;
1125ba6b17d6SSergey Senozhatsky 	mutex_unlock(&bdev->bd_mutex);
1126f405c445SSergey Senozhatsky 
1127f405c445SSergey Senozhatsky 	return len;
1128f405c445SSergey Senozhatsky }
1129f405c445SSergey Senozhatsky 
1130f405c445SSergey Senozhatsky static int zram_open(struct block_device *bdev, fmode_t mode)
1131f405c445SSergey Senozhatsky {
1132f405c445SSergey Senozhatsky 	int ret = 0;
1133f405c445SSergey Senozhatsky 	struct zram *zram;
1134f405c445SSergey Senozhatsky 
1135f405c445SSergey Senozhatsky 	WARN_ON(!mutex_is_locked(&bdev->bd_mutex));
1136f405c445SSergey Senozhatsky 
1137f405c445SSergey Senozhatsky 	zram = bdev->bd_disk->private_data;
1138f405c445SSergey Senozhatsky 	/* zram was claimed to reset so open request fails */
1139f405c445SSergey Senozhatsky 	if (zram->claim)
1140f405c445SSergey Senozhatsky 		ret = -EBUSY;
1141f405c445SSergey Senozhatsky 
1142cd67e10aSMinchan Kim 	return ret;
1143cd67e10aSMinchan Kim }
1144cd67e10aSMinchan Kim 
1145cd67e10aSMinchan Kim static const struct block_device_operations zram_devops = {
1146f405c445SSergey Senozhatsky 	.open = zram_open,
1147cd67e10aSMinchan Kim 	.swap_slot_free_notify = zram_slot_free_notify,
11488c7f0102Skaram.lee 	.rw_page = zram_rw_page,
1149cd67e10aSMinchan Kim 	.owner = THIS_MODULE
1150cd67e10aSMinchan Kim };
1151cd67e10aSMinchan Kim 
115299ebbd30SAndrew Morton static DEVICE_ATTR_WO(compact);
1153083914eaSGanesh Mahendran static DEVICE_ATTR_RW(disksize);
1154083914eaSGanesh Mahendran static DEVICE_ATTR_RO(initstate);
1155083914eaSGanesh Mahendran static DEVICE_ATTR_WO(reset);
1156083914eaSGanesh Mahendran static DEVICE_ATTR_RO(orig_data_size);
1157083914eaSGanesh Mahendran static DEVICE_ATTR_RO(mem_used_total);
1158083914eaSGanesh Mahendran static DEVICE_ATTR_RW(mem_limit);
1159083914eaSGanesh Mahendran static DEVICE_ATTR_RW(mem_used_max);
1160083914eaSGanesh Mahendran static DEVICE_ATTR_RW(max_comp_streams);
1161083914eaSGanesh Mahendran static DEVICE_ATTR_RW(comp_algorithm);
1162cd67e10aSMinchan Kim 
1163cd67e10aSMinchan Kim static struct attribute *zram_disk_attrs[] = {
1164cd67e10aSMinchan Kim 	&dev_attr_disksize.attr,
1165cd67e10aSMinchan Kim 	&dev_attr_initstate.attr,
1166cd67e10aSMinchan Kim 	&dev_attr_reset.attr,
1167cd67e10aSMinchan Kim 	&dev_attr_num_reads.attr,
1168cd67e10aSMinchan Kim 	&dev_attr_num_writes.attr,
116964447249SSergey Senozhatsky 	&dev_attr_failed_reads.attr,
117064447249SSergey Senozhatsky 	&dev_attr_failed_writes.attr,
117199ebbd30SAndrew Morton 	&dev_attr_compact.attr,
1172cd67e10aSMinchan Kim 	&dev_attr_invalid_io.attr,
1173cd67e10aSMinchan Kim 	&dev_attr_notify_free.attr,
1174cd67e10aSMinchan Kim 	&dev_attr_zero_pages.attr,
1175cd67e10aSMinchan Kim 	&dev_attr_orig_data_size.attr,
1176cd67e10aSMinchan Kim 	&dev_attr_compr_data_size.attr,
1177cd67e10aSMinchan Kim 	&dev_attr_mem_used_total.attr,
11789ada9da9SMinchan Kim 	&dev_attr_mem_limit.attr,
1179461a8eeeSMinchan Kim 	&dev_attr_mem_used_max.attr,
1180beca3ec7SSergey Senozhatsky 	&dev_attr_max_comp_streams.attr,
1181e46b8a03SSergey Senozhatsky 	&dev_attr_comp_algorithm.attr,
11822f6a3bedSSergey Senozhatsky 	&dev_attr_io_stat.attr,
11834f2109f6SSergey Senozhatsky 	&dev_attr_mm_stat.attr,
1184cd67e10aSMinchan Kim 	NULL,
1185cd67e10aSMinchan Kim };
1186cd67e10aSMinchan Kim 
1187cd67e10aSMinchan Kim static struct attribute_group zram_disk_attr_group = {
1188cd67e10aSMinchan Kim 	.attrs = zram_disk_attrs,
1189cd67e10aSMinchan Kim };
1190cd67e10aSMinchan Kim 
119192ff1528SSergey Senozhatsky /*
119292ff1528SSergey Senozhatsky  * Allocate and initialize new zram device. the function returns
119392ff1528SSergey Senozhatsky  * '>= 0' device_id upon success, and negative value otherwise.
119492ff1528SSergey Senozhatsky  */
119592ff1528SSergey Senozhatsky static int zram_add(void)
1196cd67e10aSMinchan Kim {
119785508ec6SSergey Senozhatsky 	struct zram *zram;
1198ee980160SSergey Senozhatsky 	struct request_queue *queue;
119992ff1528SSergey Senozhatsky 	int ret, device_id;
120085508ec6SSergey Senozhatsky 
120185508ec6SSergey Senozhatsky 	zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
120285508ec6SSergey Senozhatsky 	if (!zram)
120385508ec6SSergey Senozhatsky 		return -ENOMEM;
120485508ec6SSergey Senozhatsky 
120592ff1528SSergey Senozhatsky 	ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
120685508ec6SSergey Senozhatsky 	if (ret < 0)
120785508ec6SSergey Senozhatsky 		goto out_free_dev;
120892ff1528SSergey Senozhatsky 	device_id = ret;
1209cd67e10aSMinchan Kim 
1210cd67e10aSMinchan Kim 	init_rwsem(&zram->init_lock);
1211cd67e10aSMinchan Kim 
1212ee980160SSergey Senozhatsky 	queue = blk_alloc_queue(GFP_KERNEL);
1213ee980160SSergey Senozhatsky 	if (!queue) {
1214cd67e10aSMinchan Kim 		pr_err("Error allocating disk queue for device %d\n",
1215cd67e10aSMinchan Kim 			device_id);
121685508ec6SSergey Senozhatsky 		ret = -ENOMEM;
121785508ec6SSergey Senozhatsky 		goto out_free_idr;
1218cd67e10aSMinchan Kim 	}
1219cd67e10aSMinchan Kim 
1220ee980160SSergey Senozhatsky 	blk_queue_make_request(queue, zram_make_request);
1221cd67e10aSMinchan Kim 
1222cd67e10aSMinchan Kim 	/* gendisk structure */
1223cd67e10aSMinchan Kim 	zram->disk = alloc_disk(1);
1224cd67e10aSMinchan Kim 	if (!zram->disk) {
122570864969SSergey Senozhatsky 		pr_err("Error allocating disk structure for device %d\n",
1226cd67e10aSMinchan Kim 			device_id);
1227201c7b72SJulia Lawall 		ret = -ENOMEM;
1228cd67e10aSMinchan Kim 		goto out_free_queue;
1229cd67e10aSMinchan Kim 	}
1230cd67e10aSMinchan Kim 
1231cd67e10aSMinchan Kim 	zram->disk->major = zram_major;
1232cd67e10aSMinchan Kim 	zram->disk->first_minor = device_id;
1233cd67e10aSMinchan Kim 	zram->disk->fops = &zram_devops;
1234ee980160SSergey Senozhatsky 	zram->disk->queue = queue;
1235ee980160SSergey Senozhatsky 	zram->disk->queue->queuedata = zram;
1236cd67e10aSMinchan Kim 	zram->disk->private_data = zram;
1237cd67e10aSMinchan Kim 	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
1238cd67e10aSMinchan Kim 
1239cd67e10aSMinchan Kim 	/* Actual capacity set using syfs (/sys/block/zram<id>/disksize */
1240cd67e10aSMinchan Kim 	set_capacity(zram->disk, 0);
1241b67d1ec1SSergey Senozhatsky 	/* zram devices sort of resembles non-rotational disks */
1242b67d1ec1SSergey Senozhatsky 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
1243b277da0aSMike Snitzer 	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
1244cd67e10aSMinchan Kim 	/*
1245cd67e10aSMinchan Kim 	 * To ensure that we always get PAGE_SIZE aligned
1246cd67e10aSMinchan Kim 	 * and n*PAGE_SIZED sized I/O requests.
1247cd67e10aSMinchan Kim 	 */
1248cd67e10aSMinchan Kim 	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
1249cd67e10aSMinchan Kim 	blk_queue_logical_block_size(zram->disk->queue,
1250cd67e10aSMinchan Kim 					ZRAM_LOGICAL_BLOCK_SIZE);
1251cd67e10aSMinchan Kim 	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
1252cd67e10aSMinchan Kim 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
1253f4659d8eSJoonsoo Kim 	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
12542bb4cd5cSJens Axboe 	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
1255f4659d8eSJoonsoo Kim 	/*
1256f4659d8eSJoonsoo Kim 	 * zram_bio_discard() will clear all logical blocks if logical block
1257f4659d8eSJoonsoo Kim 	 * size is identical with physical block size(PAGE_SIZE). But if it is
1258f4659d8eSJoonsoo Kim 	 * different, we will skip discarding some parts of logical blocks in
1259f4659d8eSJoonsoo Kim 	 * the part of the request range which isn't aligned to physical block
1260f4659d8eSJoonsoo Kim 	 * size.  So we can't ensure that all discarded logical blocks are
1261f4659d8eSJoonsoo Kim 	 * zeroed.
1262f4659d8eSJoonsoo Kim 	 */
1263f4659d8eSJoonsoo Kim 	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
1264f4659d8eSJoonsoo Kim 		zram->disk->queue->limits.discard_zeroes_data = 1;
1265f4659d8eSJoonsoo Kim 	else
1266f4659d8eSJoonsoo Kim 		zram->disk->queue->limits.discard_zeroes_data = 0;
1267f4659d8eSJoonsoo Kim 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
1268cd67e10aSMinchan Kim 
1269cd67e10aSMinchan Kim 	add_disk(zram->disk);
1270cd67e10aSMinchan Kim 
1271cd67e10aSMinchan Kim 	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
1272cd67e10aSMinchan Kim 				&zram_disk_attr_group);
1273cd67e10aSMinchan Kim 	if (ret < 0) {
127470864969SSergey Senozhatsky 		pr_err("Error creating sysfs group for device %d\n",
127570864969SSergey Senozhatsky 				device_id);
1276cd67e10aSMinchan Kim 		goto out_free_disk;
1277cd67e10aSMinchan Kim 	}
1278e46b8a03SSergey Senozhatsky 	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
1279be2d1d56SSergey Senozhatsky 	zram->meta = NULL;
1280d12b63c9SSergey Senozhatsky 
1281d12b63c9SSergey Senozhatsky 	pr_info("Added device: %s\n", zram->disk->disk_name);
128292ff1528SSergey Senozhatsky 	return device_id;
1283cd67e10aSMinchan Kim 
1284cd67e10aSMinchan Kim out_free_disk:
1285cd67e10aSMinchan Kim 	del_gendisk(zram->disk);
1286cd67e10aSMinchan Kim 	put_disk(zram->disk);
1287cd67e10aSMinchan Kim out_free_queue:
1288ee980160SSergey Senozhatsky 	blk_cleanup_queue(queue);
128985508ec6SSergey Senozhatsky out_free_idr:
129085508ec6SSergey Senozhatsky 	idr_remove(&zram_index_idr, device_id);
129185508ec6SSergey Senozhatsky out_free_dev:
129285508ec6SSergey Senozhatsky 	kfree(zram);
1293cd67e10aSMinchan Kim 	return ret;
1294cd67e10aSMinchan Kim }
1295cd67e10aSMinchan Kim 
12966566d1a3SSergey Senozhatsky static int zram_remove(struct zram *zram)
1297cd67e10aSMinchan Kim {
12986566d1a3SSergey Senozhatsky 	struct block_device *bdev;
12996566d1a3SSergey Senozhatsky 
13006566d1a3SSergey Senozhatsky 	bdev = bdget_disk(zram->disk, 0);
13016566d1a3SSergey Senozhatsky 	if (!bdev)
13026566d1a3SSergey Senozhatsky 		return -ENOMEM;
13036566d1a3SSergey Senozhatsky 
13046566d1a3SSergey Senozhatsky 	mutex_lock(&bdev->bd_mutex);
13056566d1a3SSergey Senozhatsky 	if (bdev->bd_openers || zram->claim) {
13066566d1a3SSergey Senozhatsky 		mutex_unlock(&bdev->bd_mutex);
13076566d1a3SSergey Senozhatsky 		bdput(bdev);
13086566d1a3SSergey Senozhatsky 		return -EBUSY;
13096566d1a3SSergey Senozhatsky 	}
13106566d1a3SSergey Senozhatsky 
13116566d1a3SSergey Senozhatsky 	zram->claim = true;
13126566d1a3SSergey Senozhatsky 	mutex_unlock(&bdev->bd_mutex);
13136566d1a3SSergey Senozhatsky 
1314a096cafcSSergey Senozhatsky 	/*
1315a096cafcSSergey Senozhatsky 	 * Remove sysfs first, so no one will perform a disksize
13166566d1a3SSergey Senozhatsky 	 * store while we destroy the devices. This also helps during
13176566d1a3SSergey Senozhatsky 	 * hot_remove -- zram_reset_device() is the last holder of
13186566d1a3SSergey Senozhatsky 	 * ->init_lock, no later/concurrent disksize_store() or any
13196566d1a3SSergey Senozhatsky 	 * other sysfs handlers are possible.
1320a096cafcSSergey Senozhatsky 	 */
1321cd67e10aSMinchan Kim 	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
1322cd67e10aSMinchan Kim 			&zram_disk_attr_group);
1323cd67e10aSMinchan Kim 
13246566d1a3SSergey Senozhatsky 	/* Make sure all the pending I/O are finished */
13256566d1a3SSergey Senozhatsky 	fsync_bdev(bdev);
1326a096cafcSSergey Senozhatsky 	zram_reset_device(zram);
13276566d1a3SSergey Senozhatsky 	bdput(bdev);
13286566d1a3SSergey Senozhatsky 
13296566d1a3SSergey Senozhatsky 	pr_info("Removed device: %s\n", zram->disk->disk_name);
13306566d1a3SSergey Senozhatsky 
1331ee980160SSergey Senozhatsky 	blk_cleanup_queue(zram->disk->queue);
1332cd67e10aSMinchan Kim 	del_gendisk(zram->disk);
1333cd67e10aSMinchan Kim 	put_disk(zram->disk);
133485508ec6SSergey Senozhatsky 	kfree(zram);
13356566d1a3SSergey Senozhatsky 	return 0;
1336cd67e10aSMinchan Kim }
1337cd67e10aSMinchan Kim 
13386566d1a3SSergey Senozhatsky /* zram-control sysfs attributes */
13396566d1a3SSergey Senozhatsky static ssize_t hot_add_show(struct class *class,
13406566d1a3SSergey Senozhatsky 			struct class_attribute *attr,
13416566d1a3SSergey Senozhatsky 			char *buf)
13426566d1a3SSergey Senozhatsky {
13436566d1a3SSergey Senozhatsky 	int ret;
13446566d1a3SSergey Senozhatsky 
13456566d1a3SSergey Senozhatsky 	mutex_lock(&zram_index_mutex);
13466566d1a3SSergey Senozhatsky 	ret = zram_add();
13476566d1a3SSergey Senozhatsky 	mutex_unlock(&zram_index_mutex);
13486566d1a3SSergey Senozhatsky 
13496566d1a3SSergey Senozhatsky 	if (ret < 0)
13506566d1a3SSergey Senozhatsky 		return ret;
13516566d1a3SSergey Senozhatsky 	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
13526566d1a3SSergey Senozhatsky }
13536566d1a3SSergey Senozhatsky 
13546566d1a3SSergey Senozhatsky static ssize_t hot_remove_store(struct class *class,
13556566d1a3SSergey Senozhatsky 			struct class_attribute *attr,
13566566d1a3SSergey Senozhatsky 			const char *buf,
13576566d1a3SSergey Senozhatsky 			size_t count)
13586566d1a3SSergey Senozhatsky {
13596566d1a3SSergey Senozhatsky 	struct zram *zram;
13606566d1a3SSergey Senozhatsky 	int ret, dev_id;
13616566d1a3SSergey Senozhatsky 
13626566d1a3SSergey Senozhatsky 	/* dev_id is gendisk->first_minor, which is `int' */
13636566d1a3SSergey Senozhatsky 	ret = kstrtoint(buf, 10, &dev_id);
13646566d1a3SSergey Senozhatsky 	if (ret)
13656566d1a3SSergey Senozhatsky 		return ret;
13666566d1a3SSergey Senozhatsky 	if (dev_id < 0)
13676566d1a3SSergey Senozhatsky 		return -EINVAL;
13686566d1a3SSergey Senozhatsky 
13696566d1a3SSergey Senozhatsky 	mutex_lock(&zram_index_mutex);
13706566d1a3SSergey Senozhatsky 
13716566d1a3SSergey Senozhatsky 	zram = idr_find(&zram_index_idr, dev_id);
137217ec4cd9SJerome Marchand 	if (zram) {
13736566d1a3SSergey Senozhatsky 		ret = zram_remove(zram);
137417ec4cd9SJerome Marchand 		idr_remove(&zram_index_idr, dev_id);
137517ec4cd9SJerome Marchand 	} else {
13766566d1a3SSergey Senozhatsky 		ret = -ENODEV;
137717ec4cd9SJerome Marchand 	}
13786566d1a3SSergey Senozhatsky 
13796566d1a3SSergey Senozhatsky 	mutex_unlock(&zram_index_mutex);
13806566d1a3SSergey Senozhatsky 	return ret ? ret : count;
13816566d1a3SSergey Senozhatsky }
13826566d1a3SSergey Senozhatsky 
13836566d1a3SSergey Senozhatsky static struct class_attribute zram_control_class_attrs[] = {
13846566d1a3SSergey Senozhatsky 	__ATTR_RO(hot_add),
13856566d1a3SSergey Senozhatsky 	__ATTR_WO(hot_remove),
13866566d1a3SSergey Senozhatsky 	__ATTR_NULL,
13876566d1a3SSergey Senozhatsky };
13886566d1a3SSergey Senozhatsky 
13896566d1a3SSergey Senozhatsky static struct class zram_control_class = {
13906566d1a3SSergey Senozhatsky 	.name		= "zram-control",
13916566d1a3SSergey Senozhatsky 	.owner		= THIS_MODULE,
13926566d1a3SSergey Senozhatsky 	.class_attrs	= zram_control_class_attrs,
13936566d1a3SSergey Senozhatsky };
13946566d1a3SSergey Senozhatsky 
139585508ec6SSergey Senozhatsky static int zram_remove_cb(int id, void *ptr, void *data)
139685508ec6SSergey Senozhatsky {
139785508ec6SSergey Senozhatsky 	zram_remove(ptr);
139885508ec6SSergey Senozhatsky 	return 0;
139985508ec6SSergey Senozhatsky }
140085508ec6SSergey Senozhatsky 
140185508ec6SSergey Senozhatsky static void destroy_devices(void)
140285508ec6SSergey Senozhatsky {
14036566d1a3SSergey Senozhatsky 	class_unregister(&zram_control_class);
140485508ec6SSergey Senozhatsky 	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
140585508ec6SSergey Senozhatsky 	idr_destroy(&zram_index_idr);
1406a096cafcSSergey Senozhatsky 	unregister_blkdev(zram_major, "zram");
1407a096cafcSSergey Senozhatsky }
1408a096cafcSSergey Senozhatsky 
1409cd67e10aSMinchan Kim static int __init zram_init(void)
1410cd67e10aSMinchan Kim {
141192ff1528SSergey Senozhatsky 	int ret;
1412cd67e10aSMinchan Kim 
14136566d1a3SSergey Senozhatsky 	ret = class_register(&zram_control_class);
14146566d1a3SSergey Senozhatsky 	if (ret) {
141570864969SSergey Senozhatsky 		pr_err("Unable to register zram-control class\n");
14166566d1a3SSergey Senozhatsky 		return ret;
14176566d1a3SSergey Senozhatsky 	}
14186566d1a3SSergey Senozhatsky 
1419cd67e10aSMinchan Kim 	zram_major = register_blkdev(0, "zram");
1420cd67e10aSMinchan Kim 	if (zram_major <= 0) {
142170864969SSergey Senozhatsky 		pr_err("Unable to get major number\n");
14226566d1a3SSergey Senozhatsky 		class_unregister(&zram_control_class);
1423a096cafcSSergey Senozhatsky 		return -EBUSY;
1424cd67e10aSMinchan Kim 	}
1425cd67e10aSMinchan Kim 
142692ff1528SSergey Senozhatsky 	while (num_devices != 0) {
14276566d1a3SSergey Senozhatsky 		mutex_lock(&zram_index_mutex);
142892ff1528SSergey Senozhatsky 		ret = zram_add();
14296566d1a3SSergey Senozhatsky 		mutex_unlock(&zram_index_mutex);
143092ff1528SSergey Senozhatsky 		if (ret < 0)
1431a096cafcSSergey Senozhatsky 			goto out_error;
143292ff1528SSergey Senozhatsky 		num_devices--;
1433cd67e10aSMinchan Kim 	}
1434cd67e10aSMinchan Kim 
1435cd67e10aSMinchan Kim 	return 0;
1436cd67e10aSMinchan Kim 
1437a096cafcSSergey Senozhatsky out_error:
143885508ec6SSergey Senozhatsky 	destroy_devices();
1439cd67e10aSMinchan Kim 	return ret;
1440cd67e10aSMinchan Kim }
1441cd67e10aSMinchan Kim 
1442cd67e10aSMinchan Kim static void __exit zram_exit(void)
1443cd67e10aSMinchan Kim {
144485508ec6SSergey Senozhatsky 	destroy_devices();
1445cd67e10aSMinchan Kim }
1446cd67e10aSMinchan Kim 
1447cd67e10aSMinchan Kim module_init(zram_init);
1448cd67e10aSMinchan Kim module_exit(zram_exit);
1449cd67e10aSMinchan Kim 
1450cd67e10aSMinchan Kim module_param(num_devices, uint, 0);
1451c3cdb40eSSergey Senozhatsky MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
1452cd67e10aSMinchan Kim 
1453cd67e10aSMinchan Kim MODULE_LICENSE("Dual BSD/GPL");
1454cd67e10aSMinchan Kim MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
1455cd67e10aSMinchan Kim MODULE_DESCRIPTION("Compressed RAM Block Device");
1456