xref: /openbmc/linux/drivers/block/zram/zram_drv.c (revision ae85a8075c5b025b9d503554ddc480a346a24536)
1cd67e10aSMinchan Kim /*
2cd67e10aSMinchan Kim  * Compressed RAM block device
3cd67e10aSMinchan Kim  *
4cd67e10aSMinchan Kim  * Copyright (C) 2008, 2009, 2010  Nitin Gupta
57bfb3de8SMinchan Kim  *               2012, 2013 Minchan Kim
6cd67e10aSMinchan Kim  *
7cd67e10aSMinchan Kim  * This code is released using a dual license strategy: BSD/GPL
8cd67e10aSMinchan Kim  * You can choose the licence that better fits your requirements.
9cd67e10aSMinchan Kim  *
10cd67e10aSMinchan Kim  * Released under the terms of 3-clause BSD License
11cd67e10aSMinchan Kim  * Released under the terms of GNU General Public License Version 2.0
12cd67e10aSMinchan Kim  *
13cd67e10aSMinchan Kim  */
14cd67e10aSMinchan Kim 
15cd67e10aSMinchan Kim #define KMSG_COMPONENT "zram"
16cd67e10aSMinchan Kim #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17cd67e10aSMinchan Kim 
18cd67e10aSMinchan Kim #include <linux/module.h>
19cd67e10aSMinchan Kim #include <linux/kernel.h>
20cd67e10aSMinchan Kim #include <linux/bio.h>
21cd67e10aSMinchan Kim #include <linux/bitops.h>
22cd67e10aSMinchan Kim #include <linux/blkdev.h>
23cd67e10aSMinchan Kim #include <linux/buffer_head.h>
24cd67e10aSMinchan Kim #include <linux/device.h>
25cd67e10aSMinchan Kim #include <linux/genhd.h>
26cd67e10aSMinchan Kim #include <linux/highmem.h>
27cd67e10aSMinchan Kim #include <linux/slab.h>
28b09ab054SMinchan Kim #include <linux/backing-dev.h>
29cd67e10aSMinchan Kim #include <linux/string.h>
30cd67e10aSMinchan Kim #include <linux/vmalloc.h>
31fcfa8d95SSergey Senozhatsky #include <linux/err.h>
3285508ec6SSergey Senozhatsky #include <linux/idr.h>
336566d1a3SSergey Senozhatsky #include <linux/sysfs.h>
341dd6c834SAnna-Maria Gleixner #include <linux/cpuhotplug.h>
35cd67e10aSMinchan Kim 
36cd67e10aSMinchan Kim #include "zram_drv.h"
37cd67e10aSMinchan Kim 
3885508ec6SSergey Senozhatsky static DEFINE_IDR(zram_index_idr);
396566d1a3SSergey Senozhatsky /* idr index must be protected */
406566d1a3SSergey Senozhatsky static DEFINE_MUTEX(zram_index_mutex);
416566d1a3SSergey Senozhatsky 
42cd67e10aSMinchan Kim static int zram_major;
43b7ca232eSSergey Senozhatsky static const char *default_compressor = "lzo";
44cd67e10aSMinchan Kim 
45cd67e10aSMinchan Kim /* Module params (documentation at end) */
46cd67e10aSMinchan Kim static unsigned int num_devices = 1;
47cd67e10aSMinchan Kim 
481f7319c7SMinchan Kim static void zram_free_page(struct zram *zram, size_t index);
491f7319c7SMinchan Kim 
5008eee69fSMinchan Kim static inline bool init_done(struct zram *zram)
51be2d1d56SSergey Senozhatsky {
5208eee69fSMinchan Kim 	return zram->disksize;
53be2d1d56SSergey Senozhatsky }
54be2d1d56SSergey Senozhatsky 
55cd67e10aSMinchan Kim static inline struct zram *dev_to_zram(struct device *dev)
56cd67e10aSMinchan Kim {
57cd67e10aSMinchan Kim 	return (struct zram *)dev_to_disk(dev)->private_data;
58cd67e10aSMinchan Kim }
59cd67e10aSMinchan Kim 
/* Return the zsmalloc handle of slot @index (0 when the slot is empty). */
static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
	return zram->table[index].handle;
}
64643ae61dSMinchan Kim 
/* Record the zsmalloc handle backing slot @index. */
static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
	zram->table[index].handle = handle;
}
69643ae61dSMinchan Kim 
70b31177f2SSergey Senozhatsky /* flag operations require table entry bit_spin_lock() being held */
/* Test a per-slot flag bit stored above ZRAM_FLAG_SHIFT in ->value. */
static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].value & BIT(flag);
}
7699ebbd30SAndrew Morton 
/* Set a per-slot flag bit; caller must hold the slot's bit_spin_lock. */
static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].value |= BIT(flag);
}
8299ebbd30SAndrew Morton 
/* Clear a per-slot flag bit; caller must hold the slot's bit_spin_lock. */
static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].value &= ~BIT(flag);
}
88cd67e10aSMinchan Kim 
/* Store the fill pattern for a same-filled (ZRAM_SAME) slot. */
static inline void zram_set_element(struct zram *zram, u32 index,
			unsigned long element)
{
	zram->table[index].element = element;
}
948e19d540Szhouxianrong 
/* Return the fill pattern recorded for a same-filled (ZRAM_SAME) slot. */
static unsigned long zram_get_element(struct zram *zram, u32 index)
{
	return zram->table[index].element;
}
998e19d540Szhouxianrong 
/* Compressed object size lives in the low ZRAM_FLAG_SHIFT bits of ->value. */
static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
	return zram->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}
104522698d7SSergey Senozhatsky 
105beb6602cSMinchan Kim static void zram_set_obj_size(struct zram *zram,
106522698d7SSergey Senozhatsky 					u32 index, size_t size)
107522698d7SSergey Senozhatsky {
108beb6602cSMinchan Kim 	unsigned long flags = zram->table[index].value >> ZRAM_FLAG_SHIFT;
109522698d7SSergey Senozhatsky 
110beb6602cSMinchan Kim 	zram->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
111522698d7SSergey Senozhatsky }
112522698d7SSergey Senozhatsky 
#if PAGE_SIZE != 4096
/* true when the bvec covers only part of a page (sub-page I/O) */
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#else
/*
 * In this configuration every bvec is treated as covering a full
 * page, which lets the compiler discard the partial-I/O paths.
 */
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return false;
}
#endif
124522698d7SSergey Senozhatsky 
/*
 * Re-read the device size via revalidate_disk() and then restore
 * BDI_CAP_STABLE_WRITES, which revalidate_disk() clears as a side
 * effect; zram relies on stable pages during compression.
 */
static void zram_revalidate_disk(struct zram *zram)
{
	revalidate_disk(zram->disk);
	/* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */
	zram->disk->queue->backing_dev_info->capabilities |=
		BDI_CAP_STABLE_WRITES;
}
132b09ab054SMinchan Kim 
133522698d7SSergey Senozhatsky /*
134522698d7SSergey Senozhatsky  * Check if request is within bounds and aligned on zram logical blocks.
135522698d7SSergey Senozhatsky  */
1361c53e0d2SGeliang Tang static inline bool valid_io_request(struct zram *zram,
137522698d7SSergey Senozhatsky 		sector_t start, unsigned int size)
138522698d7SSergey Senozhatsky {
139522698d7SSergey Senozhatsky 	u64 end, bound;
140522698d7SSergey Senozhatsky 
141522698d7SSergey Senozhatsky 	/* unaligned request */
142522698d7SSergey Senozhatsky 	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
1431c53e0d2SGeliang Tang 		return false;
144522698d7SSergey Senozhatsky 	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
1451c53e0d2SGeliang Tang 		return false;
146522698d7SSergey Senozhatsky 
147522698d7SSergey Senozhatsky 	end = start + (size >> SECTOR_SHIFT);
148522698d7SSergey Senozhatsky 	bound = zram->disksize >> SECTOR_SHIFT;
149522698d7SSergey Senozhatsky 	/* out of range range */
150522698d7SSergey Senozhatsky 	if (unlikely(start >= bound || end > bound || start > end))
1511c53e0d2SGeliang Tang 		return false;
152522698d7SSergey Senozhatsky 
153522698d7SSergey Senozhatsky 	/* I/O request is valid */
1541c53e0d2SGeliang Tang 	return true;
155522698d7SSergey Senozhatsky }
156522698d7SSergey Senozhatsky 
157522698d7SSergey Senozhatsky static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
158522698d7SSergey Senozhatsky {
159e86942c7SMinchan Kim 	*index  += (*offset + bvec->bv_len) / PAGE_SIZE;
160522698d7SSergey Senozhatsky 	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
161522698d7SSergey Senozhatsky }
162522698d7SSergey Senozhatsky 
/*
 * Lock-free update of the max_used_pages watermark: retry the
 * cmpxchg until either our value is no longer a new maximum or the
 * swap succeeds (cmpxchg returns the old value it observed, so
 * old_max == cur_max means the exchange took effect or was moot).
 */
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}
177522698d7SSergey Senozhatsky 
/*
 * Fill @len bytes at @ptr with the word-sized pattern @value.
 * The zero pattern is by far the common case, so it uses memset.
 */
static inline void zram_fill_page(char *ptr, unsigned long len,
					unsigned long value)
{
	unsigned long *words = (unsigned long *)ptr;
	unsigned long i, nr_words = len / sizeof(*words);

	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));

	if (value == 0) {
		memset(ptr, 0, len);
		return;
	}

	for (i = 0; i < nr_words; i++)
		words[i] = value;
}
1938e19d540Szhouxianrong 
1948e19d540Szhouxianrong static bool page_same_filled(void *ptr, unsigned long *element)
195522698d7SSergey Senozhatsky {
196522698d7SSergey Senozhatsky 	unsigned int pos;
197522698d7SSergey Senozhatsky 	unsigned long *page;
198f0fe9984SSangwoo Park 	unsigned long val;
199522698d7SSergey Senozhatsky 
200522698d7SSergey Senozhatsky 	page = (unsigned long *)ptr;
201f0fe9984SSangwoo Park 	val = page[0];
202522698d7SSergey Senozhatsky 
203f0fe9984SSangwoo Park 	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
204f0fe9984SSangwoo Park 		if (val != page[pos])
2051c53e0d2SGeliang Tang 			return false;
206522698d7SSergey Senozhatsky 	}
207522698d7SSergey Senozhatsky 
208f0fe9984SSangwoo Park 	*element = val;
2098e19d540Szhouxianrong 
2101c53e0d2SGeliang Tang 	return true;
211522698d7SSergey Senozhatsky }
212522698d7SSergey Senozhatsky 
213cd67e10aSMinchan Kim static ssize_t initstate_show(struct device *dev,
214cd67e10aSMinchan Kim 		struct device_attribute *attr, char *buf)
215cd67e10aSMinchan Kim {
216a68eb3b6SSergey Senozhatsky 	u32 val;
217cd67e10aSMinchan Kim 	struct zram *zram = dev_to_zram(dev);
218cd67e10aSMinchan Kim 
219a68eb3b6SSergey Senozhatsky 	down_read(&zram->init_lock);
220a68eb3b6SSergey Senozhatsky 	val = init_done(zram);
221a68eb3b6SSergey Senozhatsky 	up_read(&zram->init_lock);
222cd67e10aSMinchan Kim 
22356b4e8cbSSergey Senozhatsky 	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
224cd67e10aSMinchan Kim }
225cd67e10aSMinchan Kim 
/* sysfs: report the configured disksize in bytes. */
static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}
233522698d7SSergey Senozhatsky 
2349ada9da9SMinchan Kim static ssize_t mem_limit_store(struct device *dev,
2359ada9da9SMinchan Kim 		struct device_attribute *attr, const char *buf, size_t len)
2369ada9da9SMinchan Kim {
2379ada9da9SMinchan Kim 	u64 limit;
2389ada9da9SMinchan Kim 	char *tmp;
2399ada9da9SMinchan Kim 	struct zram *zram = dev_to_zram(dev);
2409ada9da9SMinchan Kim 
2419ada9da9SMinchan Kim 	limit = memparse(buf, &tmp);
2429ada9da9SMinchan Kim 	if (buf == tmp) /* no chars parsed, invalid input */
2439ada9da9SMinchan Kim 		return -EINVAL;
2449ada9da9SMinchan Kim 
2459ada9da9SMinchan Kim 	down_write(&zram->init_lock);
2469ada9da9SMinchan Kim 	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
2479ada9da9SMinchan Kim 	up_write(&zram->init_lock);
2489ada9da9SMinchan Kim 
2499ada9da9SMinchan Kim 	return len;
2509ada9da9SMinchan Kim }
2519ada9da9SMinchan Kim 
/*
 * sysfs: writing "0" resets the max_used_pages watermark to the
 * pool's current usage; any other value is rejected.
 */
static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(zram->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}
272461a8eeeSMinchan Kim 
273013bf95aSMinchan Kim #ifdef CONFIG_ZRAM_WRITEBACK
274013bf95aSMinchan Kim static bool zram_wb_enabled(struct zram *zram)
275013bf95aSMinchan Kim {
276013bf95aSMinchan Kim 	return zram->backing_dev;
277013bf95aSMinchan Kim }
278013bf95aSMinchan Kim 
/*
 * Tear down the writeback backing device: restore its original block
 * size, drop our exclusive blkdev reference, close the file and free
 * the allocation bitmap.  No-op when writeback was never configured.
 */
static void reset_bdev(struct zram *zram)
{
	struct block_device *bdev;

	if (!zram_wb_enabled(zram))
		return;

	bdev = zram->bdev;
	/* old_block_size == 0 means we never changed the block size */
	if (zram->old_block_size)
		set_blocksize(bdev, zram->old_block_size);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	/* hope filp_close flush all of IO */
	filp_close(zram->backing_dev, NULL);
	zram->backing_dev = NULL;
	zram->old_block_size = 0;
	zram->bdev = NULL;

	kvfree(zram->bitmap);
	zram->bitmap = NULL;
}
299013bf95aSMinchan Kim 
/*
 * sysfs: print the path of the backing device, or "none" when
 * writeback is not configured.  file_path() renders into the tail
 * of @buf, so the result is moved to the front before returning.
 */
static ssize_t backing_dev_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct file *file = zram->backing_dev;
	char *p;
	ssize_t ret;

	down_read(&zram->init_lock);
	if (!zram_wb_enabled(zram)) {
		memcpy(buf, "none\n", 5);
		up_read(&zram->init_lock);
		return 5;
	}

	p = file_path(file, buf, PAGE_SIZE - 1);
	if (IS_ERR(p)) {
		ret = PTR_ERR(p);
		goto out;
	}

	ret = strlen(p);
	memmove(buf, p, ret);
	buf[ret++] = '\n';
out:
	up_read(&zram->init_lock);
	return ret;
}
328013bf95aSMinchan Kim 
329013bf95aSMinchan Kim static ssize_t backing_dev_store(struct device *dev,
330013bf95aSMinchan Kim 		struct device_attribute *attr, const char *buf, size_t len)
331013bf95aSMinchan Kim {
332013bf95aSMinchan Kim 	char *file_name;
333013bf95aSMinchan Kim 	struct file *backing_dev = NULL;
334013bf95aSMinchan Kim 	struct inode *inode;
335013bf95aSMinchan Kim 	struct address_space *mapping;
3361363d466SMinchan Kim 	unsigned int bitmap_sz, old_block_size = 0;
3371363d466SMinchan Kim 	unsigned long nr_pages, *bitmap = NULL;
338013bf95aSMinchan Kim 	struct block_device *bdev = NULL;
339013bf95aSMinchan Kim 	int err;
340013bf95aSMinchan Kim 	struct zram *zram = dev_to_zram(dev);
341013bf95aSMinchan Kim 
342013bf95aSMinchan Kim 	file_name = kmalloc(PATH_MAX, GFP_KERNEL);
343013bf95aSMinchan Kim 	if (!file_name)
344013bf95aSMinchan Kim 		return -ENOMEM;
345013bf95aSMinchan Kim 
346013bf95aSMinchan Kim 	down_write(&zram->init_lock);
347013bf95aSMinchan Kim 	if (init_done(zram)) {
348013bf95aSMinchan Kim 		pr_info("Can't setup backing device for initialized device\n");
349013bf95aSMinchan Kim 		err = -EBUSY;
350013bf95aSMinchan Kim 		goto out;
351013bf95aSMinchan Kim 	}
352013bf95aSMinchan Kim 
353013bf95aSMinchan Kim 	strlcpy(file_name, buf, len);
354013bf95aSMinchan Kim 
355013bf95aSMinchan Kim 	backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
356013bf95aSMinchan Kim 	if (IS_ERR(backing_dev)) {
357013bf95aSMinchan Kim 		err = PTR_ERR(backing_dev);
358013bf95aSMinchan Kim 		backing_dev = NULL;
359013bf95aSMinchan Kim 		goto out;
360013bf95aSMinchan Kim 	}
361013bf95aSMinchan Kim 
362013bf95aSMinchan Kim 	mapping = backing_dev->f_mapping;
363013bf95aSMinchan Kim 	inode = mapping->host;
364013bf95aSMinchan Kim 
365013bf95aSMinchan Kim 	/* Support only block device in this moment */
366013bf95aSMinchan Kim 	if (!S_ISBLK(inode->i_mode)) {
367013bf95aSMinchan Kim 		err = -ENOTBLK;
368013bf95aSMinchan Kim 		goto out;
369013bf95aSMinchan Kim 	}
370013bf95aSMinchan Kim 
371013bf95aSMinchan Kim 	bdev = bdgrab(I_BDEV(inode));
372013bf95aSMinchan Kim 	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
373013bf95aSMinchan Kim 	if (err < 0)
374013bf95aSMinchan Kim 		goto out;
375013bf95aSMinchan Kim 
3761363d466SMinchan Kim 	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
3771363d466SMinchan Kim 	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
3781363d466SMinchan Kim 	bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
3791363d466SMinchan Kim 	if (!bitmap) {
3801363d466SMinchan Kim 		err = -ENOMEM;
3811363d466SMinchan Kim 		goto out;
3821363d466SMinchan Kim 	}
3831363d466SMinchan Kim 
384013bf95aSMinchan Kim 	old_block_size = block_size(bdev);
385013bf95aSMinchan Kim 	err = set_blocksize(bdev, PAGE_SIZE);
386013bf95aSMinchan Kim 	if (err)
387013bf95aSMinchan Kim 		goto out;
388013bf95aSMinchan Kim 
389013bf95aSMinchan Kim 	reset_bdev(zram);
3901363d466SMinchan Kim 	spin_lock_init(&zram->bitmap_lock);
391013bf95aSMinchan Kim 
392013bf95aSMinchan Kim 	zram->old_block_size = old_block_size;
393013bf95aSMinchan Kim 	zram->bdev = bdev;
394013bf95aSMinchan Kim 	zram->backing_dev = backing_dev;
3951363d466SMinchan Kim 	zram->bitmap = bitmap;
3961363d466SMinchan Kim 	zram->nr_pages = nr_pages;
397013bf95aSMinchan Kim 	up_write(&zram->init_lock);
398013bf95aSMinchan Kim 
399013bf95aSMinchan Kim 	pr_info("setup backing device %s\n", file_name);
400013bf95aSMinchan Kim 	kfree(file_name);
401013bf95aSMinchan Kim 
402013bf95aSMinchan Kim 	return len;
403013bf95aSMinchan Kim out:
4041363d466SMinchan Kim 	if (bitmap)
4051363d466SMinchan Kim 		kvfree(bitmap);
4061363d466SMinchan Kim 
407013bf95aSMinchan Kim 	if (bdev)
408013bf95aSMinchan Kim 		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
409013bf95aSMinchan Kim 
410013bf95aSMinchan Kim 	if (backing_dev)
411013bf95aSMinchan Kim 		filp_close(backing_dev, NULL);
412013bf95aSMinchan Kim 
413013bf95aSMinchan Kim 	up_write(&zram->init_lock);
414013bf95aSMinchan Kim 
415013bf95aSMinchan Kim 	kfree(file_name);
416013bf95aSMinchan Kim 
417013bf95aSMinchan Kim 	return err;
418013bf95aSMinchan Kim }
419013bf95aSMinchan Kim 
4201363d466SMinchan Kim static unsigned long get_entry_bdev(struct zram *zram)
4211363d466SMinchan Kim {
4221363d466SMinchan Kim 	unsigned long entry;
4231363d466SMinchan Kim 
4241363d466SMinchan Kim 	spin_lock(&zram->bitmap_lock);
4251363d466SMinchan Kim 	/* skip 0 bit to confuse zram.handle = 0 */
4261363d466SMinchan Kim 	entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1);
4271363d466SMinchan Kim 	if (entry == zram->nr_pages) {
4281363d466SMinchan Kim 		spin_unlock(&zram->bitmap_lock);
4291363d466SMinchan Kim 		return 0;
4301363d466SMinchan Kim 	}
4311363d466SMinchan Kim 
4321363d466SMinchan Kim 	set_bit(entry, zram->bitmap);
4331363d466SMinchan Kim 	spin_unlock(&zram->bitmap_lock);
4341363d466SMinchan Kim 
4351363d466SMinchan Kim 	return entry;
4361363d466SMinchan Kim }
4371363d466SMinchan Kim 
4381363d466SMinchan Kim static void put_entry_bdev(struct zram *zram, unsigned long entry)
4391363d466SMinchan Kim {
4401363d466SMinchan Kim 	int was_set;
4411363d466SMinchan Kim 
4421363d466SMinchan Kim 	spin_lock(&zram->bitmap_lock);
4431363d466SMinchan Kim 	was_set = test_and_clear_bit(entry, zram->bitmap);
4441363d466SMinchan Kim 	spin_unlock(&zram->bitmap_lock);
4451363d466SMinchan Kim 	WARN_ON_ONCE(!was_set);
4461363d466SMinchan Kim }
4471363d466SMinchan Kim 
448013bf95aSMinchan Kim #else
449013bf95aSMinchan Kim static bool zram_wb_enabled(struct zram *zram) { return false; }
450013bf95aSMinchan Kim static inline void reset_bdev(struct zram *zram) {};
451013bf95aSMinchan Kim #endif
452013bf95aSMinchan Kim 
453013bf95aSMinchan Kim 
45443209ea2SSergey Senozhatsky /*
45543209ea2SSergey Senozhatsky  * We switched to per-cpu streams and this attr is not needed anymore.
45643209ea2SSergey Senozhatsky  * However, we will keep it around for some time, because:
45743209ea2SSergey Senozhatsky  * a) we may revert per-cpu streams in the future
45843209ea2SSergey Senozhatsky  * b) it's visible to user space and we need to follow our 2 years
45943209ea2SSergey Senozhatsky  *    retirement rule; but we already have a number of 'soon to be
46043209ea2SSergey Senozhatsky  *    altered' attrs, so max_comp_streams need to wait for the next
46143209ea2SSergey Senozhatsky  *    layoff cycle.
46243209ea2SSergey Senozhatsky  */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	/* with per-cpu streams the count is always the online CPU count */
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}
468522698d7SSergey Senozhatsky 
static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	/* deprecated no-op: accept and ignore any value (see note above) */
	return len;
}
474beca3ec7SSergey Senozhatsky 
/*
 * sysfs: list the available compression algorithms with the one
 * currently selected for this device marked by zcomp_available_show().
 */
static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}
487e46b8a03SSergey Senozhatsky 
/*
 * sysfs: select the compression algorithm.  Only permitted while the
 * device is uninitialized; the name is validated before any locking.
 */
static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[ARRAY_SIZE(zram->compressor)];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
	/* ignore trailing newline */
	sz = strlen(compressor);
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	/* same-sized buffers (see ARRAY_SIZE above), so strcpy is safe */
	strcpy(zram->compressor, compressor);
	up_write(&zram->init_lock);
	return len;
}
515e46b8a03SSergey Senozhatsky 
516522698d7SSergey Senozhatsky static ssize_t compact_store(struct device *dev,
517522698d7SSergey Senozhatsky 		struct device_attribute *attr, const char *buf, size_t len)
518cd67e10aSMinchan Kim {
519522698d7SSergey Senozhatsky 	struct zram *zram = dev_to_zram(dev);
520522698d7SSergey Senozhatsky 
521522698d7SSergey Senozhatsky 	down_read(&zram->init_lock);
522522698d7SSergey Senozhatsky 	if (!init_done(zram)) {
523522698d7SSergey Senozhatsky 		up_read(&zram->init_lock);
524522698d7SSergey Senozhatsky 		return -EINVAL;
525cd67e10aSMinchan Kim 	}
526cd67e10aSMinchan Kim 
527beb6602cSMinchan Kim 	zs_compact(zram->mem_pool);
528522698d7SSergey Senozhatsky 	up_read(&zram->init_lock);
529522698d7SSergey Senozhatsky 
530522698d7SSergey Senozhatsky 	return len;
531cd67e10aSMinchan Kim }
532cd67e10aSMinchan Kim 
/*
 * sysfs: one-line I/O statistics in the fixed column order
 * failed_reads failed_writes invalid_io notify_free.
 */
static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}
550d2d5e762SWeijie Yang 
/*
 * sysfs: memory statistics in the fixed column order
 * orig_data_size compr_data_size mem_used_total mem_limit
 * mem_used_max same_pages pages_compacted.
 * Pool figures are only read once the device is initialized.
 */
static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->mem_pool);
		zs_pool_stats(zram->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			pool_stats.pages_compacted);
	up_read(&zram->init_lock);

	return ret;
}
584d2d5e762SWeijie Yang 
/*
 * sysfs: debugging statistics; the leading version line lets the
 * format evolve without breaking parsers.  Currently only the
 * writestall counter is exported.
 */
static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall));
	up_read(&zram->init_lock);

	return ret;
}
601623e47fcSSergey Senozhatsky 
602522698d7SSergey Senozhatsky static DEVICE_ATTR_RO(io_stat);
603522698d7SSergey Senozhatsky static DEVICE_ATTR_RO(mm_stat);
604623e47fcSSergey Senozhatsky static DEVICE_ATTR_RO(debug_stat);
605d2d5e762SWeijie Yang 
/* Take the per-slot bit spinlock guarding table[index]. */
static void zram_slot_lock(struct zram *zram, u32 index)
{
	bit_spin_lock(ZRAM_ACCESS, &zram->table[index].value);
}
61086c49814SMinchan Kim 
/* Release the per-slot bit spinlock guarding table[index]. */
static void zram_slot_unlock(struct zram *zram, u32 index)
{
	bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value);
}
61586c49814SMinchan Kim 
/*
 * Handle reads of slots that hold no compressed object: an empty slot
 * (handle == 0) or a same-filled slot (ZRAM_SAME).  In either case the
 * destination range is filled from the recorded element (0 for empty
 * slots) and true is returned; false means the caller must decompress.
 * The slot lock is dropped before touching the page, since the fill
 * value has already been read under the lock.
 */
static bool zram_same_page_read(struct zram *zram, u32 index,
				struct page *page,
				unsigned int offset, unsigned int len)
{
	zram_slot_lock(zram, index);
	if (unlikely(!zram_get_handle(zram, index) ||
			zram_test_flag(zram, index, ZRAM_SAME))) {
		void *mem;

		zram_slot_unlock(zram, index);
		mem = kmap_atomic(page);
		zram_fill_page(mem + offset, len,
					zram_get_element(zram, index));
		kunmap_atomic(mem);
		return true;
	}
	zram_slot_unlock(zram, index);

	return false;
}
6361f7319c7SMinchan Kim 
/*
 * Free all per-device metadata: every still-allocated page must be
 * released back to the pool before the pool itself and the slot
 * table are destroyed.
 */
static void zram_meta_free(struct zram *zram, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++)
		zram_free_page(zram, index);

	zs_destroy_pool(zram->mem_pool);
	vfree(zram->table);
}
649cd67e10aSMinchan Kim 
/*
 * zram_meta_alloc - allocate the slot table and zsmalloc pool for
 * @disksize bytes. Returns true on success, false on allocation failure
 * (partially allocated state is cleaned up before returning false).
 */
650beb6602cSMinchan Kim static bool zram_meta_alloc(struct zram *zram, u64 disksize)
651cd67e10aSMinchan Kim {
652cd67e10aSMinchan Kim 	size_t num_pages;
653cd67e10aSMinchan Kim 
654cd67e10aSMinchan Kim 	num_pages = disksize >> PAGE_SHIFT;
	/*
	 * NOTE(review): num_pages * sizeof(*zram->table) is an unchecked
	 * multiplication — presumably disksize is bounded by the caller,
	 * but consider vzalloc(array_size(...)) to be safe. TODO confirm.
	 */
655beb6602cSMinchan Kim 	zram->table = vzalloc(num_pages * sizeof(*zram->table));
656beb6602cSMinchan Kim 	if (!zram->table)
657beb6602cSMinchan Kim 		return false;
658beb6602cSMinchan Kim 
659beb6602cSMinchan Kim 	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
660beb6602cSMinchan Kim 	if (!zram->mem_pool) {
661beb6602cSMinchan Kim 		vfree(zram->table);
662beb6602cSMinchan Kim 		return false;
663cd67e10aSMinchan Kim 	}
664cd67e10aSMinchan Kim 
665beb6602cSMinchan Kim 	return true;
666cd67e10aSMinchan Kim }
667cd67e10aSMinchan Kim 
668d2d5e762SWeijie Yang /*
669d2d5e762SWeijie Yang  * To protect concurrent access to the same index entry,
670d2d5e762SWeijie Yang  * caller should hold this table index entry's bit_spinlock to
671d2d5e762SWeijie Yang  * indicate this index entry is accessing.
672d2d5e762SWeijie Yang  */
673cd67e10aSMinchan Kim static void zram_free_page(struct zram *zram, size_t index)
674cd67e10aSMinchan Kim {
675643ae61dSMinchan Kim 	unsigned long handle = zram_get_handle(zram, index);
676cd67e10aSMinchan Kim 
677cd67e10aSMinchan Kim 	/*
6788e19d540Szhouxianrong 	 * No memory is allocated for same element filled pages.
6798e19d540Szhouxianrong 	 * Simply clear same page flag.
680cd67e10aSMinchan Kim 	 */
681beb6602cSMinchan Kim 	if (zram_test_flag(zram, index, ZRAM_SAME)) {
682beb6602cSMinchan Kim 		zram_clear_flag(zram, index, ZRAM_SAME);
683643ae61dSMinchan Kim 		zram_set_element(zram, index, 0);
6848e19d540Szhouxianrong 		atomic64_dec(&zram->stats.same_pages);
68551f9f82cSMinchan Kim 		atomic64_dec(&zram->stats.pages_stored);
686cd67e10aSMinchan Kim 		return;
687cd67e10aSMinchan Kim 	}
688cd67e10aSMinchan Kim 
	/* Slot was never written: nothing to free or account. */
6898e19d540Szhouxianrong 	if (!handle)
6908e19d540Szhouxianrong 		return;
6918e19d540Szhouxianrong 
692beb6602cSMinchan Kim 	zs_free(zram->mem_pool, handle);
693cd67e10aSMinchan Kim 
	/* Undo the accounting done when the object was stored. */
694beb6602cSMinchan Kim 	atomic64_sub(zram_get_obj_size(zram, index),
695d2d5e762SWeijie Yang 			&zram->stats.compr_data_size);
69690a7806eSSergey Senozhatsky 	atomic64_dec(&zram->stats.pages_stored);
697cd67e10aSMinchan Kim 
	/* Reset the slot so a later read sees it as unwritten. */
698643ae61dSMinchan Kim 	zram_set_handle(zram, index, 0);
699beb6602cSMinchan Kim 	zram_set_obj_size(zram, index, 0);
700cd67e10aSMinchan Kim }
701cd67e10aSMinchan Kim 
/*
 * __zram_bvec_read - read one full logical page at @index into @page.
 *
 * Fast path: zram_same_page_read() handles unwritten / same-filled slots
 * without touching zsmalloc. Otherwise the compressed object is mapped
 * read-only and either memcpy'd (incompressible pages are stored raw at
 * PAGE_SIZE) or decompressed through the per-CPU compression stream.
 * Returns 0 on success or a negative errno from the decompressor.
 */
702693dc1ceSMinchan Kim static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index)
703cd67e10aSMinchan Kim {
7041f7319c7SMinchan Kim 	int ret;
70592967471SMinchan Kim 	unsigned long handle;
706ebaf9ab5SSergey Senozhatsky 	unsigned int size;
7071f7319c7SMinchan Kim 	void *src, *dst;
7081f7319c7SMinchan Kim 
7091f7319c7SMinchan Kim 	if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
7101f7319c7SMinchan Kim 		return 0;
71192967471SMinchan Kim 
	/* Hold the slot lock so handle/size stay valid while we decompress. */
71286c49814SMinchan Kim 	zram_slot_lock(zram, index);
713643ae61dSMinchan Kim 	handle = zram_get_handle(zram, index);
714beb6602cSMinchan Kim 	size = zram_get_obj_size(zram, index);
715cd67e10aSMinchan Kim 
716beb6602cSMinchan Kim 	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
717ebaf9ab5SSergey Senozhatsky 	if (size == PAGE_SIZE) {
		/* Page was stored uncompressed; plain copy suffices. */
7181f7319c7SMinchan Kim 		dst = kmap_atomic(page);
7191f7319c7SMinchan Kim 		memcpy(dst, src, PAGE_SIZE);
7201f7319c7SMinchan Kim 		kunmap_atomic(dst);
7211f7319c7SMinchan Kim 		ret = 0;
722ebaf9ab5SSergey Senozhatsky 	} else {
723ebaf9ab5SSergey Senozhatsky 		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
724ebaf9ab5SSergey Senozhatsky 
7251f7319c7SMinchan Kim 		dst = kmap_atomic(page);
7261f7319c7SMinchan Kim 		ret = zcomp_decompress(zstrm, src, size, dst);
7271f7319c7SMinchan Kim 		kunmap_atomic(dst);
728ebaf9ab5SSergey Senozhatsky 		zcomp_stream_put(zram->comp);
729ebaf9ab5SSergey Senozhatsky 	}
730beb6602cSMinchan Kim 	zs_unmap_object(zram->mem_pool, handle);
73186c49814SMinchan Kim 	zram_slot_unlock(zram, index);
732cd67e10aSMinchan Kim 
733cd67e10aSMinchan Kim 	/* Should NEVER happen. Return bio error if it does. */
7341f7319c7SMinchan Kim 	if (unlikely(ret))
735cd67e10aSMinchan Kim 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
736cd67e10aSMinchan Kim 
7371f7319c7SMinchan Kim 	return ret;
738cd67e10aSMinchan Kim }
739cd67e10aSMinchan Kim 
/*
 * zram_bvec_read - read the (possibly partial) bio_vec at page @index.
 *
 * A partial read (bvec smaller than a page) first decompresses the whole
 * page into a temporary bounce page, then copies just the requested
 * @offset/@bv_len window into the caller's page. Full-page reads
 * decompress directly into bvec->bv_page. Returns 0 or negative errno.
 */
740cd67e10aSMinchan Kim static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
741b627cff3Skaram.lee 				u32 index, int offset)
742cd67e10aSMinchan Kim {
743cd67e10aSMinchan Kim 	int ret;
744cd67e10aSMinchan Kim 	struct page *page;
7451f7319c7SMinchan Kim 
746cd67e10aSMinchan Kim 	page = bvec->bv_page;
7471f7319c7SMinchan Kim 	if (is_partial_io(bvec)) {
748cd67e10aSMinchan Kim 		/* Use a temporary buffer to decompress the page */
7491f7319c7SMinchan Kim 		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
7501f7319c7SMinchan Kim 		if (!page)
7511f7319c7SMinchan Kim 			return -ENOMEM;
752cd67e10aSMinchan Kim 	}
753cd67e10aSMinchan Kim 
754693dc1ceSMinchan Kim 	ret = __zram_bvec_read(zram, page, index);
755b7ca232eSSergey Senozhatsky 	if (unlikely(ret))
7561f7319c7SMinchan Kim 		goto out;
757cd67e10aSMinchan Kim 
7581f7319c7SMinchan Kim 	if (is_partial_io(bvec)) {
		/* Copy only the requested window out of the bounce page. */
7591f7319c7SMinchan Kim 		void *dst = kmap_atomic(bvec->bv_page);
7601f7319c7SMinchan Kim 		void *src = kmap_atomic(page);
761cd67e10aSMinchan Kim 
7621f7319c7SMinchan Kim 		memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
7631f7319c7SMinchan Kim 		kunmap_atomic(src);
7641f7319c7SMinchan Kim 		kunmap_atomic(dst);
7651f7319c7SMinchan Kim 	}
7661f7319c7SMinchan Kim out:
767cd67e10aSMinchan Kim 	if (is_partial_io(bvec))
7681f7319c7SMinchan Kim 		__free_page(page);
7691f7319c7SMinchan Kim 
770cd67e10aSMinchan Kim 	return ret;
771cd67e10aSMinchan Kim }
772cd67e10aSMinchan Kim 
/*
 * __zram_bvec_write - store one full page (bvec->bv_page) at slot @index.
 *
 * Same-filled pages are detected first and recorded with just the fill
 * element (no zsmalloc allocation). Otherwise the page is compressed and
 * stored; pages that compress poorly (> max_zpage_size) are stored raw at
 * PAGE_SIZE. The old slot contents are freed under the slot lock before
 * the new handle/flags are installed. Returns 0 on success or a negative
 * errno (compression failure / allocation failure / over mem limit).
 */
77397ec7c8bSMinchan Kim static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
774cd67e10aSMinchan Kim {
775*ae85a807SMinchan Kim 	int ret = 0;
776461a8eeeSMinchan Kim 	unsigned long alloced_pages;
7771f7319c7SMinchan Kim 	unsigned long handle = 0;
77897ec7c8bSMinchan Kim 	unsigned int comp_len = 0;
77997ec7c8bSMinchan Kim 	void *src, *dst, *mem;
78097ec7c8bSMinchan Kim 	struct zcomp_strm *zstrm;
78197ec7c8bSMinchan Kim 	struct page *page = bvec->bv_page;
78297ec7c8bSMinchan Kim 	unsigned long element = 0;
78397ec7c8bSMinchan Kim 	enum zram_pageflags flags = 0;
78497ec7c8bSMinchan Kim 
	/* Same-filled page: skip compression entirely, store the element. */
78597ec7c8bSMinchan Kim 	mem = kmap_atomic(page);
78697ec7c8bSMinchan Kim 	if (page_same_filled(mem, &element)) {
78797ec7c8bSMinchan Kim 		kunmap_atomic(mem);
78897ec7c8bSMinchan Kim 		/* Free memory associated with this sector now. */
78997ec7c8bSMinchan Kim 		flags = ZRAM_SAME;
79097ec7c8bSMinchan Kim 		atomic64_inc(&zram->stats.same_pages);
79197ec7c8bSMinchan Kim 		goto out;
79297ec7c8bSMinchan Kim 	}
79397ec7c8bSMinchan Kim 	kunmap_atomic(mem);
794cd67e10aSMinchan Kim 
795da9556a2SSergey Senozhatsky compress_again:
79697ec7c8bSMinchan Kim 	zstrm = zcomp_stream_get(zram->comp);
7971f7319c7SMinchan Kim 	src = kmap_atomic(page);
79897ec7c8bSMinchan Kim 	ret = zcomp_compress(zstrm, src, &comp_len);
7991f7319c7SMinchan Kim 	kunmap_atomic(src);
800cd67e10aSMinchan Kim 
801b7ca232eSSergey Senozhatsky 	if (unlikely(ret)) {
80297ec7c8bSMinchan Kim 		zcomp_stream_put(zram->comp);
803cd67e10aSMinchan Kim 		pr_err("Compression failed! err=%d\n", ret);
		/* handle may be non-zero if we came from the slow path. */
804beb6602cSMinchan Kim 		zs_free(zram->mem_pool, handle);
8051f7319c7SMinchan Kim 		return ret;
806cd67e10aSMinchan Kim 	}
807da9556a2SSergey Senozhatsky 
	/* Incompressible page: store it uncompressed at full PAGE_SIZE. */
8081f7319c7SMinchan Kim 	if (unlikely(comp_len > max_zpage_size))
8091f7319c7SMinchan Kim 		comp_len = PAGE_SIZE;
810cd67e10aSMinchan Kim 
811da9556a2SSergey Senozhatsky 	/*
812da9556a2SSergey Senozhatsky 	 * handle allocation has 2 paths:
813da9556a2SSergey Senozhatsky 	 * a) fast path is executed with preemption disabled (for
814da9556a2SSergey Senozhatsky 	 *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
815da9556a2SSergey Senozhatsky 	 *  since we can't sleep;
816da9556a2SSergey Senozhatsky 	 * b) slow path enables preemption and attempts to allocate
817da9556a2SSergey Senozhatsky 	 *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
818da9556a2SSergey Senozhatsky 	 *  put per-cpu compression stream and, thus, to re-do
819da9556a2SSergey Senozhatsky 	 *  the compression once handle is allocated.
820da9556a2SSergey Senozhatsky 	 *
821da9556a2SSergey Senozhatsky 	 * if we have a 'non-null' handle here then we are coming
822da9556a2SSergey Senozhatsky 	 * from the slow path and handle has already been allocated.
823da9556a2SSergey Senozhatsky 	 */
	/*
	 * NOTE(review): on the slow path the handle was sized for the
	 * previous comp_len; the recompression above could presumably
	 * produce a different length — confirm comp_len cannot exceed
	 * the allocated size on the retry.
	 */
824da9556a2SSergey Senozhatsky 	if (!handle)
825beb6602cSMinchan Kim 		handle = zs_malloc(zram->mem_pool, comp_len,
826da9556a2SSergey Senozhatsky 				__GFP_KSWAPD_RECLAIM |
827da9556a2SSergey Senozhatsky 				__GFP_NOWARN |
8289bc482d3SMinchan Kim 				__GFP_HIGHMEM |
8299bc482d3SMinchan Kim 				__GFP_MOVABLE);
830cd67e10aSMinchan Kim 	if (!handle) {
8312aea8493SSergey Senozhatsky 		zcomp_stream_put(zram->comp);
832623e47fcSSergey Senozhatsky 		atomic64_inc(&zram->stats.writestall);
		/* Slow path: retry allocation with direct reclaim allowed. */
833beb6602cSMinchan Kim 		handle = zs_malloc(zram->mem_pool, comp_len,
8349bc482d3SMinchan Kim 				GFP_NOIO | __GFP_HIGHMEM |
8359bc482d3SMinchan Kim 				__GFP_MOVABLE);
836da9556a2SSergey Senozhatsky 		if (handle)
837da9556a2SSergey Senozhatsky 			goto compress_again;
8381f7319c7SMinchan Kim 		return -ENOMEM;
839cd67e10aSMinchan Kim 	}
8409ada9da9SMinchan Kim 
841beb6602cSMinchan Kim 	alloced_pages = zs_get_total_pages(zram->mem_pool);
84212372755SSergey SENOZHATSKY 	update_used_max(zram, alloced_pages);
84312372755SSergey SENOZHATSKY 
	/* Enforce the optional mem_limit sysfs knob. */
844461a8eeeSMinchan Kim 	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
84597ec7c8bSMinchan Kim 		zcomp_stream_put(zram->comp);
846beb6602cSMinchan Kim 		zs_free(zram->mem_pool, handle);
8471f7319c7SMinchan Kim 		return -ENOMEM;
8489ada9da9SMinchan Kim 	}
8499ada9da9SMinchan Kim 
850beb6602cSMinchan Kim 	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
8511f7319c7SMinchan Kim 
	/* Uncompressed pages are copied from the page itself, not zstrm. */
8521f7319c7SMinchan Kim 	src = zstrm->buffer;
8531f7319c7SMinchan Kim 	if (comp_len == PAGE_SIZE)
854cd67e10aSMinchan Kim 		src = kmap_atomic(page);
8551f7319c7SMinchan Kim 	memcpy(dst, src, comp_len);
8561f7319c7SMinchan Kim 	if (comp_len == PAGE_SIZE)
857cd67e10aSMinchan Kim 		kunmap_atomic(src);
858cd67e10aSMinchan Kim 
8592aea8493SSergey Senozhatsky 	zcomp_stream_put(zram->comp);
860beb6602cSMinchan Kim 	zs_unmap_object(zram->mem_pool, handle);
8614ebbe7f7SMinchan Kim 	atomic64_add(comp_len, &zram->stats.compr_data_size);
8624ebbe7f7SMinchan Kim out:
863cd67e10aSMinchan Kim 	/*
864cd67e10aSMinchan Kim 	 * Free memory associated with this sector
865cd67e10aSMinchan Kim 	 * before overwriting unused sectors.
866cd67e10aSMinchan Kim 	 */
86786c49814SMinchan Kim 	zram_slot_lock(zram, index);
868cd67e10aSMinchan Kim 	zram_free_page(zram, index);
8694ebbe7f7SMinchan Kim 	if (flags == ZRAM_SAME) {
8704ebbe7f7SMinchan Kim 		zram_set_flag(zram, index, ZRAM_SAME);
8714ebbe7f7SMinchan Kim 		zram_set_element(zram, index, element);
8724ebbe7f7SMinchan Kim 	} else {
873643ae61dSMinchan Kim 		zram_set_handle(zram, index, handle);
874beb6602cSMinchan Kim 		zram_set_obj_size(zram, index, comp_len);
8754ebbe7f7SMinchan Kim 	}
87686c49814SMinchan Kim 	zram_slot_unlock(zram, index);
877cd67e10aSMinchan Kim 
878cd67e10aSMinchan Kim 	/* Update stats */
87990a7806eSSergey Senozhatsky 	atomic64_inc(&zram->stats.pages_stored);
880*ae85a807SMinchan Kim 	return ret;
8811f7319c7SMinchan Kim }
8821f7319c7SMinchan Kim 
/*
 * zram_bvec_write - write the (possibly partial) bio_vec at page @index.
 *
 * For a partial write the current page contents are first read back into
 * a bounce page (read-modify-write), the bvec window is patched in, and
 * the merged full page is handed to __zram_bvec_write(). Full-page
 * writes go straight through. Returns 0 or negative errno.
 */
8831f7319c7SMinchan Kim static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
8841f7319c7SMinchan Kim 				u32 index, int offset)
8851f7319c7SMinchan Kim {
8861f7319c7SMinchan Kim 	int ret;
8871f7319c7SMinchan Kim 	struct page *page = NULL;
8881f7319c7SMinchan Kim 	void *src;
8891f7319c7SMinchan Kim 	struct bio_vec vec;
8901f7319c7SMinchan Kim 
8911f7319c7SMinchan Kim 	vec = *bvec;
8921f7319c7SMinchan Kim 	if (is_partial_io(bvec)) {
8931f7319c7SMinchan Kim 		void *dst;
8941f7319c7SMinchan Kim 		/*
8951f7319c7SMinchan Kim 		 * This is a partial IO. We need to read the full page
8961f7319c7SMinchan Kim 		 * before to write the changes.
8971f7319c7SMinchan Kim 		 */
8981f7319c7SMinchan Kim 		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
8991f7319c7SMinchan Kim 		if (!page)
9001f7319c7SMinchan Kim 			return -ENOMEM;
9011f7319c7SMinchan Kim 
902693dc1ceSMinchan Kim 		ret = __zram_bvec_read(zram, page, index);
9031f7319c7SMinchan Kim 		if (ret)
9041f7319c7SMinchan Kim 			goto out;
9051f7319c7SMinchan Kim 
		/* Patch the caller's window into the decompressed page. */
9061f7319c7SMinchan Kim 		src = kmap_atomic(bvec->bv_page);
9071f7319c7SMinchan Kim 		dst = kmap_atomic(page);
9081f7319c7SMinchan Kim 		memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
9091f7319c7SMinchan Kim 		kunmap_atomic(dst);
9101f7319c7SMinchan Kim 		kunmap_atomic(src);
9111f7319c7SMinchan Kim 
		/* Rewrite the bvec to describe the merged full page. */
9121f7319c7SMinchan Kim 		vec.bv_page = page;
9131f7319c7SMinchan Kim 		vec.bv_len = PAGE_SIZE;
9141f7319c7SMinchan Kim 		vec.bv_offset = 0;
9151f7319c7SMinchan Kim 	}
9161f7319c7SMinchan Kim 
9171f7319c7SMinchan Kim 	ret = __zram_bvec_write(zram, &vec, index);
918cd67e10aSMinchan Kim out:
919cd67e10aSMinchan Kim 	if (is_partial_io(bvec))
9201f7319c7SMinchan Kim 		__free_page(page);
921cd67e10aSMinchan Kim 	return ret;
922cd67e10aSMinchan Kim }
923cd67e10aSMinchan Kim 
924f4659d8eSJoonsoo Kim /*
925f4659d8eSJoonsoo Kim  * zram_bio_discard - handler on discard request
926f4659d8eSJoonsoo Kim  * @index: physical block index in PAGE_SIZE units
927f4659d8eSJoonsoo Kim  * @offset: byte offset within physical block
928f4659d8eSJoonsoo Kim  */
929f4659d8eSJoonsoo Kim static void zram_bio_discard(struct zram *zram, u32 index,
930f4659d8eSJoonsoo Kim 			     int offset, struct bio *bio)
931f4659d8eSJoonsoo Kim {
932f4659d8eSJoonsoo Kim 	size_t n = bio->bi_iter.bi_size;
933f4659d8eSJoonsoo Kim 
934f4659d8eSJoonsoo Kim 	/*
935f4659d8eSJoonsoo Kim 	 * zram manages data in physical block size units. Because logical block
936f4659d8eSJoonsoo Kim 	 * size isn't identical with physical block size on some arch, we
937f4659d8eSJoonsoo Kim 	 * could get a discard request pointing to a specific offset within a
938f4659d8eSJoonsoo Kim 	 * certain physical block.  Although we can handle this request by
939f4659d8eSJoonsoo Kim 	 * reading that physiclal block and decompressing and partially zeroing
940f4659d8eSJoonsoo Kim 	 * and re-compressing and then re-storing it, this isn't reasonable
941f4659d8eSJoonsoo Kim 	 * because our intent with a discard request is to save memory.  So
942f4659d8eSJoonsoo Kim 	 * skipping this logical block is appropriate here.
943f4659d8eSJoonsoo Kim 	 */
944f4659d8eSJoonsoo Kim 	if (offset) {
94538515c73SWeijie Yang 		if (n <= (PAGE_SIZE - offset))
946f4659d8eSJoonsoo Kim 			return;
947f4659d8eSJoonsoo Kim 
		/* Skip the partial head; start at the next page boundary. */
94838515c73SWeijie Yang 		n -= (PAGE_SIZE - offset);
949f4659d8eSJoonsoo Kim 		index++;
950f4659d8eSJoonsoo Kim 	}
951f4659d8eSJoonsoo Kim 
	/* Free every fully covered page; a partial tail is left intact. */
952f4659d8eSJoonsoo Kim 	while (n >= PAGE_SIZE) {
95386c49814SMinchan Kim 		zram_slot_lock(zram, index);
954f4659d8eSJoonsoo Kim 		zram_free_page(zram, index);
95586c49814SMinchan Kim 		zram_slot_unlock(zram, index);
956015254daSSergey Senozhatsky 		atomic64_inc(&zram->stats.notify_free);
957f4659d8eSJoonsoo Kim 		index++;
958f4659d8eSJoonsoo Kim 		n -= PAGE_SIZE;
959f4659d8eSJoonsoo Kim 	}
960f4659d8eSJoonsoo Kim }
961f4659d8eSJoonsoo Kim 
962*ae85a807SMinchan Kim /*
963*ae85a807SMinchan Kim  * Returns errno if it has some problem. Otherwise return 0 or 1.
964*ae85a807SMinchan Kim  * Returns 0 if IO request was done synchronously
965*ae85a807SMinchan Kim  * Returns 1 if IO request was successfully submitted.
966*ae85a807SMinchan Kim  */
967522698d7SSergey Senozhatsky static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
968c11f0c0bSJens Axboe 			int offset, bool is_write)
969522698d7SSergey Senozhatsky {
970522698d7SSergey Senozhatsky 	unsigned long start_time = jiffies;
971c11f0c0bSJens Axboe 	int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
972522698d7SSergey Senozhatsky 	int ret;
973522698d7SSergey Senozhatsky 
	/* Account the I/O against the gendisk like a real block device. */
974c11f0c0bSJens Axboe 	generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
975522698d7SSergey Senozhatsky 			&zram->disk->part0);
976522698d7SSergey Senozhatsky 
977c11f0c0bSJens Axboe 	if (!is_write) {
978522698d7SSergey Senozhatsky 		atomic64_inc(&zram->stats.num_reads);
979522698d7SSergey Senozhatsky 		ret = zram_bvec_read(zram, bvec, index, offset);
9801f7319c7SMinchan Kim 		flush_dcache_page(bvec->bv_page);
981522698d7SSergey Senozhatsky 	} else {
982522698d7SSergey Senozhatsky 		atomic64_inc(&zram->stats.num_writes);
983522698d7SSergey Senozhatsky 		ret = zram_bvec_write(zram, bvec, index, offset);
984522698d7SSergey Senozhatsky 	}
985522698d7SSergey Senozhatsky 
986c11f0c0bSJens Axboe 	generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);
987522698d7SSergey Senozhatsky 
	/* Only negative values are failures; see the comment above. */
988*ae85a807SMinchan Kim 	if (unlikely(ret < 0)) {
989c11f0c0bSJens Axboe 		if (!is_write)
990522698d7SSergey Senozhatsky 			atomic64_inc(&zram->stats.failed_reads);
991522698d7SSergey Senozhatsky 		else
992522698d7SSergey Senozhatsky 			atomic64_inc(&zram->stats.failed_writes);
993522698d7SSergey Senozhatsky 	}
994522698d7SSergey Senozhatsky 
995522698d7SSergey Senozhatsky 	return ret;
996522698d7SSergey Senozhatsky }
997522698d7SSergey Senozhatsky 
/*
 * __zram_make_request - split a validated bio into page-sized chunks and
 * service each through zram_bvec_rw(). Discard/write-zeroes requests are
 * diverted to zram_bio_discard(). Completes the bio itself (bio_endio on
 * success, bio_io_error on the first failed chunk).
 */
998522698d7SSergey Senozhatsky static void __zram_make_request(struct zram *zram, struct bio *bio)
999522698d7SSergey Senozhatsky {
1000abf54548SMike Christie 	int offset;
1001522698d7SSergey Senozhatsky 	u32 index;
1002522698d7SSergey Senozhatsky 	struct bio_vec bvec;
1003522698d7SSergey Senozhatsky 	struct bvec_iter iter;
1004522698d7SSergey Senozhatsky 
	/* Translate the starting sector into page index + in-page offset. */
1005522698d7SSergey Senozhatsky 	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
1006522698d7SSergey Senozhatsky 	offset = (bio->bi_iter.bi_sector &
1007522698d7SSergey Senozhatsky 		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
1008522698d7SSergey Senozhatsky 
100931edeacdSChristoph Hellwig 	switch (bio_op(bio)) {
101031edeacdSChristoph Hellwig 	case REQ_OP_DISCARD:
101131edeacdSChristoph Hellwig 	case REQ_OP_WRITE_ZEROES:
1012522698d7SSergey Senozhatsky 		zram_bio_discard(zram, index, offset, bio);
10134246a0b6SChristoph Hellwig 		bio_endio(bio);
1014522698d7SSergey Senozhatsky 		return;
101531edeacdSChristoph Hellwig 	default:
101631edeacdSChristoph Hellwig 		break;
1017522698d7SSergey Senozhatsky 	}
1018522698d7SSergey Senozhatsky 
1019522698d7SSergey Senozhatsky 	bio_for_each_segment(bvec, bio, iter) {
		/* A segment may straddle page boundaries; split it up. */
1020e86942c7SMinchan Kim 		struct bio_vec bv = bvec;
1021e86942c7SMinchan Kim 		unsigned int unwritten = bvec.bv_len;
1022522698d7SSergey Senozhatsky 
1023e86942c7SMinchan Kim 		do {
1024e86942c7SMinchan Kim 			bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
1025e86942c7SMinchan Kim 							unwritten);
1026abf54548SMike Christie 			if (zram_bvec_rw(zram, &bv, index, offset,
1027c11f0c0bSJens Axboe 					op_is_write(bio_op(bio))) < 0)
1028522698d7SSergey Senozhatsky 				goto out;
1029522698d7SSergey Senozhatsky 
1030e86942c7SMinchan Kim 			bv.bv_offset += bv.bv_len;
1031e86942c7SMinchan Kim 			unwritten -= bv.bv_len;
1032522698d7SSergey Senozhatsky 
1033e86942c7SMinchan Kim 			update_position(&index, &offset, &bv);
1034e86942c7SMinchan Kim 		} while (unwritten);
1035522698d7SSergey Senozhatsky 	}
1036522698d7SSergey Senozhatsky 
10374246a0b6SChristoph Hellwig 	bio_endio(bio);
1038522698d7SSergey Senozhatsky 	return;
1039522698d7SSergey Senozhatsky 
1040522698d7SSergey Senozhatsky out:
1041522698d7SSergey Senozhatsky 	bio_io_error(bio);
1042522698d7SSergey Senozhatsky }
1043522698d7SSergey Senozhatsky 
1044522698d7SSergey Senozhatsky /*
1045522698d7SSergey Senozhatsky  * Handler function for all zram I/O requests.
1046522698d7SSergey Senozhatsky  */
1047dece1635SJens Axboe static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
1048522698d7SSergey Senozhatsky {
1049522698d7SSergey Senozhatsky 	struct zram *zram = queue->queuedata;
1050522698d7SSergey Senozhatsky 
	/* Reject bios that are misaligned or run past the disksize. */
1051522698d7SSergey Senozhatsky 	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
1052522698d7SSergey Senozhatsky 					bio->bi_iter.bi_size)) {
1053522698d7SSergey Senozhatsky 		atomic64_inc(&zram->stats.invalid_io);
1054a09759acSMinchan Kim 		goto error;
1055522698d7SSergey Senozhatsky 	}
1056522698d7SSergey Senozhatsky 
1057522698d7SSergey Senozhatsky 	__zram_make_request(zram, bio);
1058dece1635SJens Axboe 	return BLK_QC_T_NONE;
1059a09759acSMinchan Kim 
1060522698d7SSergey Senozhatsky error:
1061522698d7SSergey Senozhatsky 	bio_io_error(bio);
1062dece1635SJens Axboe 	return BLK_QC_T_NONE;
1063522698d7SSergey Senozhatsky }
1064522698d7SSergey Senozhatsky 
/*
 * zram_slot_free_notify - swap layer callback invoked when a swap slot
 * backed by this device is freed; releases the stored page immediately
 * instead of waiting for an overwrite.
 */
1065522698d7SSergey Senozhatsky static void zram_slot_free_notify(struct block_device *bdev,
1066522698d7SSergey Senozhatsky 				unsigned long index)
1067522698d7SSergey Senozhatsky {
1068522698d7SSergey Senozhatsky 	struct zram *zram;
1069522698d7SSergey Senozhatsky 
1070522698d7SSergey Senozhatsky 	zram = bdev->bd_disk->private_data;
1071522698d7SSergey Senozhatsky 
107286c49814SMinchan Kim 	zram_slot_lock(zram, index);
1073522698d7SSergey Senozhatsky 	zram_free_page(zram, index);
107486c49814SMinchan Kim 	zram_slot_unlock(zram, index);
1075522698d7SSergey Senozhatsky 	atomic64_inc(&zram->stats.notify_free);
1076522698d7SSergey Senozhatsky }
1077522698d7SSergey Senozhatsky 
/*
 * zram_rw_page - ->rw_page block_device_operations entry point.
 *
 * Services a single-page read or write without a bio. Return contract
 * (see zram_bvec_rw): 0 means the I/O completed synchronously (we call
 * page_endio here); 1 means it was submitted asynchronously (the
 * eventual bio completion handles page_endio); negative errno means
 * failure and the caller resubmits via the bio path.
 */
1078522698d7SSergey Senozhatsky static int zram_rw_page(struct block_device *bdev, sector_t sector,
1079c11f0c0bSJens Axboe 		       struct page *page, bool is_write)
1080522698d7SSergey Senozhatsky {
1081*ae85a807SMinchan Kim 	int offset, ret;
1082522698d7SSergey Senozhatsky 	u32 index;
1083522698d7SSergey Senozhatsky 	struct zram *zram;
1084522698d7SSergey Senozhatsky 	struct bio_vec bv;
1085522698d7SSergey Senozhatsky 
1086522698d7SSergey Senozhatsky 	zram = bdev->bd_disk->private_data;
1087522698d7SSergey Senozhatsky 
1088522698d7SSergey Senozhatsky 	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
1089522698d7SSergey Senozhatsky 		atomic64_inc(&zram->stats.invalid_io);
1090*ae85a807SMinchan Kim 		ret = -EINVAL;
1091a09759acSMinchan Kim 		goto out;
1092522698d7SSergey Senozhatsky 	}
1093522698d7SSergey Senozhatsky 
1094522698d7SSergey Senozhatsky 	index = sector >> SECTORS_PER_PAGE_SHIFT;
10954ca82dabSMinchan Kim 	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
1096522698d7SSergey Senozhatsky 
	/* Build a one-page bvec so we can reuse the common rw path. */
1097522698d7SSergey Senozhatsky 	bv.bv_page = page;
1098522698d7SSergey Senozhatsky 	bv.bv_len = PAGE_SIZE;
1099522698d7SSergey Senozhatsky 	bv.bv_offset = 0;
1100522698d7SSergey Senozhatsky 
1101*ae85a807SMinchan Kim 	ret = zram_bvec_rw(zram, &bv, index, offset, is_write);
1102522698d7SSergey Senozhatsky out:
1103522698d7SSergey Senozhatsky 	/*
1104522698d7SSergey Senozhatsky 	 * If I/O fails, just return error(ie, non-zero) without
1105522698d7SSergey Senozhatsky 	 * calling page_endio.
1106522698d7SSergey Senozhatsky 	 * It causes resubmit the I/O with bio request by upper functions
1107522698d7SSergey Senozhatsky 	 * of rw_page(e.g., swap_readpage, __swap_writepage) and
1108522698d7SSergey Senozhatsky 	 * bio->bi_end_io does things to handle the error
1109522698d7SSergey Senozhatsky 	 * (e.g., SetPageError, set_page_dirty and extra works).
1110522698d7SSergey Senozhatsky 	 */
1111*ae85a807SMinchan Kim 	if (unlikely(ret < 0))
1112*ae85a807SMinchan Kim 		return ret;
1113*ae85a807SMinchan Kim 
1114*ae85a807SMinchan Kim 	switch (ret) {
1115*ae85a807SMinchan Kim 	case 0:
1116c11f0c0bSJens Axboe 		page_endio(page, is_write, 0);
1117*ae85a807SMinchan Kim 		break;
1118*ae85a807SMinchan Kim 	case 1:
		/* Async submission: completion path will end the page I/O. */
1119*ae85a807SMinchan Kim 		ret = 0;
1120*ae85a807SMinchan Kim 		break;
1121*ae85a807SMinchan Kim 	default:
1122*ae85a807SMinchan Kim 		WARN_ON(1);
1123*ae85a807SMinchan Kim 	}
1124*ae85a807SMinchan Kim 	return ret;
1125522698d7SSergey Senozhatsky }
1126522698d7SSergey Senozhatsky 
/*
 * zram_reset_device - return the device to its pristine (uninitialized)
 * state: zero capacity, freed metadata, destroyed compressor, cleared
 * stats. Metadata/compressor teardown happens after init_lock is
 * dropped, once no CPU can still be doing I/O on them.
 */
1127ba6b17d6SSergey Senozhatsky static void zram_reset_device(struct zram *zram)
1128cd67e10aSMinchan Kim {
112908eee69fSMinchan Kim 	struct zcomp *comp;
113008eee69fSMinchan Kim 	u64 disksize;
113108eee69fSMinchan Kim 
1132cd67e10aSMinchan Kim 	down_write(&zram->init_lock);
11339ada9da9SMinchan Kim 
11349ada9da9SMinchan Kim 	zram->limit_pages = 0;
11359ada9da9SMinchan Kim 
1136be2d1d56SSergey Senozhatsky 	if (!init_done(zram)) {
1137cd67e10aSMinchan Kim 		up_write(&zram->init_lock);
1138cd67e10aSMinchan Kim 		return;
1139cd67e10aSMinchan Kim 	}
1140cd67e10aSMinchan Kim 
	/* Stash pointers so teardown can happen outside init_lock. */
114108eee69fSMinchan Kim 	comp = zram->comp;
114208eee69fSMinchan Kim 	disksize = zram->disksize;
1143cd67e10aSMinchan Kim 	zram->disksize = 0;
1144d7ad41a1SWeijie Yang 
1145a096cafcSSergey Senozhatsky 	set_capacity(zram->disk, 0);
1146d7ad41a1SWeijie Yang 	part_stat_set_all(&zram->disk->part0, 0);
1147a096cafcSSergey Senozhatsky 
1148cd67e10aSMinchan Kim 	up_write(&zram->init_lock);
114908eee69fSMinchan Kim 	/* I/O operation under all of CPU are done so let's free */
1150beb6602cSMinchan Kim 	zram_meta_free(zram, disksize);
1151302128dcSMinchan Kim 	memset(&zram->stats, 0, sizeof(zram->stats));
115208eee69fSMinchan Kim 	zcomp_destroy(comp);
1153013bf95aSMinchan Kim 	reset_bdev(zram);
1154cd67e10aSMinchan Kim }
1155cd67e10aSMinchan Kim 
/*
 * disksize_store - sysfs handler that sizes and initializes the device.
 *
 * Parses a human-readable size (memparse), allocates metadata and the
 * compression backend, then publishes the new capacity. Fails with
 * -EBUSY if the device is already initialized, -EINVAL for a zero size,
 * -ENOMEM/compressor errno on allocation failure.
 */
1156cd67e10aSMinchan Kim static ssize_t disksize_store(struct device *dev,
1157cd67e10aSMinchan Kim 		struct device_attribute *attr, const char *buf, size_t len)
1158cd67e10aSMinchan Kim {
1159cd67e10aSMinchan Kim 	u64 disksize;
1160d61f98c7SSergey Senozhatsky 	struct zcomp *comp;
1161cd67e10aSMinchan Kim 	struct zram *zram = dev_to_zram(dev);
1162fcfa8d95SSergey Senozhatsky 	int err;
1163cd67e10aSMinchan Kim 
1164cd67e10aSMinchan Kim 	disksize = memparse(buf, NULL);
1165cd67e10aSMinchan Kim 	if (!disksize)
1166cd67e10aSMinchan Kim 		return -EINVAL;
1167cd67e10aSMinchan Kim 
1168beb6602cSMinchan Kim 	down_write(&zram->init_lock);
1169beb6602cSMinchan Kim 	if (init_done(zram)) {
1170beb6602cSMinchan Kim 		pr_info("Cannot change disksize for initialized device\n");
1171beb6602cSMinchan Kim 		err = -EBUSY;
1172beb6602cSMinchan Kim 		goto out_unlock;
1173beb6602cSMinchan Kim 	}
1174beb6602cSMinchan Kim 
1175cd67e10aSMinchan Kim 	disksize = PAGE_ALIGN(disksize);
1176beb6602cSMinchan Kim 	if (!zram_meta_alloc(zram, disksize)) {
1177beb6602cSMinchan Kim 		err = -ENOMEM;
1178beb6602cSMinchan Kim 		goto out_unlock;
1179beb6602cSMinchan Kim 	}
1180b67d1ec1SSergey Senozhatsky 
1181da9556a2SSergey Senozhatsky 	comp = zcomp_create(zram->compressor);
1182fcfa8d95SSergey Senozhatsky 	if (IS_ERR(comp)) {
118370864969SSergey Senozhatsky 		pr_err("Cannot initialise %s compressing backend\n",
1184e46b8a03SSergey Senozhatsky 				zram->compressor);
1185fcfa8d95SSergey Senozhatsky 		err = PTR_ERR(comp);
1186fcfa8d95SSergey Senozhatsky 		goto out_free_meta;
1187d61f98c7SSergey Senozhatsky 	}
1188d61f98c7SSergey Senozhatsky 
	/* Publish: set comp/disksize, then expose the new capacity. */
1189d61f98c7SSergey Senozhatsky 	zram->comp = comp;
1190cd67e10aSMinchan Kim 	zram->disksize = disksize;
1191cd67e10aSMinchan Kim 	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
1192b09ab054SMinchan Kim 	zram_revalidate_disk(zram);
1193e7ccfc4cSMinchan Kim 	up_write(&zram->init_lock);
1194b4c5c609SMinchan Kim 
1195cd67e10aSMinchan Kim 	return len;
1196b7ca232eSSergey Senozhatsky 
1197fcfa8d95SSergey Senozhatsky out_free_meta:
1198beb6602cSMinchan Kim 	zram_meta_free(zram, disksize);
1199beb6602cSMinchan Kim out_unlock:
1200beb6602cSMinchan Kim 	up_write(&zram->init_lock);
1201b7ca232eSSergey Senozhatsky 	return err;
1202cd67e10aSMinchan Kim }
1203cd67e10aSMinchan Kim 
/*
 * reset_store - sysfs handler that fully resets the device.
 *
 * Requires a non-zero value to be written. Claims the block device
 * under bd_mutex (so no new opens can race the reset), refuses with
 * -EBUSY if it is currently open or already claimed, flushes pending
 * I/O, then tears everything down via zram_reset_device().
 */
1204cd67e10aSMinchan Kim static ssize_t reset_store(struct device *dev,
1205cd67e10aSMinchan Kim 		struct device_attribute *attr, const char *buf, size_t len)
1206cd67e10aSMinchan Kim {
1207cd67e10aSMinchan Kim 	int ret;
1208cd67e10aSMinchan Kim 	unsigned short do_reset;
1209cd67e10aSMinchan Kim 	struct zram *zram;
1210cd67e10aSMinchan Kim 	struct block_device *bdev;
1211cd67e10aSMinchan Kim 
1212f405c445SSergey Senozhatsky 	ret = kstrtou16(buf, 10, &do_reset);
1213f405c445SSergey Senozhatsky 	if (ret)
1214f405c445SSergey Senozhatsky 		return ret;
1215f405c445SSergey Senozhatsky 
1216f405c445SSergey Senozhatsky 	if (!do_reset)
1217f405c445SSergey Senozhatsky 		return -EINVAL;
1218f405c445SSergey Senozhatsky 
1219cd67e10aSMinchan Kim 	zram = dev_to_zram(dev);
1220cd67e10aSMinchan Kim 	bdev = bdget_disk(zram->disk, 0);
1221cd67e10aSMinchan Kim 	if (!bdev)
1222cd67e10aSMinchan Kim 		return -ENOMEM;
1223cd67e10aSMinchan Kim 
1224ba6b17d6SSergey Senozhatsky 	mutex_lock(&bdev->bd_mutex);
1225f405c445SSergey Senozhatsky 	/* Do not reset an active device or claimed device */
1226f405c445SSergey Senozhatsky 	if (bdev->bd_openers || zram->claim) {
1227f405c445SSergey Senozhatsky 		mutex_unlock(&bdev->bd_mutex);
1228f405c445SSergey Senozhatsky 		bdput(bdev);
1229f405c445SSergey Senozhatsky 		return -EBUSY;
1230cd67e10aSMinchan Kim 	}
1231cd67e10aSMinchan Kim 
1232f405c445SSergey Senozhatsky 	/* From now on, anyone can't open /dev/zram[0-9] */
1233f405c445SSergey Senozhatsky 	zram->claim = true;
1234f405c445SSergey Senozhatsky 	mutex_unlock(&bdev->bd_mutex);
1235cd67e10aSMinchan Kim 
1236f405c445SSergey Senozhatsky 	/* Make sure all the pending I/O are finished */
1237cd67e10aSMinchan Kim 	fsync_bdev(bdev);
1238ba6b17d6SSergey Senozhatsky 	zram_reset_device(zram);
1239b09ab054SMinchan Kim 	zram_revalidate_disk(zram);
1240cd67e10aSMinchan Kim 	bdput(bdev);
1241cd67e10aSMinchan Kim 
	/* Drop the claim so the device can be opened again. */
1242f405c445SSergey Senozhatsky 	mutex_lock(&bdev->bd_mutex);
1243f405c445SSergey Senozhatsky 	zram->claim = false;
1244ba6b17d6SSergey Senozhatsky 	mutex_unlock(&bdev->bd_mutex);
1245f405c445SSergey Senozhatsky 
1246f405c445SSergey Senozhatsky 	return len;
1247f405c445SSergey Senozhatsky }
1248f405c445SSergey Senozhatsky 
/*
 * zram_open - ->open hook; fails with -EBUSY while the device is
 * claimed by an in-progress reset (see reset_store()). Runs with
 * bd_mutex held, which serializes it against the claim transitions.
 */
1249f405c445SSergey Senozhatsky static int zram_open(struct block_device *bdev, fmode_t mode)
1250f405c445SSergey Senozhatsky {
1251f405c445SSergey Senozhatsky 	int ret = 0;
1252f405c445SSergey Senozhatsky 	struct zram *zram;
1253f405c445SSergey Senozhatsky 
1254f405c445SSergey Senozhatsky 	WARN_ON(!mutex_is_locked(&bdev->bd_mutex));
1255f405c445SSergey Senozhatsky 
1256f405c445SSergey Senozhatsky 	zram = bdev->bd_disk->private_data;
1257f405c445SSergey Senozhatsky 	/* zram was claimed to reset so open request fails */
1258f405c445SSergey Senozhatsky 	if (zram->claim)
1259f405c445SSergey Senozhatsky 		ret = -EBUSY;
1260f405c445SSergey Senozhatsky 
1261cd67e10aSMinchan Kim 	return ret;
1262cd67e10aSMinchan Kim }
1263cd67e10aSMinchan Kim 
/* Block device operations: open gate, swap-slot-free hook, rw_page path. */
1264cd67e10aSMinchan Kim static const struct block_device_operations zram_devops = {
1265f405c445SSergey Senozhatsky 	.open = zram_open,
1266cd67e10aSMinchan Kim 	.swap_slot_free_notify = zram_slot_free_notify,
12678c7f0102Skaram.lee 	.rw_page = zram_rw_page,
1268cd67e10aSMinchan Kim 	.owner = THIS_MODULE
1269cd67e10aSMinchan Kim };
1270cd67e10aSMinchan Kim 
/* Per-device sysfs attribute declarations (handlers defined elsewhere). */
127199ebbd30SAndrew Morton static DEVICE_ATTR_WO(compact);
1272083914eaSGanesh Mahendran static DEVICE_ATTR_RW(disksize);
1273083914eaSGanesh Mahendran static DEVICE_ATTR_RO(initstate);
1274083914eaSGanesh Mahendran static DEVICE_ATTR_WO(reset);
1275c87d1655SSergey Senozhatsky static DEVICE_ATTR_WO(mem_limit);
1276c87d1655SSergey Senozhatsky static DEVICE_ATTR_WO(mem_used_max);
1277083914eaSGanesh Mahendran static DEVICE_ATTR_RW(max_comp_streams);
1278083914eaSGanesh Mahendran static DEVICE_ATTR_RW(comp_algorithm);
1279013bf95aSMinchan Kim #ifdef CONFIG_ZRAM_WRITEBACK
1280013bf95aSMinchan Kim static DEVICE_ATTR_RW(backing_dev);
1281013bf95aSMinchan Kim #endif
1282cd67e10aSMinchan Kim 
/* NULL-terminated list of the attributes exported for each zram disk. */
1283cd67e10aSMinchan Kim static struct attribute *zram_disk_attrs[] = {
1284cd67e10aSMinchan Kim 	&dev_attr_disksize.attr,
1285cd67e10aSMinchan Kim 	&dev_attr_initstate.attr,
1286cd67e10aSMinchan Kim 	&dev_attr_reset.attr,
128799ebbd30SAndrew Morton 	&dev_attr_compact.attr,
12889ada9da9SMinchan Kim 	&dev_attr_mem_limit.attr,
1289461a8eeeSMinchan Kim 	&dev_attr_mem_used_max.attr,
1290beca3ec7SSergey Senozhatsky 	&dev_attr_max_comp_streams.attr,
1291e46b8a03SSergey Senozhatsky 	&dev_attr_comp_algorithm.attr,
1292013bf95aSMinchan Kim #ifdef CONFIG_ZRAM_WRITEBACK
1293013bf95aSMinchan Kim 	&dev_attr_backing_dev.attr,
1294013bf95aSMinchan Kim #endif
12952f6a3bedSSergey Senozhatsky 	&dev_attr_io_stat.attr,
12964f2109f6SSergey Senozhatsky 	&dev_attr_mm_stat.attr,
1297623e47fcSSergey Senozhatsky 	&dev_attr_debug_stat.attr,
1298cd67e10aSMinchan Kim 	NULL,
1299cd67e10aSMinchan Kim };
1300cd67e10aSMinchan Kim 
/* Group wrapper so the whole attribute set can be (un)registered at once. */
1301bc1bb362SArvind Yadav static const struct attribute_group zram_disk_attr_group = {
1302cd67e10aSMinchan Kim 	.attrs = zram_disk_attrs,
1303cd67e10aSMinchan Kim };
1304cd67e10aSMinchan Kim 
130592ff1528SSergey Senozhatsky /*
130692ff1528SSergey Senozhatsky  * Allocate and initialize a new zram device.  Returns the '>= 0'
130792ff1528SSergey Senozhatsky  * device_id upon success, and a negative errno otherwise.  Caller
 * must hold zram_index_mutex (see the hot_add / zram_init call sites).
130892ff1528SSergey Senozhatsky  */
130992ff1528SSergey Senozhatsky static int zram_add(void)
1310cd67e10aSMinchan Kim {
131185508ec6SSergey Senozhatsky 	struct zram *zram;
1312ee980160SSergey Senozhatsky 	struct request_queue *queue;
131392ff1528SSergey Senozhatsky 	int ret, device_id;
131485508ec6SSergey Senozhatsky 
131585508ec6SSergey Senozhatsky 	zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
131685508ec6SSergey Senozhatsky 	if (!zram)
131785508ec6SSergey Senozhatsky 		return -ENOMEM;
131885508ec6SSergey Senozhatsky 
	/* idr_alloc() with end == 0 picks the lowest free id from 0 up */
131992ff1528SSergey Senozhatsky 	ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
132085508ec6SSergey Senozhatsky 	if (ret < 0)
132185508ec6SSergey Senozhatsky 		goto out_free_dev;
132292ff1528SSergey Senozhatsky 	device_id = ret;
1323cd67e10aSMinchan Kim 
1324cd67e10aSMinchan Kim 	init_rwsem(&zram->init_lock);
1325cd67e10aSMinchan Kim 
1326ee980160SSergey Senozhatsky 	queue = blk_alloc_queue(GFP_KERNEL);
1327ee980160SSergey Senozhatsky 	if (!queue) {
1328cd67e10aSMinchan Kim 		pr_err("Error allocating disk queue for device %d\n",
1329cd67e10aSMinchan Kim 			device_id);
133085508ec6SSergey Senozhatsky 		ret = -ENOMEM;
133185508ec6SSergey Senozhatsky 		goto out_free_idr;
1332cd67e10aSMinchan Kim 	}
1333cd67e10aSMinchan Kim 
	/* bio-based device: all I/O enters through zram_make_request() */
1334ee980160SSergey Senozhatsky 	blk_queue_make_request(queue, zram_make_request);
1335cd67e10aSMinchan Kim 
1336cd67e10aSMinchan Kim 	/* gendisk structure */
1337cd67e10aSMinchan Kim 	zram->disk = alloc_disk(1);
1338cd67e10aSMinchan Kim 	if (!zram->disk) {
133970864969SSergey Senozhatsky 		pr_err("Error allocating disk structure for device %d\n",
1340cd67e10aSMinchan Kim 			device_id);
1341201c7b72SJulia Lawall 		ret = -ENOMEM;
1342cd67e10aSMinchan Kim 		goto out_free_queue;
1343cd67e10aSMinchan Kim 	}
1344cd67e10aSMinchan Kim 
1345cd67e10aSMinchan Kim 	zram->disk->major = zram_major;
1346cd67e10aSMinchan Kim 	zram->disk->first_minor = device_id;
1347cd67e10aSMinchan Kim 	zram->disk->fops = &zram_devops;
1348ee980160SSergey Senozhatsky 	zram->disk->queue = queue;
1349ee980160SSergey Senozhatsky 	zram->disk->queue->queuedata = zram;
1350cd67e10aSMinchan Kim 	zram->disk->private_data = zram;
1351cd67e10aSMinchan Kim 	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
1352cd67e10aSMinchan Kim 
1353cd67e10aSMinchan Kim 	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
1354cd67e10aSMinchan Kim 	set_capacity(zram->disk, 0);
1355b67d1ec1SSergey Senozhatsky 	/* zram devices sort of resembles non-rotational disks */
1356b67d1ec1SSergey Senozhatsky 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
1357b277da0aSMike Snitzer 	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
1358cd67e10aSMinchan Kim 	/*
1359cd67e10aSMinchan Kim 	 * To ensure that we always get PAGE_SIZE aligned
1360cd67e10aSMinchan Kim 	 * and n*PAGE_SIZED sized I/O requests.
1361cd67e10aSMinchan Kim 	 */
1362cd67e10aSMinchan Kim 	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
1363cd67e10aSMinchan Kim 	blk_queue_logical_block_size(zram->disk->queue,
1364cd67e10aSMinchan Kim 					ZRAM_LOGICAL_BLOCK_SIZE);
1365cd67e10aSMinchan Kim 	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
1366cd67e10aSMinchan Kim 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
1367f4659d8eSJoonsoo Kim 	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
13682bb4cd5cSJens Axboe 	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
136931edeacdSChristoph Hellwig 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
137031edeacdSChristoph Hellwig 
1371f4659d8eSJoonsoo Kim 	/*
1372f4659d8eSJoonsoo Kim 	 * zram_bio_discard() will clear all logical blocks if logical block
1373f4659d8eSJoonsoo Kim 	 * size is identical with physical block size(PAGE_SIZE). But if it is
1374f4659d8eSJoonsoo Kim 	 * different, we will skip discarding some parts of logical blocks in
1375f4659d8eSJoonsoo Kim 	 * the part of the request range which isn't aligned to physical block
1376f4659d8eSJoonsoo Kim 	 * size.  So we can't ensure that all discarded logical blocks are
1377f4659d8eSJoonsoo Kim 	 * zeroed.
1378f4659d8eSJoonsoo Kim 	 */
1379f4659d8eSJoonsoo Kim 	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
138031edeacdSChristoph Hellwig 		blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
1381cd67e10aSMinchan Kim 
1382cd67e10aSMinchan Kim 	add_disk(zram->disk);
1383cd67e10aSMinchan Kim 
	/*
	 * NOTE(review): the attribute group is created after add_disk(), so
	 * there is a short window where userspace (udev) can see the disk
	 * without its sysfs attributes -- confirm this is acceptable here.
	 */
1384cd67e10aSMinchan Kim 	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
1385cd67e10aSMinchan Kim 				&zram_disk_attr_group);
1386cd67e10aSMinchan Kim 	if (ret < 0) {
138770864969SSergey Senozhatsky 		pr_err("Error creating sysfs group for device %d\n",
138870864969SSergey Senozhatsky 				device_id);
1389cd67e10aSMinchan Kim 		goto out_free_disk;
1390cd67e10aSMinchan Kim 	}
	/* start with the default compressor; comp_algorithm sysfs knob is RW */
1391e46b8a03SSergey Senozhatsky 	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
1392d12b63c9SSergey Senozhatsky 
1393d12b63c9SSergey Senozhatsky 	pr_info("Added device: %s\n", zram->disk->disk_name);
139492ff1528SSergey Senozhatsky 	return device_id;
1395cd67e10aSMinchan Kim 
	/* unwind in reverse order of acquisition */
1396cd67e10aSMinchan Kim out_free_disk:
1397cd67e10aSMinchan Kim 	del_gendisk(zram->disk);
1398cd67e10aSMinchan Kim 	put_disk(zram->disk);
1399cd67e10aSMinchan Kim out_free_queue:
1400ee980160SSergey Senozhatsky 	blk_cleanup_queue(queue);
140185508ec6SSergey Senozhatsky out_free_idr:
140285508ec6SSergey Senozhatsky 	idr_remove(&zram_index_idr, device_id);
140385508ec6SSergey Senozhatsky out_free_dev:
140485508ec6SSergey Senozhatsky 	kfree(zram);
1405cd67e10aSMinchan Kim 	return ret;
1406cd67e10aSMinchan Kim }
1407cd67e10aSMinchan Kim 
/*
 * Tear down one zram device.  Fails with -EBUSY if the device is still
 * open or already claimed; otherwise sets ->claim (which makes further
 * zram_open() calls fail), removes the sysfs attributes, flushes pending
 * I/O, resets the device and frees everything zram_add() allocated.
 * Returns 0 on success, negative errno otherwise.
 */
14086566d1a3SSergey Senozhatsky static int zram_remove(struct zram *zram)
1409cd67e10aSMinchan Kim {
14106566d1a3SSergey Senozhatsky 	struct block_device *bdev;
14116566d1a3SSergey Senozhatsky 
14126566d1a3SSergey Senozhatsky 	bdev = bdget_disk(zram->disk, 0);
14136566d1a3SSergey Senozhatsky 	if (!bdev)
14146566d1a3SSergey Senozhatsky 		return -ENOMEM;
14156566d1a3SSergey Senozhatsky 
	/* bd_mutex serializes the busy check against zram_open() */
14166566d1a3SSergey Senozhatsky 	mutex_lock(&bdev->bd_mutex);
14176566d1a3SSergey Senozhatsky 	if (bdev->bd_openers || zram->claim) {
14186566d1a3SSergey Senozhatsky 		mutex_unlock(&bdev->bd_mutex);
14196566d1a3SSergey Senozhatsky 		bdput(bdev);
14206566d1a3SSergey Senozhatsky 		return -EBUSY;
14216566d1a3SSergey Senozhatsky 	}
14226566d1a3SSergey Senozhatsky 
	/* from here on any new open fails with -EBUSY (see zram_open()) */
14236566d1a3SSergey Senozhatsky 	zram->claim = true;
14246566d1a3SSergey Senozhatsky 	mutex_unlock(&bdev->bd_mutex);
14256566d1a3SSergey Senozhatsky 
1426a096cafcSSergey Senozhatsky 	/*
1427a096cafcSSergey Senozhatsky 	 * Remove sysfs first, so no one will perform a disksize
14286566d1a3SSergey Senozhatsky 	 * store while we destroy the devices. This also helps during
14296566d1a3SSergey Senozhatsky 	 * hot_remove -- zram_reset_device() is the last holder of
14306566d1a3SSergey Senozhatsky 	 * ->init_lock, no later/concurrent disksize_store() or any
14316566d1a3SSergey Senozhatsky 	 * other sysfs handlers are possible.
1432a096cafcSSergey Senozhatsky 	 */
1433cd67e10aSMinchan Kim 	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
1434cd67e10aSMinchan Kim 			&zram_disk_attr_group);
1435cd67e10aSMinchan Kim 
14366566d1a3SSergey Senozhatsky 	/* Make sure all the pending I/O are finished */
14376566d1a3SSergey Senozhatsky 	fsync_bdev(bdev);
1438a096cafcSSergey Senozhatsky 	zram_reset_device(zram);
14396566d1a3SSergey Senozhatsky 	bdput(bdev);
14406566d1a3SSergey Senozhatsky 
14416566d1a3SSergey Senozhatsky 	pr_info("Removed device: %s\n", zram->disk->disk_name);
14426566d1a3SSergey Senozhatsky 
	/* release everything zram_add() set up; caller drops the idr entry */
1443ee980160SSergey Senozhatsky 	blk_cleanup_queue(zram->disk->queue);
1444cd67e10aSMinchan Kim 	del_gendisk(zram->disk);
1445cd67e10aSMinchan Kim 	put_disk(zram->disk);
144685508ec6SSergey Senozhatsky 	kfree(zram);
14476566d1a3SSergey Senozhatsky 	return 0;
1448cd67e10aSMinchan Kim }
1449cd67e10aSMinchan Kim 
14506566d1a3SSergey Senozhatsky /* zram-control sysfs attributes */
145127104a53SGreg Kroah-Hartman 
145227104a53SGreg Kroah-Hartman /*
145327104a53SGreg Kroah-Hartman  * NOTE: hot_add attribute is not the usual read-only sysfs attribute. In a
145427104a53SGreg Kroah-Hartman  * sense that reading from this file does alter the state of your system -- it
145527104a53SGreg Kroah-Hartman  * creates a new un-initialized zram device and returns back this device's
145627104a53SGreg Kroah-Hartman  * device_id (or an error code if it fails to create a new device).
145727104a53SGreg Kroah-Hartman  */
14586566d1a3SSergey Senozhatsky static ssize_t hot_add_show(struct class *class,
14596566d1a3SSergey Senozhatsky 			struct class_attribute *attr,
14606566d1a3SSergey Senozhatsky 			char *buf)
14616566d1a3SSergey Senozhatsky {
14626566d1a3SSergey Senozhatsky 	int ret;
14636566d1a3SSergey Senozhatsky 
14646566d1a3SSergey Senozhatsky 	mutex_lock(&zram_index_mutex);
14656566d1a3SSergey Senozhatsky 	ret = zram_add();
14666566d1a3SSergey Senozhatsky 	mutex_unlock(&zram_index_mutex);
14676566d1a3SSergey Senozhatsky 
14686566d1a3SSergey Senozhatsky 	if (ret < 0)
14696566d1a3SSergey Senozhatsky 		return ret;
14706566d1a3SSergey Senozhatsky 	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
14716566d1a3SSergey Senozhatsky }
1472f40609d1SGreg Kroah-Hartman static CLASS_ATTR_RO(hot_add);
14736566d1a3SSergey Senozhatsky 
14746566d1a3SSergey Senozhatsky static ssize_t hot_remove_store(struct class *class,
14756566d1a3SSergey Senozhatsky 			struct class_attribute *attr,
14766566d1a3SSergey Senozhatsky 			const char *buf,
14776566d1a3SSergey Senozhatsky 			size_t count)
14786566d1a3SSergey Senozhatsky {
14796566d1a3SSergey Senozhatsky 	struct zram *zram;
14806566d1a3SSergey Senozhatsky 	int ret, dev_id;
14816566d1a3SSergey Senozhatsky 
14826566d1a3SSergey Senozhatsky 	/* dev_id is gendisk->first_minor, which is `int' */
14836566d1a3SSergey Senozhatsky 	ret = kstrtoint(buf, 10, &dev_id);
14846566d1a3SSergey Senozhatsky 	if (ret)
14856566d1a3SSergey Senozhatsky 		return ret;
14866566d1a3SSergey Senozhatsky 	if (dev_id < 0)
14876566d1a3SSergey Senozhatsky 		return -EINVAL;
14886566d1a3SSergey Senozhatsky 
14896566d1a3SSergey Senozhatsky 	mutex_lock(&zram_index_mutex);
14906566d1a3SSergey Senozhatsky 
14916566d1a3SSergey Senozhatsky 	zram = idr_find(&zram_index_idr, dev_id);
149217ec4cd9SJerome Marchand 	if (zram) {
14936566d1a3SSergey Senozhatsky 		ret = zram_remove(zram);
1494529e71e1STakashi Iwai 		if (!ret)
149517ec4cd9SJerome Marchand 			idr_remove(&zram_index_idr, dev_id);
149617ec4cd9SJerome Marchand 	} else {
14976566d1a3SSergey Senozhatsky 		ret = -ENODEV;
149817ec4cd9SJerome Marchand 	}
14996566d1a3SSergey Senozhatsky 
15006566d1a3SSergey Senozhatsky 	mutex_unlock(&zram_index_mutex);
15016566d1a3SSergey Senozhatsky 	return ret ? ret : count;
15026566d1a3SSergey Senozhatsky }
150327104a53SGreg Kroah-Hartman static CLASS_ATTR_WO(hot_remove);
15046566d1a3SSergey Senozhatsky 
/*
 * Class-level attributes of /sys/class/zram-control: hot_add and
 * hot_remove.  ATTRIBUTE_GROUPS() generates zram_control_class_groups
 * for the struct class below.
 */
150527104a53SGreg Kroah-Hartman static struct attribute *zram_control_class_attrs[] = {
150627104a53SGreg Kroah-Hartman 	&class_attr_hot_add.attr,
150727104a53SGreg Kroah-Hartman 	&class_attr_hot_remove.attr,
150827104a53SGreg Kroah-Hartman 	NULL,
15096566d1a3SSergey Senozhatsky };
151027104a53SGreg Kroah-Hartman ATTRIBUTE_GROUPS(zram_control_class);
15116566d1a3SSergey Senozhatsky 
/* The /sys/class/zram-control class carrying the hot_add/hot_remove files. */
15126566d1a3SSergey Senozhatsky static struct class zram_control_class = {
15136566d1a3SSergey Senozhatsky 	.name		= "zram-control",
15146566d1a3SSergey Senozhatsky 	.owner		= THIS_MODULE,
151527104a53SGreg Kroah-Hartman 	.class_groups	= zram_control_class_groups,
15166566d1a3SSergey Senozhatsky };
15176566d1a3SSergey Senozhatsky 
/*
 * idr_for_each() callback used by destroy_devices(): remove one device.
 * The return value of zram_remove() is deliberately not propagated --
 * module teardown continues over the remaining devices regardless.
 */
151885508ec6SSergey Senozhatsky static int zram_remove_cb(int id, void *ptr, void *data)
151985508ec6SSergey Senozhatsky {
152085508ec6SSergey Senozhatsky 	zram_remove(ptr);
152185508ec6SSergey Senozhatsky 	return 0;
152285508ec6SSergey Senozhatsky }
152385508ec6SSergey Senozhatsky 
/*
 * Undo zram_init() in reverse order: drop the zram-control class first so
 * no further hot_add/hot_remove can race with us, then remove every
 * remaining device and the idr, release the block major, and finally
 * remove the CPU hotplug compression-stream state.
 */
152485508ec6SSergey Senozhatsky static void destroy_devices(void)
152585508ec6SSergey Senozhatsky {
15266566d1a3SSergey Senozhatsky 	class_unregister(&zram_control_class);
152785508ec6SSergey Senozhatsky 	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
152885508ec6SSergey Senozhatsky 	idr_destroy(&zram_index_idr);
1529a096cafcSSergey Senozhatsky 	unregister_blkdev(zram_major, "zram");
15301dd6c834SAnna-Maria Gleixner 	cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
1531a096cafcSSergey Senozhatsky }
1532a096cafcSSergey Senozhatsky 
1533cd67e10aSMinchan Kim static int __init zram_init(void)
1534cd67e10aSMinchan Kim {
153592ff1528SSergey Senozhatsky 	int ret;
1536cd67e10aSMinchan Kim 
15371dd6c834SAnna-Maria Gleixner 	ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
15381dd6c834SAnna-Maria Gleixner 				      zcomp_cpu_up_prepare, zcomp_cpu_dead);
15391dd6c834SAnna-Maria Gleixner 	if (ret < 0)
15401dd6c834SAnna-Maria Gleixner 		return ret;
15411dd6c834SAnna-Maria Gleixner 
15426566d1a3SSergey Senozhatsky 	ret = class_register(&zram_control_class);
15436566d1a3SSergey Senozhatsky 	if (ret) {
154470864969SSergey Senozhatsky 		pr_err("Unable to register zram-control class\n");
15451dd6c834SAnna-Maria Gleixner 		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
15466566d1a3SSergey Senozhatsky 		return ret;
15476566d1a3SSergey Senozhatsky 	}
15486566d1a3SSergey Senozhatsky 
1549cd67e10aSMinchan Kim 	zram_major = register_blkdev(0, "zram");
1550cd67e10aSMinchan Kim 	if (zram_major <= 0) {
155170864969SSergey Senozhatsky 		pr_err("Unable to get major number\n");
15526566d1a3SSergey Senozhatsky 		class_unregister(&zram_control_class);
15531dd6c834SAnna-Maria Gleixner 		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
1554a096cafcSSergey Senozhatsky 		return -EBUSY;
1555cd67e10aSMinchan Kim 	}
1556cd67e10aSMinchan Kim 
155792ff1528SSergey Senozhatsky 	while (num_devices != 0) {
15586566d1a3SSergey Senozhatsky 		mutex_lock(&zram_index_mutex);
155992ff1528SSergey Senozhatsky 		ret = zram_add();
15606566d1a3SSergey Senozhatsky 		mutex_unlock(&zram_index_mutex);
156192ff1528SSergey Senozhatsky 		if (ret < 0)
1562a096cafcSSergey Senozhatsky 			goto out_error;
156392ff1528SSergey Senozhatsky 		num_devices--;
1564cd67e10aSMinchan Kim 	}
1565cd67e10aSMinchan Kim 
1566cd67e10aSMinchan Kim 	return 0;
1567cd67e10aSMinchan Kim 
1568a096cafcSSergey Senozhatsky out_error:
156985508ec6SSergey Senozhatsky 	destroy_devices();
1570cd67e10aSMinchan Kim 	return ret;
1571cd67e10aSMinchan Kim }
1572cd67e10aSMinchan Kim 
/* Module exit: tear down all devices and global registrations. */
1573cd67e10aSMinchan Kim static void __exit zram_exit(void)
1574cd67e10aSMinchan Kim {
157585508ec6SSergey Senozhatsky 	destroy_devices();
1576cd67e10aSMinchan Kim }
1577cd67e10aSMinchan Kim 
1578cd67e10aSMinchan Kim module_init(zram_init);
1579cd67e10aSMinchan Kim module_exit(zram_exit);
1580cd67e10aSMinchan Kim 
/* perm 0: load-time only parameter, not visible/writable via sysfs */
1581cd67e10aSMinchan Kim module_param(num_devices, uint, 0);
1582c3cdb40eSSergey Senozhatsky MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
1583cd67e10aSMinchan Kim 
1584cd67e10aSMinchan Kim MODULE_LICENSE("Dual BSD/GPL");
1585cd67e10aSMinchan Kim MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
1586cd67e10aSMinchan Kim MODULE_DESCRIPTION("Compressed RAM Block Device");
1587