xref: /openbmc/linux/drivers/block/zram/zram_drv.c (revision 93d90ad7)
1 /*
2  * Compressed RAM block device
3  *
4  * Copyright (C) 2008, 2009, 2010  Nitin Gupta
5  *               2012, 2013 Minchan Kim
6  *
7  * This code is released using a dual license strategy: BSD/GPL
8  * You can choose the license that better fits your requirements.
9  *
10  * Released under the terms of 3-clause BSD License
11  * Released under the terms of GNU General Public License Version 2.0
12  *
13  */
14 
15 #define KMSG_COMPONENT "zram"
16 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17 
18 #ifdef CONFIG_ZRAM_DEBUG
19 #define DEBUG
20 #endif
21 
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/bio.h>
25 #include <linux/bitops.h>
26 #include <linux/blkdev.h>
27 #include <linux/buffer_head.h>
28 #include <linux/device.h>
29 #include <linux/genhd.h>
30 #include <linux/highmem.h>
31 #include <linux/slab.h>
32 #include <linux/string.h>
33 #include <linux/vmalloc.h>
34 #include <linux/err.h>
35 
36 #include "zram_drv.h"
37 
38 /* Globals */
39 static int zram_major;
40 static struct zram *zram_devices;
41 static const char *default_compressor = "lzo";
42 
43 /* Module params (documentation at end) */
44 static unsigned int num_devices = 1;
45 
46 #define ZRAM_ATTR_RO(name)						\
47 static ssize_t name##_show(struct device *d,		\
48 				struct device_attribute *attr, char *b)	\
49 {									\
50 	struct zram *zram = dev_to_zram(d);				\
51 	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
52 		(u64)atomic64_read(&zram->stats.name));			\
53 }									\
54 static DEVICE_ATTR_RO(name);
55 
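/*
 * For illustration (a sketch, not part of the driver): ZRAM_ATTR_RO(num_reads),
 * used near the end of this file, expands to roughly the following;
 * DEVICE_ATTR_RO() then supplies the dev_attr_num_reads device_attribute
 * that backs the read-only sysfs file:
 *
 *	static ssize_t num_reads_show(struct device *d,
 *				struct device_attribute *attr, char *b)
 *	{
 *		struct zram *zram = dev_to_zram(d);
 *		return scnprintf(b, PAGE_SIZE, "%llu\n",
 *			(u64)atomic64_read(&zram->stats.num_reads));
 *	}
 *	static DEVICE_ATTR_RO(num_reads);
 */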
56 static inline int init_done(struct zram *zram)
57 {
58 	return zram->meta != NULL;
59 }
60 
61 static inline struct zram *dev_to_zram(struct device *dev)
62 {
63 	return (struct zram *)dev_to_disk(dev)->private_data;
64 }
65 
66 static ssize_t disksize_show(struct device *dev,
67 		struct device_attribute *attr, char *buf)
68 {
69 	struct zram *zram = dev_to_zram(dev);
70 
71 	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
72 }
73 
74 static ssize_t initstate_show(struct device *dev,
75 		struct device_attribute *attr, char *buf)
76 {
77 	u32 val;
78 	struct zram *zram = dev_to_zram(dev);
79 
80 	down_read(&zram->init_lock);
81 	val = init_done(zram);
82 	up_read(&zram->init_lock);
83 
84 	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
85 }
86 
87 static ssize_t orig_data_size_show(struct device *dev,
88 		struct device_attribute *attr, char *buf)
89 {
90 	struct zram *zram = dev_to_zram(dev);
91 
92 	return scnprintf(buf, PAGE_SIZE, "%llu\n",
93 		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
94 }
95 
96 static ssize_t mem_used_total_show(struct device *dev,
97 		struct device_attribute *attr, char *buf)
98 {
99 	u64 val = 0;
100 	struct zram *zram = dev_to_zram(dev);
101 
102 	down_read(&zram->init_lock);
103 	if (init_done(zram)) {
104 		struct zram_meta *meta = zram->meta;
105 		val = zs_get_total_pages(meta->mem_pool);
106 	}
107 	up_read(&zram->init_lock);
108 
109 	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
110 }
111 
112 static ssize_t max_comp_streams_show(struct device *dev,
113 		struct device_attribute *attr, char *buf)
114 {
115 	int val;
116 	struct zram *zram = dev_to_zram(dev);
117 
118 	down_read(&zram->init_lock);
119 	val = zram->max_comp_streams;
120 	up_read(&zram->init_lock);
121 
122 	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
123 }
124 
125 static ssize_t mem_limit_show(struct device *dev,
126 		struct device_attribute *attr, char *buf)
127 {
128 	u64 val;
129 	struct zram *zram = dev_to_zram(dev);
130 
131 	down_read(&zram->init_lock);
132 	val = zram->limit_pages;
133 	up_read(&zram->init_lock);
134 
135 	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
136 }
137 
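/*
 * The limit is parsed with memparse(), e.g. "echo 256M >
 * /sys/block/zram0/mem_limit" (device name assumed), and rounded up to
 * whole pages; writing 0 disables the limit check in zram_bvec_write().
 */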
138 static ssize_t mem_limit_store(struct device *dev,
139 		struct device_attribute *attr, const char *buf, size_t len)
140 {
141 	u64 limit;
142 	char *tmp;
143 	struct zram *zram = dev_to_zram(dev);
144 
145 	limit = memparse(buf, &tmp);
146 	if (buf == tmp) /* no chars parsed, invalid input */
147 		return -EINVAL;
148 
149 	down_write(&zram->init_lock);
150 	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
151 	up_write(&zram->init_lock);
152 
153 	return len;
154 }
155 
156 static ssize_t mem_used_max_show(struct device *dev,
157 		struct device_attribute *attr, char *buf)
158 {
159 	u64 val = 0;
160 	struct zram *zram = dev_to_zram(dev);
161 
162 	down_read(&zram->init_lock);
163 	if (init_done(zram))
164 		val = atomic_long_read(&zram->stats.max_used_pages);
165 	up_read(&zram->init_lock);
166 
167 	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
168 }
169 
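/*
 * Only "0" is accepted here: writing it re-arms the watermark by setting
 * max_used_pages to the pool's current size, so a new peak can be measured.
 */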
170 static ssize_t mem_used_max_store(struct device *dev,
171 		struct device_attribute *attr, const char *buf, size_t len)
172 {
173 	int err;
174 	unsigned long val;
175 	struct zram *zram = dev_to_zram(dev);
176 
177 	err = kstrtoul(buf, 10, &val);
178 	if (err || val != 0)
179 		return -EINVAL;
180 
181 	down_read(&zram->init_lock);
182 	if (init_done(zram)) {
183 		struct zram_meta *meta = zram->meta;
184 		atomic_long_set(&zram->stats.max_used_pages,
185 				zs_get_total_pages(meta->mem_pool));
186 	}
187 	up_read(&zram->init_lock);
188 
189 	return len;
190 }
191 
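/*
 * Example: "echo 4 > /sys/block/zram0/max_comp_streams" (device name
 * assumed) allows up to four concurrent compression streams; values below
 * 1 are rejected.
 */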
192 static ssize_t max_comp_streams_store(struct device *dev,
193 		struct device_attribute *attr, const char *buf, size_t len)
194 {
195 	int num;
196 	struct zram *zram = dev_to_zram(dev);
197 	int ret;
198 
199 	ret = kstrtoint(buf, 0, &num);
200 	if (ret < 0)
201 		return ret;
202 	if (num < 1)
203 		return -EINVAL;
204 
205 	down_write(&zram->init_lock);
206 	if (init_done(zram)) {
207 		if (!zcomp_set_max_streams(zram->comp, num)) {
208 			pr_info("Cannot change max compression streams\n");
209 			ret = -EINVAL;
210 			goto out;
211 		}
212 	}
213 
214 	zram->max_comp_streams = num;
215 	ret = len;
216 out:
217 	up_write(&zram->init_lock);
218 	return ret;
219 }
220 
221 static ssize_t comp_algorithm_show(struct device *dev,
222 		struct device_attribute *attr, char *buf)
223 {
224 	size_t sz;
225 	struct zram *zram = dev_to_zram(dev);
226 
227 	down_read(&zram->init_lock);
228 	sz = zcomp_available_show(zram->compressor, buf);
229 	up_read(&zram->init_lock);
230 
231 	return sz;
232 }
233 
234 static ssize_t comp_algorithm_store(struct device *dev,
235 		struct device_attribute *attr, const char *buf, size_t len)
236 {
237 	struct zram *zram = dev_to_zram(dev);
238 	down_write(&zram->init_lock);
239 	if (init_done(zram)) {
240 		up_write(&zram->init_lock);
241 		pr_info("Can't change algorithm for initialized device\n");
242 		return -EBUSY;
243 	}
244 	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
245 	up_write(&zram->init_lock);
246 	return len;
247 }
248 
249 /* flag operations require holding the table entry's bit_spinlock (ZRAM_ACCESS) */
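/*
 * Layout of meta->table[index].value, as implied by the helpers below
 * (a sketch, not a normative definition):
 *
 *	bits [0, ZRAM_FLAG_SHIFT)	compressed object size
 *	bits [ZRAM_FLAG_SHIFT, ...)	zram_pageflags (ZRAM_ACCESS, ZRAM_ZERO, ...)
 *
 * zram_set_obj_size() must therefore preserve the flag bits while
 * rewriting the size bits.
 */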
250 static int zram_test_flag(struct zram_meta *meta, u32 index,
251 			enum zram_pageflags flag)
252 {
253 	return meta->table[index].value & BIT(flag);
254 }
255 
256 static void zram_set_flag(struct zram_meta *meta, u32 index,
257 			enum zram_pageflags flag)
258 {
259 	meta->table[index].value |= BIT(flag);
260 }
261 
262 static void zram_clear_flag(struct zram_meta *meta, u32 index,
263 			enum zram_pageflags flag)
264 {
265 	meta->table[index].value &= ~BIT(flag);
266 }
267 
268 static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
269 {
270 	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
271 }
272 
273 static void zram_set_obj_size(struct zram_meta *meta,
274 					u32 index, size_t size)
275 {
276 	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;
277 
278 	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
279 }
280 
281 static inline int is_partial_io(struct bio_vec *bvec)
282 {
283 	return bvec->bv_len != PAGE_SIZE;
284 }
285 
286 /*
287  * Check if request is within bounds and aligned on zram logical blocks.
288  */
289 static inline int valid_io_request(struct zram *zram,
290 		sector_t start, unsigned int size)
291 {
292 	u64 end, bound;
293 
294 	/* unaligned request */
295 	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
296 		return 0;
297 	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
298 		return 0;
299 
300 	end = start + (size >> SECTOR_SHIFT);
301 	bound = zram->disksize >> SECTOR_SHIFT;
302 	/* out of range */
303 	if (unlikely(start >= bound || end > bound || start > end))
304 		return 0;
305 
306 	/* I/O request is valid */
307 	return 1;
308 }
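/*
 * Worked example (assuming 512-byte sectors and a ZRAM_LOGICAL_BLOCK_SIZE of
 * 4096, i.e. ZRAM_SECTOR_PER_LOGICAL_BLOCK == 8): a request starting at
 * sector 8 with size 4096 passes both alignment checks; one starting at
 * sector 9, or sized 512 bytes, is rejected as unaligned.
 */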
309 
310 static void zram_meta_free(struct zram_meta *meta)
311 {
312 	zs_destroy_pool(meta->mem_pool);
313 	vfree(meta->table);
314 	kfree(meta);
315 }
316 
317 static struct zram_meta *zram_meta_alloc(u64 disksize)
318 {
319 	size_t num_pages;
320 	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
321 	if (!meta)
322 		goto out;
323 
324 	num_pages = disksize >> PAGE_SHIFT;
325 	meta->table = vzalloc(num_pages * sizeof(*meta->table));
326 	if (!meta->table) {
327 		pr_err("Error allocating zram address table\n");
328 		goto free_meta;
329 	}
330 
331 	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
332 	if (!meta->mem_pool) {
333 		pr_err("Error creating memory pool\n");
334 		goto free_table;
335 	}
336 
337 	return meta;
338 
339 free_table:
340 	vfree(meta->table);
341 free_meta:
342 	kfree(meta);
343 	meta = NULL;
344 out:
345 	return meta;
346 }
347 
348 static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
349 {
350 	if (*offset + bvec->bv_len >= PAGE_SIZE)
351 		(*index)++;
352 	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
353 }
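/*
 * Example (with 4 KiB pages): after a 512-byte bvec at offset 3584 the page
 * boundary is reached, so *index advances and *offset wraps to 0; a 512-byte
 * bvec at offset 0 merely moves *offset to 512 within the same page.
 */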
354 
355 static int page_zero_filled(void *ptr)
356 {
357 	unsigned int pos;
358 	unsigned long *page;
359 
360 	page = (unsigned long *)ptr;
361 
362 	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
363 		if (page[pos])
364 			return 0;
365 	}
366 
367 	return 1;
368 }
369 
370 static void handle_zero_page(struct bio_vec *bvec)
371 {
372 	struct page *page = bvec->bv_page;
373 	void *user_mem;
374 
375 	user_mem = kmap_atomic(page);
376 	if (is_partial_io(bvec))
377 		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
378 	else
379 		clear_page(user_mem);
380 	kunmap_atomic(user_mem);
381 
382 	flush_dcache_page(page);
383 }
384 
385 
386 /*
387  * To protect concurrent access to the same index entry,
388  * the caller should hold this table index entry's bit_spinlock to
389  * indicate that the entry is being accessed.
390  */
391 static void zram_free_page(struct zram *zram, size_t index)
392 {
393 	struct zram_meta *meta = zram->meta;
394 	unsigned long handle = meta->table[index].handle;
395 
396 	if (unlikely(!handle)) {
397 		/*
398 		 * No memory is allocated for zero filled pages.
399 		 * Simply clear zero page flag.
400 		 */
401 		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
402 			zram_clear_flag(meta, index, ZRAM_ZERO);
403 			atomic64_dec(&zram->stats.zero_pages);
404 		}
405 		return;
406 	}
407 
408 	zs_free(meta->mem_pool, handle);
409 
410 	atomic64_sub(zram_get_obj_size(meta, index),
411 			&zram->stats.compr_data_size);
412 	atomic64_dec(&zram->stats.pages_stored);
413 
414 	meta->table[index].handle = 0;
415 	zram_set_obj_size(meta, index, 0);
416 }
417 
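/*
 * Note: an object whose stored size equals PAGE_SIZE was written raw by
 * zram_bvec_write() because it compressed poorly (clen > max_zpage_size),
 * so it is copied back with copy_page() instead of being decompressed.
 */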
418 static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
419 {
420 	int ret = 0;
421 	unsigned char *cmem;
422 	struct zram_meta *meta = zram->meta;
423 	unsigned long handle;
424 	size_t size;
425 
426 	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
427 	handle = meta->table[index].handle;
428 	size = zram_get_obj_size(meta, index);
429 
430 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
431 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
432 		clear_page(mem);
433 		return 0;
434 	}
435 
436 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
437 	if (size == PAGE_SIZE)
438 		copy_page(mem, cmem);
439 	else
440 		ret = zcomp_decompress(zram->comp, cmem, size, mem);
441 	zs_unmap_object(meta->mem_pool, handle);
442 	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
443 
444 	/* Should NEVER happen. Return bio error if it does. */
445 	if (unlikely(ret)) {
446 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
447 		return ret;
448 	}
449 
450 	return 0;
451 }
452 
453 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
454 			  u32 index, int offset)
455 {
456 	int ret;
457 	struct page *page;
458 	unsigned char *user_mem, *uncmem = NULL;
459 	struct zram_meta *meta = zram->meta;
460 	page = bvec->bv_page;
461 
462 	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
463 	if (unlikely(!meta->table[index].handle) ||
464 			zram_test_flag(meta, index, ZRAM_ZERO)) {
465 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
466 		handle_zero_page(bvec);
467 		return 0;
468 	}
469 	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
470 
471 	if (is_partial_io(bvec))
472 		/* Use a temporary buffer to decompress the page */
473 		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
474 
475 	user_mem = kmap_atomic(page);
476 	if (!is_partial_io(bvec))
477 		uncmem = user_mem;
478 
479 	if (!uncmem) {
480 		pr_info("Unable to allocate temp memory\n");
481 		ret = -ENOMEM;
482 		goto out_cleanup;
483 	}
484 
485 	ret = zram_decompress_page(zram, uncmem, index);
486 	/* Should NEVER happen. Return bio error if it does. */
487 	if (unlikely(ret))
488 		goto out_cleanup;
489 
490 	if (is_partial_io(bvec))
491 		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
492 				bvec->bv_len);
493 
494 	flush_dcache_page(page);
495 	ret = 0;
496 out_cleanup:
497 	kunmap_atomic(user_mem);
498 	if (is_partial_io(bvec))
499 		kfree(uncmem);
500 	return ret;
501 }
502 
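/*
 * Lock-free watermark update: re-read max_used_pages and try to install
 * the larger value with atomic_long_cmpxchg(), retrying only if another
 * CPU changed the maximum underneath us.
 */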
503 static inline void update_used_max(struct zram *zram,
504 					const unsigned long pages)
505 {
506 	unsigned long old_max, cur_max;
507 
508 	old_max = atomic_long_read(&zram->stats.max_used_pages);
509 
510 	do {
511 		cur_max = old_max;
512 		if (pages > cur_max)
513 			old_max = atomic_long_cmpxchg(
514 				&zram->stats.max_used_pages, cur_max, pages);
515 	} while (old_max != cur_max);
516 }
517 
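/*
 * Write path overview (a descriptive summary of the code below): partial
 * writes first read back the whole page (read-modify-write); zero-filled
 * pages are recorded with ZRAM_ZERO and consume no pool memory; pages that
 * compress poorly (clen > max_zpage_size) are stored raw at PAGE_SIZE; the
 * old object for this index is freed under the entry's bit_spinlock before
 * the new handle is installed.
 */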
518 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
519 			   int offset)
520 {
521 	int ret = 0;
522 	size_t clen;
523 	unsigned long handle;
524 	struct page *page;
525 	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
526 	struct zram_meta *meta = zram->meta;
527 	struct zcomp_strm *zstrm;
528 	bool locked = false;
529 	unsigned long alloced_pages;
530 
531 	page = bvec->bv_page;
532 	if (is_partial_io(bvec)) {
533 		/*
534 		 * This is a partial IO. We need to read the full page
535 		 * before writing the changes.
536 		 */
537 		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
538 		if (!uncmem) {
539 			ret = -ENOMEM;
540 			goto out;
541 		}
542 		ret = zram_decompress_page(zram, uncmem, index);
543 		if (ret)
544 			goto out;
545 	}
546 
547 	zstrm = zcomp_strm_find(zram->comp);
548 	locked = true;
549 	user_mem = kmap_atomic(page);
550 
551 	if (is_partial_io(bvec)) {
552 		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
553 		       bvec->bv_len);
554 		kunmap_atomic(user_mem);
555 		user_mem = NULL;
556 	} else {
557 		uncmem = user_mem;
558 	}
559 
560 	if (page_zero_filled(uncmem)) {
561 		if (user_mem)
562 			kunmap_atomic(user_mem);
563 		/* Free memory associated with this sector now. */
564 		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
565 		zram_free_page(zram, index);
566 		zram_set_flag(meta, index, ZRAM_ZERO);
567 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
568 
569 		atomic64_inc(&zram->stats.zero_pages);
570 		ret = 0;
571 		goto out;
572 	}
573 
574 	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
575 	if (!is_partial_io(bvec)) {
576 		kunmap_atomic(user_mem);
577 		user_mem = NULL;
578 		uncmem = NULL;
579 	}
580 
581 	if (unlikely(ret)) {
582 		pr_err("Compression failed! err=%d\n", ret);
583 		goto out;
584 	}
585 	src = zstrm->buffer;
586 	if (unlikely(clen > max_zpage_size)) {
587 		clen = PAGE_SIZE;
588 		if (is_partial_io(bvec))
589 			src = uncmem;
590 	}
591 
592 	handle = zs_malloc(meta->mem_pool, clen);
593 	if (!handle) {
594 		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
595 			index, clen);
596 		ret = -ENOMEM;
597 		goto out;
598 	}
599 
600 	alloced_pages = zs_get_total_pages(meta->mem_pool);
601 	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
602 		zs_free(meta->mem_pool, handle);
603 		ret = -ENOMEM;
604 		goto out;
605 	}
606 
607 	update_used_max(zram, alloced_pages);
608 
609 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
610 
611 	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
612 		src = kmap_atomic(page);
613 		copy_page(cmem, src);
614 		kunmap_atomic(src);
615 	} else {
616 		memcpy(cmem, src, clen);
617 	}
618 
619 	zcomp_strm_release(zram->comp, zstrm);
620 	locked = false;
621 	zs_unmap_object(meta->mem_pool, handle);
622 
623 	/*
624 	 * Free memory associated with this sector
625 	 * before overwriting unused sectors.
626 	 */
627 	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
628 	zram_free_page(zram, index);
629 
630 	meta->table[index].handle = handle;
631 	zram_set_obj_size(meta, index, clen);
632 	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
633 
634 	/* Update stats */
635 	atomic64_add(clen, &zram->stats.compr_data_size);
636 	atomic64_inc(&zram->stats.pages_stored);
637 out:
638 	if (locked)
639 		zcomp_strm_release(zram->comp, zstrm);
640 	if (is_partial_io(bvec))
641 		kfree(uncmem);
642 	return ret;
643 }
644 
645 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
646 			int offset, int rw)
647 {
648 	int ret;
649 
650 	if (rw == READ) {
651 		atomic64_inc(&zram->stats.num_reads);
652 		ret = zram_bvec_read(zram, bvec, index, offset);
653 	} else {
654 		atomic64_inc(&zram->stats.num_writes);
655 		ret = zram_bvec_write(zram, bvec, index, offset);
656 	}
657 
658 	if (unlikely(ret)) {
659 		if (rw == READ)
660 			atomic64_inc(&zram->stats.failed_reads);
661 		else
662 			atomic64_inc(&zram->stats.failed_writes);
663 	}
664 
665 	return ret;
666 }
667 
668 /*
669  * zram_bio_discard - handler for discard requests
670  * @index: physical block index in PAGE_SIZE units
671  * @offset: byte offset within physical block
672  */
673 static void zram_bio_discard(struct zram *zram, u32 index,
674 			     int offset, struct bio *bio)
675 {
676 	size_t n = bio->bi_iter.bi_size;
677 	struct zram_meta *meta = zram->meta;
678 
679 	/*
680 	 * zram manages data in physical block size units. Because logical block
681 	 * size isn't identical to the physical block size on some
682 	 * architectures, we could get a discard request pointing to a specific
683 	 * offset within a certain physical block.  Although we could handle
684 	 * this request by reading that physical block, decompressing it,
685 	 * partially zeroing it, and then re-compressing and re-storing it,
686 	 * this isn't reasonable because our intent here is to save memory.  So
687 	 * skipping this logical block is appropriate here.
688 	 */
689 	if (offset) {
690 		if (n <= (PAGE_SIZE - offset))
691 			return;
692 
693 		n -= (PAGE_SIZE - offset);
694 		index++;
695 	}
696 
697 	while (n >= PAGE_SIZE) {
698 		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
699 		zram_free_page(zram, index);
700 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
701 		atomic64_inc(&zram->stats.notify_free);
702 		index++;
703 		n -= PAGE_SIZE;
704 	}
705 }
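/*
 * Worked example (4 KiB pages): a discard of n = 8192 bytes starting at
 * offset 512 within a page skips the leading 3584 bytes, frees exactly one
 * full page at index + 1, and ignores the trailing 512 bytes, since only
 * whole pages can be freed.
 */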
706 
707 static void zram_reset_device(struct zram *zram, bool reset_capacity)
708 {
709 	size_t index;
710 	struct zram_meta *meta;
711 
712 	down_write(&zram->init_lock);
713 
714 	zram->limit_pages = 0;
715 
716 	if (!init_done(zram)) {
717 		up_write(&zram->init_lock);
718 		return;
719 	}
720 
721 	meta = zram->meta;
722 	/* Free all pages that are still in this zram device */
723 	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
724 		unsigned long handle = meta->table[index].handle;
725 		if (!handle)
726 			continue;
727 
728 		zs_free(meta->mem_pool, handle);
729 	}
730 
731 	zcomp_destroy(zram->comp);
732 	zram->max_comp_streams = 1;
733 
734 	zram_meta_free(zram->meta);
735 	zram->meta = NULL;
736 	/* Reset stats */
737 	memset(&zram->stats, 0, sizeof(zram->stats));
738 
739 	zram->disksize = 0;
740 	if (reset_capacity)
741 		set_capacity(zram->disk, 0);
742 
743 	up_write(&zram->init_lock);
744 
745 	/*
746 	 * Revalidate disk out of the init_lock to avoid lockdep splat.
747 	 * It's okay because the disk's capacity is protected by init_lock,
748 	 * so revalidate_disk always sees an up-to-date capacity.
749 	 */
750 	if (reset_capacity)
751 		revalidate_disk(zram->disk);
752 }
753 
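/*
 * Typical setup from userspace (device name assumed; memparse() accepts
 * suffixes such as K, M and G, so "512M" equals "536870912"):
 *
 *	echo lzo > /sys/block/zram0/comp_algorithm
 *	echo 512M > /sys/block/zram0/disksize
 *
 * comp_algorithm must be set first: once disksize is written the device is
 * initialized and comp_algorithm_store() returns -EBUSY.
 */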
754 static ssize_t disksize_store(struct device *dev,
755 		struct device_attribute *attr, const char *buf, size_t len)
756 {
757 	u64 disksize;
758 	struct zcomp *comp;
759 	struct zram_meta *meta;
760 	struct zram *zram = dev_to_zram(dev);
761 	int err;
762 
763 	disksize = memparse(buf, NULL);
764 	if (!disksize)
765 		return -EINVAL;
766 
767 	disksize = PAGE_ALIGN(disksize);
768 	meta = zram_meta_alloc(disksize);
769 	if (!meta)
770 		return -ENOMEM;
771 
772 	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
773 	if (IS_ERR(comp)) {
774 		pr_info("Cannot initialise %s compressing backend\n",
775 				zram->compressor);
776 		err = PTR_ERR(comp);
777 		goto out_free_meta;
778 	}
779 
780 	down_write(&zram->init_lock);
781 	if (init_done(zram)) {
782 		pr_info("Cannot change disksize for initialized device\n");
783 		err = -EBUSY;
784 		goto out_destroy_comp;
785 	}
786 
787 	zram->meta = meta;
788 	zram->comp = comp;
789 	zram->disksize = disksize;
790 	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
791 	up_write(&zram->init_lock);
792 
793 	/*
794 	 * Revalidate disk out of the init_lock to avoid lockdep splat.
795 	 * It's okay because the disk's capacity is protected by init_lock,
796 	 * so revalidate_disk always sees an up-to-date capacity.
797 	 */
798 	revalidate_disk(zram->disk);
799 
800 	return len;
801 
802 out_destroy_comp:
803 	up_write(&zram->init_lock);
804 	zcomp_destroy(comp);
805 out_free_meta:
806 	zram_meta_free(meta);
807 	return err;
808 }
809 
810 static ssize_t reset_store(struct device *dev,
811 		struct device_attribute *attr, const char *buf, size_t len)
812 {
813 	int ret;
814 	unsigned short do_reset;
815 	struct zram *zram;
816 	struct block_device *bdev;
817 
818 	zram = dev_to_zram(dev);
819 	bdev = bdget_disk(zram->disk, 0);
820 
821 	if (!bdev)
822 		return -ENOMEM;
823 
824 	/* Do not reset an active device! */
825 	if (bdev->bd_holders) {
826 		ret = -EBUSY;
827 		goto out;
828 	}
829 
830 	ret = kstrtou16(buf, 10, &do_reset);
831 	if (ret)
832 		goto out;
833 
834 	if (!do_reset) {
835 		ret = -EINVAL;
836 		goto out;
837 	}
838 
839 	/* Make sure all pending I/O is finished */
840 	fsync_bdev(bdev);
841 	bdput(bdev);
842 
843 	zram_reset_device(zram, true);
844 	return len;
845 
846 out:
847 	bdput(bdev);
848 	return ret;
849 }
850 
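/*
 * Example of the split below (assuming 512-byte sectors and 4 KiB pages):
 * a bio at sector 1 carrying a 4096-byte bvec starts at offset 512 within
 * a zram page, so it is handled as 3584 bytes at (index, 512) followed by
 * 512 bytes at (index + 1, 0).
 */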
851 static void __zram_make_request(struct zram *zram, struct bio *bio)
852 {
853 	int offset, rw;
854 	u32 index;
855 	struct bio_vec bvec;
856 	struct bvec_iter iter;
857 
858 	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
859 	offset = (bio->bi_iter.bi_sector &
860 		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
861 
862 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
863 		zram_bio_discard(zram, index, offset, bio);
864 		bio_endio(bio, 0);
865 		return;
866 	}
867 
868 	rw = bio_data_dir(bio);
869 	bio_for_each_segment(bvec, bio, iter) {
870 		int max_transfer_size = PAGE_SIZE - offset;
871 
872 		if (bvec.bv_len > max_transfer_size) {
873 			/*
874 			 * zram_bvec_rw() can only operate on a single
875 			 * zram page. Split the bio vector.
876 			 */
877 			struct bio_vec bv;
878 
879 			bv.bv_page = bvec.bv_page;
880 			bv.bv_len = max_transfer_size;
881 			bv.bv_offset = bvec.bv_offset;
882 
883 			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
884 				goto out;
885 
886 			bv.bv_len = bvec.bv_len - max_transfer_size;
887 			bv.bv_offset += max_transfer_size;
888 			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
889 				goto out;
890 		} else
891 			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
892 				goto out;
893 
894 		update_position(&index, &offset, &bvec);
895 	}
896 
897 	set_bit(BIO_UPTODATE, &bio->bi_flags);
898 	bio_endio(bio, 0);
899 	return;
900 
901 out:
902 	bio_io_error(bio);
903 }
904 
905 /*
906  * Handler function for all zram I/O requests.
907  */
908 static void zram_make_request(struct request_queue *queue, struct bio *bio)
909 {
910 	struct zram *zram = queue->queuedata;
911 
912 	down_read(&zram->init_lock);
913 	if (unlikely(!init_done(zram)))
914 		goto error;
915 
916 	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
917 					bio->bi_iter.bi_size)) {
918 		atomic64_inc(&zram->stats.invalid_io);
919 		goto error;
920 	}
921 
922 	__zram_make_request(zram, bio);
923 	up_read(&zram->init_lock);
924 
925 	return;
926 
927 error:
928 	up_read(&zram->init_lock);
929 	bio_io_error(bio);
930 }
931 
932 static void zram_slot_free_notify(struct block_device *bdev,
933 				unsigned long index)
934 {
935 	struct zram *zram;
936 	struct zram_meta *meta;
937 
938 	zram = bdev->bd_disk->private_data;
939 	meta = zram->meta;
940 
941 	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
942 	zram_free_page(zram, index);
943 	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
944 	atomic64_inc(&zram->stats.notify_free);
945 }
946 
947 static int zram_rw_page(struct block_device *bdev, sector_t sector,
948 		       struct page *page, int rw)
949 {
950 	int offset, err;
951 	u32 index;
952 	struct zram *zram;
953 	struct bio_vec bv;
954 
955 	zram = bdev->bd_disk->private_data;
956 	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
957 		atomic64_inc(&zram->stats.invalid_io);
958 		return -EINVAL;
959 	}
960 
961 	down_read(&zram->init_lock);
962 	if (unlikely(!init_done(zram))) {
963 		err = -EIO;
964 		goto out_unlock;
965 	}
966 
967 	index = sector >> SECTORS_PER_PAGE_SHIFT;
968 	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
969 
970 	bv.bv_page = page;
971 	bv.bv_len = PAGE_SIZE;
972 	bv.bv_offset = 0;
973 
974 	err = zram_bvec_rw(zram, &bv, index, offset, rw);
975 out_unlock:
976 	up_read(&zram->init_lock);
977 	/*
978 	 * If I/O fails, just return the error (i.e., non-zero) without
979 	 * calling page_endio.
980 	 * This makes the callers of rw_page (e.g., swap_readpage,
981 	 * __swap_writepage) resubmit the I/O as a bio request, and
982 	 * bio->bi_end_io then handles the error
983 	 * (e.g., SetPageError, set_page_dirty and extra work).
984 	 */
985 	if (err == 0)
986 		page_endio(page, rw, 0);
987 	return err;
988 }
989 
990 static const struct block_device_operations zram_devops = {
991 	.swap_slot_free_notify = zram_slot_free_notify,
992 	.rw_page = zram_rw_page,
993 	.owner = THIS_MODULE
994 };
995 
996 static DEVICE_ATTR_RW(disksize);
997 static DEVICE_ATTR_RO(initstate);
998 static DEVICE_ATTR_WO(reset);
999 static DEVICE_ATTR_RO(orig_data_size);
1000 static DEVICE_ATTR_RO(mem_used_total);
1001 static DEVICE_ATTR_RW(mem_limit);
1002 static DEVICE_ATTR_RW(mem_used_max);
1003 static DEVICE_ATTR_RW(max_comp_streams);
1004 static DEVICE_ATTR_RW(comp_algorithm);
1005 
1006 ZRAM_ATTR_RO(num_reads);
1007 ZRAM_ATTR_RO(num_writes);
1008 ZRAM_ATTR_RO(failed_reads);
1009 ZRAM_ATTR_RO(failed_writes);
1010 ZRAM_ATTR_RO(invalid_io);
1011 ZRAM_ATTR_RO(notify_free);
1012 ZRAM_ATTR_RO(zero_pages);
1013 ZRAM_ATTR_RO(compr_data_size);
1014 
1015 static struct attribute *zram_disk_attrs[] = {
1016 	&dev_attr_disksize.attr,
1017 	&dev_attr_initstate.attr,
1018 	&dev_attr_reset.attr,
1019 	&dev_attr_num_reads.attr,
1020 	&dev_attr_num_writes.attr,
1021 	&dev_attr_failed_reads.attr,
1022 	&dev_attr_failed_writes.attr,
1023 	&dev_attr_invalid_io.attr,
1024 	&dev_attr_notify_free.attr,
1025 	&dev_attr_zero_pages.attr,
1026 	&dev_attr_orig_data_size.attr,
1027 	&dev_attr_compr_data_size.attr,
1028 	&dev_attr_mem_used_total.attr,
1029 	&dev_attr_mem_limit.attr,
1030 	&dev_attr_mem_used_max.attr,
1031 	&dev_attr_max_comp_streams.attr,
1032 	&dev_attr_comp_algorithm.attr,
1033 	NULL,
1034 };
1035 
1036 static struct attribute_group zram_disk_attr_group = {
1037 	.attrs = zram_disk_attrs,
1038 };
1039 
1040 static int create_device(struct zram *zram, int device_id)
1041 {
1042 	int ret = -ENOMEM;
1043 
1044 	init_rwsem(&zram->init_lock);
1045 
1046 	zram->queue = blk_alloc_queue(GFP_KERNEL);
1047 	if (!zram->queue) {
1048 		pr_err("Error allocating disk queue for device %d\n",
1049 			device_id);
1050 		goto out;
1051 	}
1052 
1053 	blk_queue_make_request(zram->queue, zram_make_request);
1054 	zram->queue->queuedata = zram;
1055 
1056 	/* gendisk structure */
1057 	zram->disk = alloc_disk(1);
1058 	if (!zram->disk) {
1059 		pr_warn("Error allocating disk structure for device %d\n",
1060 			device_id);
1061 		goto out_free_queue;
1062 	}
1063 
1064 	zram->disk->major = zram_major;
1065 	zram->disk->first_minor = device_id;
1066 	zram->disk->fops = &zram_devops;
1067 	zram->disk->queue = zram->queue;
1068 	zram->disk->private_data = zram;
1069 	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
1070 
1071 	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
1072 	set_capacity(zram->disk, 0);
1073 	/* zram devices sort of resemble non-rotational disks */
1074 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
1075 	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
1076 	/*
1077 	 * To ensure that we always get PAGE_SIZE-aligned
1078 	 * and n*PAGE_SIZE-sized I/O requests.
1079 	 */
1080 	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
1081 	blk_queue_logical_block_size(zram->disk->queue,
1082 					ZRAM_LOGICAL_BLOCK_SIZE);
1083 	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
1084 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
1085 	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
1086 	zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
1087 	/*
1088 	 * zram_bio_discard() will clear all logical blocks if logical block
1089 	 * size is identical to the physical block size (PAGE_SIZE). But if it
1090 	 * differs, we skip discarding the parts of logical blocks that fall in
1091 	 * the portion of the request range which isn't aligned to the
1092 	 * physical block size.  So we can't ensure that all discarded logical
1093 	 * blocks are zeroed.
1094 	 */
1095 	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
1096 		zram->disk->queue->limits.discard_zeroes_data = 1;
1097 	else
1098 		zram->disk->queue->limits.discard_zeroes_data = 0;
1099 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
1100 
1101 	add_disk(zram->disk);
1102 
1103 	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
1104 				&zram_disk_attr_group);
1105 	if (ret < 0) {
1106 		pr_warn("Error creating sysfs group\n");
1107 		goto out_free_disk;
1108 	}
1109 	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
1110 	zram->meta = NULL;
1111 	zram->max_comp_streams = 1;
1112 	return 0;
1113 
1114 out_free_disk:
1115 	del_gendisk(zram->disk);
1116 	put_disk(zram->disk);
1117 out_free_queue:
1118 	blk_cleanup_queue(zram->queue);
1119 out:
1120 	return ret;
1121 }
1122 
1123 static void destroy_device(struct zram *zram)
1124 {
1125 	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
1126 			&zram_disk_attr_group);
1127 
1128 	del_gendisk(zram->disk);
1129 	put_disk(zram->disk);
1130 
1131 	blk_cleanup_queue(zram->queue);
1132 }
1133 
1134 static int __init zram_init(void)
1135 {
1136 	int ret, dev_id;
1137 
1138 	if (num_devices > max_num_devices) {
1139 		pr_warn("Invalid value for num_devices: %u\n",
1140 				num_devices);
1141 		ret = -EINVAL;
1142 		goto out;
1143 	}
1144 
1145 	zram_major = register_blkdev(0, "zram");
1146 	if (zram_major <= 0) {
1147 		pr_warn("Unable to get major number\n");
1148 		ret = -EBUSY;
1149 		goto out;
1150 	}
1151 
1152 	/* Allocate the device array and initialize each one */
1153 	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
1154 	if (!zram_devices) {
1155 		ret = -ENOMEM;
1156 		goto unregister;
1157 	}
1158 
1159 	for (dev_id = 0; dev_id < num_devices; dev_id++) {
1160 		ret = create_device(&zram_devices[dev_id], dev_id);
1161 		if (ret)
1162 			goto free_devices;
1163 	}
1164 
1165 	pr_info("Created %u device(s) ...\n", num_devices);
1166 
1167 	return 0;
1168 
1169 free_devices:
1170 	while (dev_id)
1171 		destroy_device(&zram_devices[--dev_id]);
1172 	kfree(zram_devices);
1173 unregister:
1174 	unregister_blkdev(zram_major, "zram");
1175 out:
1176 	return ret;
1177 }
1178 
1179 static void __exit zram_exit(void)
1180 {
1181 	int i;
1182 	struct zram *zram;
1183 
1184 	for (i = 0; i < num_devices; i++) {
1185 		zram = &zram_devices[i];
1186 
1187 		destroy_device(zram);
1188 		/*
1189 		 * Shouldn't access zram->disk after destroy_device
1190 		 * because destroy_device already released zram->disk.
1191 		 */
1192 		zram_reset_device(zram, false);
1193 	}
1194 
1195 	unregister_blkdev(zram_major, "zram");
1196 
1197 	kfree(zram_devices);
1198 	pr_debug("Cleanup done!\n");
1199 }
1200 
1201 module_init(zram_init);
1202 module_exit(zram_exit);
1203 
1204 module_param(num_devices, uint, 0);
1205 MODULE_PARM_DESC(num_devices, "Number of zram devices");
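/* Example: "modprobe zram num_devices=4" creates /dev/zram0 .. /dev/zram3. */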
1206 
1207 MODULE_LICENSE("Dual BSD/GPL");
1208 MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
1209 MODULE_DESCRIPTION("Compressed RAM Block Device");
1210