// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how often a
 * certain event occurs.
*/

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set =		zswap_enabled_param_set,
	.get =		param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set =		zswap_compressor_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set =		zswap_zpool_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/*
 * Enable/disable handling same-value filled pages (enabled by default).
 * If disabled, every page is considered non-same-value filled.
 */
static bool zswap_same_filled_pages_enabled = true;
module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
		   bool, 0644);

/* Enable/disable handling non-same-value filled pages (enabled by default) */
static bool zswap_non_same_filled_pages_enabled = true;
module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
		   bool, 0644);

static bool zswap_exclusive_loads_enabled = IS_ENABLED(
		CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);
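
/*
 * Usage sketch (not part of the driver itself): all of the knobs above
 * are ordinary module parameters, so they can be set on the kernel
 * command line or changed at runtime through sysfs, for example:
 *
 *	echo 1    > /sys/module/zswap/parameters/enabled
 *	echo zstd > /sys/module/zswap/parameters/compressor
 *	echo 25   > /sys/module/zswap/parameters/max_pool_percent
 *
 * Writes to "compressor" and "zpool" go through the param callbacks
 * defined later in this file, which create (or look up) a matching
 * zswap_pool and make it the current pool.
 */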

/* Number of zpools in zswap_pool (empirically determined for scalability) */
#define ZSWAP_NR_ZPOOLS 32

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *dstmem;
	struct mutex *mutex;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback; in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct kref kref;
	struct list_head list;
	struct work_struct release_work;
	struct work_struct shrink_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
	struct list_head lru;
	spinlock_t lru_lock;
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * offset - the swap offset for the entry.  Index into the red-black tree.
 * refcount - the number of outstanding references to the entry. This is needed
 *            to protect against premature freeing of the entry by concurrent
 *            calls to load, invalidate, and writeback.  The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount.  Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression. For a same-value filled page, length is 0, and both
 *          pool and lru are invalid and must be ignored.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - the value of a same-value filled page (all words identical)
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	struct rb_node rbnode;
	swp_entry_t swpentry;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	union {
		unsigned long handle;
		unsigned long value;
	};
	struct obj_cgroup *objcg;
	struct list_head lru;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpools[0]))

static int zswap_writeback_entry(struct zswap_entry *entry,
				 struct zswap_tree *tree);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static bool zswap_is_full(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100 <
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static bool zswap_can_accept(void)
{
	return totalram_pages() * zswap_accept_thr_percent / 100 *
				zswap_max_pool_percent / 100 >
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
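
/*
 * Worked example (illustrative numbers, not from the source): on a
 * machine with 16 GiB of RAM and the defaults above (max_pool_percent
 * = 20, accept_thr_percent = 90), zswap_is_full() reports full once
 * the compressed pool reaches 16 GiB * 20% = 3.2 GiB.  zswap then
 * rejects new stores until the pool shrinks below 3.2 GiB * 90% =
 * 2.88 GiB, giving some hysteresis so stores don't flap right at the
 * limit.
 */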

static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;
	int i;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
			total += zpool_get_total_size(pool->zpools[i]);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;
	pgoff_t entry_offset;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		entry_offset = swp_offset(entry->swpentry);
		if (entry_offset > offset)
			node = node->rb_left;
		else if (entry_offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;
	pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		myentry_offset = swp_offset(myentry->swpentry);
		if (myentry_offset > entry_offset)
			link = &(*link)->rb_left;
		else if (myentry_offset < entry_offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static bool zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
		return true;
	}
	return false;
}

static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
{
	int i = 0;

	if (ZSWAP_NR_ZPOOLS > 1)
		i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));

	return entry->pool->zpools[i];
}
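
/*
 * Illustrative note: with ZSWAP_NR_ZPOOLS = 32, hash_ptr(entry, 5)
 * returns an index in [0, 31], so entries are spread across the 32
 * zpools based on the entry's address.  A given entry always hashes to
 * the same zpool, which is how its allocation is found again later
 * without storing a zpool index in the entry itself.
 */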

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		spin_lock(&entry->pool->lru_lock);
		list_del(&entry->lru);
		spin_unlock(&entry->pool->lru_lock);
		zpool_free(zswap_find_zpool(entry), entry->handle);
		zswap_pool_put(entry->pool);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/*
 * caller must hold the tree lock
 * remove from the tree and free it, if nobody references the entry
 */
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	WARN_ON_ONCE(refcount < 0);
	if (refcount == 0) {
		WARN_ON_ONCE(!RB_EMPTY_NODE(&entry->rbnode));
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}
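
/*
 * A minimal sketch (hypothetical, not called anywhere in zswap) of the
 * canonical lookup pattern the helpers above support, as used by
 * zswap_load() and zswap_invalidate() below: take a reference under the
 * tree lock, drop the lock while the entry is in use, then re-take the
 * lock to drop the reference.  zswap_entry_put() may free the entry, so
 * it must not be dereferenced afterwards.
 */
static void __maybe_unused zswap_lookup_pattern_sketch(struct zswap_tree *tree,
						       pgoff_t offset)
{
	struct zswap_entry *entry;

	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	spin_unlock(&tree->lock);

	if (!entry)
		return;

	/* ... entry->handle or entry->value can be used safely here ... */

	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);
}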

/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);
/*
 * If users dynamically change the zpool type and compressor at runtime, i.e.
 * while zswap is running, zswap can have more than one zpool on one cpu, but
 * they are sharing dstmem. So we need this mutex to be per-cpu.
 */
static DEFINE_PER_CPU(struct mutex *, zswap_mutex);

static int zswap_dstmem_prepare(unsigned int cpu)
{
	struct mutex *mutex;
	u8 *dst;

	dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!dst)
		return -ENOMEM;

	mutex = kmalloc_node(sizeof(*mutex), GFP_KERNEL, cpu_to_node(cpu));
	if (!mutex) {
		kfree(dst);
		return -ENOMEM;
	}

	mutex_init(mutex);
	per_cpu(zswap_dstmem, cpu) = dst;
	per_cpu(zswap_mutex, cpu) = mutex;
	return 0;
}

static int zswap_dstmem_dead(unsigned int cpu)
{
	struct mutex *mutex;
	u8 *dst;

	mutex = per_cpu(zswap_mutex, cpu);
	kfree(mutex);
	per_cpu(zswap_mutex, cpu) = NULL;

	dst = per_cpu(zswap_dstmem, cpu);
	kfree(dst);
	per_cpu(zswap_dstmem, cpu) = NULL;

	return 0;
}

static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp;
	struct acomp_req *req;

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
				pool->tfm_name, PTR_ERR(acomp));
		return PTR_ERR(acomp);
	}
	acomp_ctx->acomp = acomp;

	req = acomp_request_alloc(acomp_ctx->acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		crypto_free_acomp(acomp_ctx->acomp);
		return -ENOMEM;
	}
	acomp_ctx->req = req;

	crypto_init_wait(&acomp_ctx->wait);
	/*
	 * If the backend of acomp is an async zip driver, crypto_req_done()
	 * will wake up crypto_wait_req(); if the backend of acomp is scomp,
	 * the callback won't be called and crypto_wait_req() will return
	 * without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	acomp_ctx->mutex = per_cpu(zswap_mutex, cpu);
	acomp_ctx->dstmem = per_cpu(zswap_dstmem, cpu);

	return 0;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
	}

	return 0;
}

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	WARN_ONCE(!last && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);
	if (!zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		/* all zpools share the same type */
		if (strcmp(zpool_get_type(pool->zpools[0]), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

/*
 * If the entry is still valid in the tree, drop the initial ref and remove it
 * from the tree. This function must be called with an additional ref held,
 * otherwise it may race with another invalidation freeing the entry.
 */
static void zswap_invalidate_entry(struct zswap_tree *tree,
				   struct zswap_entry *entry)
{
	if (zswap_rb_erase(&tree->rbroot, entry))
		zswap_entry_put(tree, entry);
}

static int zswap_reclaim_entry(struct zswap_pool *pool)
{
	struct zswap_entry *entry;
	struct zswap_tree *tree;
	pgoff_t swpoffset;
	int ret;

	/* Get an entry off the LRU */
	spin_lock(&pool->lru_lock);
	if (list_empty(&pool->lru)) {
		spin_unlock(&pool->lru_lock);
		return -EINVAL;
	}
	entry = list_last_entry(&pool->lru, struct zswap_entry, lru);
	list_del_init(&entry->lru);
	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpoffset is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpoffset = swp_offset(entry->swpentry);
	tree = zswap_trees[swp_type(entry->swpentry)];
	spin_unlock(&pool->lru_lock);

	/* Check for invalidate() race */
	spin_lock(&tree->lock);
	if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) {
		ret = -EAGAIN;
		goto unlock;
	}
	/* Hold a reference to prevent a free during writeback */
	zswap_entry_get(entry);
	spin_unlock(&tree->lock);

	ret = zswap_writeback_entry(entry, tree);

	spin_lock(&tree->lock);
	if (ret) {
		/* Writeback failed, put entry back on LRU */
		spin_lock(&pool->lru_lock);
		list_move(&entry->lru, &pool->lru);
		spin_unlock(&pool->lru_lock);
		goto put_unlock;
	}

	/*
	 * Writeback started successfully, the page now belongs to the
	 * swapcache. Drop the entry from zswap - unless invalidate already
	 * took it out while we had the tree->lock released for IO.
	 */
	zswap_invalidate_entry(tree, entry);

put_unlock:
	/* Drop local reference */
	zswap_entry_put(tree, entry);
unlock:
	spin_unlock(&tree->lock);
	return ret ? -EAGAIN : 0;
}

static void shrink_worker(struct work_struct *w)
{
	struct zswap_pool *pool = container_of(w, typeof(*pool),
						shrink_work);
	int ret, failures = 0;

	do {
		ret = zswap_reclaim_entry(pool);
		if (ret) {
			zswap_reject_reclaim_fail++;
			if (ret != -EAGAIN)
				break;
			if (++failures == MAX_RECLAIM_RETRIES)
				break;
		}
		cond_resched();
	} while (!zswap_can_accept());
	zswap_pool_put(pool);
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	int i;
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either is unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
		/* unique name for each pool specifically required by zsmalloc */
		snprintf(name, 38, "zswap%x",
			 atomic_inc_return(&zswap_pools_count));

		pool->zpools[i] = zpool_create_pool(type, name, gfp);
		if (!pool->zpools[i]) {
			pr_err("%s zpool not available\n", type);
			goto error;
		}
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;
	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);
	INIT_LIST_HEAD(&pool->lru);
	spin_lock_init(&pool->lru_lock);
	INIT_WORK(&pool->shrink_work, shrink_worker);

	zswap_pool_debug("created", pool);

	return pool;

error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	while (i--)
		zpool_destroy_pool(pool->zpools[i]);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	int i;

	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);
	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
		zpool_destroy_pool(pool->zpools[i]);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}

/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 struct zswap_tree *tree)
{
	swp_entry_t swpentry = entry->swpentry;
	struct page *page;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	struct zpool *pool = zswap_find_zpool(entry);
	bool page_was_allocated;
	u8 *src, *tmp = NULL;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	if (!zpool_can_sleep_mapped(pool)) {
		tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
	}

	/* try to allocate swap cache page */
	page = __read_swap_cache_async(swpentry, GFP_KERNEL, NULL, 0,
				       &page_was_allocated);
	if (!page) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Found an existing page, we raced with load/swapin */
	if (!page_was_allocated) {
		put_page(page);
		ret = -EEXIST;
		goto fail;
	}

	/*
	 * Page is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot. Verify that the
	 * swap entry hasn't been invalidated and recycled behind our
	 * backs (our zswap_entry reference doesn't prevent that), to
	 * avoid overwriting a new swap page with old compressed data.
	 */
	spin_lock(&tree->lock);
	if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
		spin_unlock(&tree->lock);
		delete_from_swap_cache(page_folio(page));
		ret = -ENOMEM;
		goto fail;
	}
	spin_unlock(&tree->lock);

	/* decompress */
	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	dlen = PAGE_SIZE;

	src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
	if (!zpool_can_sleep_mapped(pool)) {
		memcpy(tmp, src, entry->length);
		src = tmp;
		zpool_unmap_handle(pool, entry->handle);
	}

	mutex_lock(acomp_ctx->mutex);
	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_page(&output, page, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
	ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;
	mutex_unlock(acomp_ctx->mutex);

	if (!zpool_can_sleep_mapped(pool))
		kfree(tmp);
	else
		zpool_unmap_handle(pool, entry->handle);

	BUG_ON(ret);
	BUG_ON(dlen != PAGE_SIZE);

	/* page is up to date */
	SetPageUptodate(page);

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc);
	put_page(page);
	zswap_written_back_pages++;

	return ret;

fail:
	if (!zpool_can_sleep_mapped(pool))
		kfree(tmp);

	/*
	 * If we get here because the page is already in swapcache, a
	 * load may be happening concurrently. It is safe and okay to
	 * not free the entry. It is also okay to return !0.
	 */
	return ret;
}

static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	page = (unsigned long *)ptr;
	val = page[0];

	if (val != page[last_pos])
		return 0;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return 0;
	}

	*value = val;

	return 1;
}
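
/*
 * Example (illustrative): a page whose every unsigned long is 0, or
 * whose every unsigned long is, say, 0xffffffff, is "same-filled"; only
 * that one word needs to be remembered, so the page is stored with
 * length == 0 and the word kept in entry->value.  A page that differs
 * in even a single word goes through the normal compression path.
 */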

static void zswap_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page;

	page = (unsigned long *)ptr;
	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}

bool zswap_store(struct folio *folio)
{
	swp_entry_t swp = folio_swap_entry(folio);
	int type = swp_type(swp);
	pgoff_t offset = swp_offset(swp);
	struct page *page = &folio->page;
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	struct obj_cgroup *objcg = NULL;
	struct zswap_pool *pool;
	struct zpool *zpool;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle, value;
	char *buf;
	u8 *src, *dst;
	gfp_t gfp;
	int ret;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	/* Large folios aren't supported */
	if (folio_test_large(folio))
		return false;

	if (!zswap_enabled || !tree)
		return false;

	/*
	 * XXX: zswap reclaim does not work with cgroups yet. Without a
	 * cgroup-aware entry LRU, we will push out entries system-wide based on
	 * local cgroup limits.
	 */
	objcg = get_obj_cgroup_from_folio(folio);
	if (objcg && !obj_cgroup_may_zswap(objcg))
		goto reject;

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
		goto shrink;
	}

	if (zswap_pool_reached_full) {
		if (!zswap_can_accept())
			goto shrink;
		else
			zswap_pool_reached_full = false;
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		goto reject;
	}

	if (zswap_same_filled_pages_enabled) {
		src = kmap_atomic(page);
		if (zswap_is_page_same_filled(src, &value)) {
			kunmap_atomic(src);
			entry->swpentry = swp_entry(type, offset);
			entry->length = 0;
			entry->value = value;
			atomic_inc(&zswap_same_filled_pages);
			goto insert_entry;
		}
		kunmap_atomic(src);
	}

	if (!zswap_non_same_filled_pages_enabled)
		goto freepage;

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool)
		goto freepage;

	/* compress */
	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);

	mutex_lock(acomp_ctx->mutex);

	dst = acomp_ctx->dstmem;
	sg_init_table(&input, 1);
	sg_set_page(&input, page, PAGE_SIZE, 0);

	/* zswap_dstmem is of size (PAGE_SIZE * 2). Reflect same in sg_list */
	sg_init_one(&output, dst, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
	/*
	 * It may look a little silly that we send an asynchronous request and
	 * then wait for its completion synchronously, making the process
	 * effectively synchronous.
	 * Theoretically, acomp lets users submit multiple requests to one
	 * acomp instance and have them completed concurrently. But zswap
	 * stores and loads one page at a time, so within a single thread
	 * there is no way to send a second page before the first is done.
	 * However, threads running on different CPUs use different acomp
	 * instances, so multiple threads can do (de)compression in parallel.
	 */
	ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;

	if (ret)
		goto put_dstmem;

	/* store */
	zpool = zswap_find_zpool(entry);
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	ret = zpool_malloc(zpool, dlen, gfp, &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto put_dstmem;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto put_dstmem;
	}
	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(zpool, handle);
	mutex_unlock(acomp_ctx->mutex);

	/* populate entry */
	entry->swpentry = swp_entry(type, offset);
	entry->handle = handle;
	entry->length = dlen;

insert_entry:
	entry->objcg = objcg;
	if (objcg) {
		obj_cgroup_charge_zswap(objcg, entry->length);
		/* Account before objcg ref is moved to tree */
		count_objcg_event(objcg, ZSWPOUT);
	}

	/* map */
	spin_lock(&tree->lock);
	while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
		zswap_duplicate_entry++;
		zswap_invalidate_entry(tree, dupentry);
	}
	if (entry->length) {
		spin_lock(&entry->pool->lru_lock);
		list_add(&entry->lru, &entry->pool->lru);
		spin_unlock(&entry->pool->lru_lock);
	}
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_update_total_size();
	count_vm_event(ZSWPOUT);

	return true;

put_dstmem:
	mutex_unlock(acomp_ctx->mutex);
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	if (objcg)
		obj_cgroup_put(objcg);
	return false;

shrink:
	pool = zswap_pool_last_get();
	if (pool)
		queue_work(shrink_wq, &pool->shrink_work);
	goto reject;
}

bool zswap_load(struct folio *folio)
{
	swp_entry_t swp = folio_swap_entry(folio);
	int type = swp_type(swp);
	pgoff_t offset = swp_offset(swp);
	struct page *page = &folio->page;
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src, *dst, *tmp;
	struct zpool *zpool;
	unsigned int dlen;
	bool ret;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		spin_unlock(&tree->lock);
		return false;
	}
	spin_unlock(&tree->lock);

	if (!entry->length) {
		dst = kmap_atomic(page);
		zswap_fill_page(dst, entry->value);
		kunmap_atomic(dst);
		ret = true;
		goto stats;
	}

	zpool = zswap_find_zpool(entry);
	if (!zpool_can_sleep_mapped(zpool)) {
		tmp = kmalloc(entry->length, GFP_KERNEL);
		if (!tmp) {
			ret = false;
			goto freeentry;
		}
	}

	/* decompress */
	dlen = PAGE_SIZE;
	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);

	if (!zpool_can_sleep_mapped(zpool)) {
		memcpy(tmp, src, entry->length);
		src = tmp;
		zpool_unmap_handle(zpool, entry->handle);
	}

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	mutex_lock(acomp_ctx->mutex);
	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_page(&output, page, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
	if (crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait))
		WARN_ON(1);
	mutex_unlock(acomp_ctx->mutex);

	if (zpool_can_sleep_mapped(zpool))
		zpool_unmap_handle(zpool, entry->handle);
	else
		kfree(tmp);

	ret = true;
stats:
	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPIN);
freeentry:
	spin_lock(&tree->lock);
	if (ret && zswap_exclusive_loads_enabled) {
		zswap_invalidate_entry(tree, entry);
		folio_mark_dirty(folio);
	} else if (entry->length) {
		spin_lock(&entry->pool->lru_lock);
		list_move(&entry->lru, &entry->pool->lru);
		spin_unlock(&entry->pool->lru_lock);
	}
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return ret;
}

void zswap_invalidate(int type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}
	zswap_invalidate_entry(tree, entry);
	spin_unlock(&tree->lock);
}

void zswap_swapon(int type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return;
	}

	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
}

void zswap_swapoff(int type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
		zswap_free_entry(entry);
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
	kfree(tree);
	zswap_trees[type] = NULL;
}

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", 0444,
			   zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_total_size", 0444,
			   zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);
	debugfs_create_atomic_t("same_filled_pages", 0444,
				zswap_debugfs_root, &zswap_same_filled_pages);

	return 0;
}
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif
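
/*
 * Usage sketch: with CONFIG_DEBUG_FS enabled, the counters registered
 * above appear under /sys/kernel/debug/zswap/, for example:
 *
 *	cat /sys/kernel/debug/zswap/pool_total_size
 *	cat /sys/kernel/debug/zswap/stored_pages
 *
 * Dividing pool_total_size by (stored_pages * PAGE_SIZE) gives a rough
 * compression ratio for the pages currently held in the pool.
 */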

/*********************************
* module init and exit
**********************************/
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
				zswap_dstmem_prepare, zswap_dstmem_dead);
	if (ret) {
		pr_err("dstmem alloc failed\n");
		goto dstmem_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpools[0]));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	shrink_wq = create_workqueue("zswap-shrink");
	if (!shrink_wq)
		goto fallback_fail;

	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

fallback_fail:
	if (pool)
		zswap_pool_destroy(pool);
hp_fail:
	cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
dstmem_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}

static int __init zswap_init(void)
{
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);
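
/*
 * Note (illustrative): because zswap is normally built into the kernel
 * rather than loaded as a module, the parameters above are usually
 * given on the kernel command line, e.g.
 * "zswap.enabled=1 zswap.compressor=zstd zswap.zpool=zsmalloc", and
 * zswap_setup() runs from this late_initcall rather than a module load.
 */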

MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");