xref: /openbmc/linux/mm/zsmalloc.c (revision 3f58ff6b)
1 /*
2  * zsmalloc memory allocator
3  *
4  * Copyright (C) 2011  Nitin Gupta
5  * Copyright (C) 2012, 2013 Minchan Kim
6  *
7  * This code is released using a dual license strategy: BSD/GPL
8  * You can choose the license that better fits your requirements.
9  *
10  * Released under the terms of 3-clause BSD License
11  * Released under the terms of GNU General Public License Version 2.0
12  */
13 
14 /*
15  * Following is how we use various fields and flags of underlying
16  * struct page(s) to form a zspage.
17  *
18  * Usage of struct page fields:
19  *	page->private: points to zspage
20  *	page->index: links together all component pages of a zspage
21  *		For the huge page, this is always 0, so we use this field
22  *		to store handle.
23  *	page->page_type: first object offset in a subpage of zspage
24  *
25  * Usage of struct page flags:
26  *	PG_private: identifies the first component page
27  *	PG_owner_priv_1: identifies the huge component page
28  *
29  */
30 
31 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32 
33 /*
34  * lock ordering:
35  *	page_lock
36  *	pool->lock
37  *	zspage->lock
38  */
39 
40 #include <linux/module.h>
41 #include <linux/kernel.h>
42 #include <linux/sched.h>
43 #include <linux/bitops.h>
44 #include <linux/errno.h>
45 #include <linux/highmem.h>
46 #include <linux/string.h>
47 #include <linux/slab.h>
48 #include <linux/pgtable.h>
49 #include <asm/tlbflush.h>
50 #include <linux/cpumask.h>
51 #include <linux/cpu.h>
52 #include <linux/vmalloc.h>
53 #include <linux/preempt.h>
54 #include <linux/spinlock.h>
55 #include <linux/shrinker.h>
56 #include <linux/types.h>
57 #include <linux/debugfs.h>
58 #include <linux/zsmalloc.h>
59 #include <linux/zpool.h>
60 #include <linux/migrate.h>
61 #include <linux/wait.h>
62 #include <linux/pagemap.h>
63 #include <linux/fs.h>
64 #include <linux/local_lock.h>
65 
66 #define ZSPAGE_MAGIC	0x58
67 
68 /*
69  * This must be power of 2 and greater than or equal to sizeof(link_free).
70  * These two conditions ensure that any 'struct link_free' itself doesn't
71  * span more than 1 page which avoids complex case of mapping 2 pages simply
72  * to restore link_free pointer values.
73  */
74 #define ZS_ALIGN		8
75 
76 /*
77  * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
78  * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
79  */
80 #define ZS_MAX_ZSPAGE_ORDER 2
81 #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
82 
83 #define ZS_HANDLE_SIZE (sizeof(unsigned long))
84 
85 /*
86  * Object location (<PFN>, <obj_idx>) is encoded as
87  * a single (unsigned long) handle value.
88  *
89  * Note that object index <obj_idx> starts from 0.
90  *
91  * This is made more complicated by various memory models and PAE.
92  */
93 
94 #ifndef MAX_POSSIBLE_PHYSMEM_BITS
95 #ifdef MAX_PHYSMEM_BITS
96 #define MAX_POSSIBLE_PHYSMEM_BITS MAX_PHYSMEM_BITS
97 #else
98 /*
99  * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
100  * be PAGE_SHIFT
101  */
102 #define MAX_POSSIBLE_PHYSMEM_BITS BITS_PER_LONG
103 #endif
104 #endif
105 
106 #define _PFN_BITS		(MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
107 
108 /*
109  * Head in allocated object should have OBJ_ALLOCATED_TAG
110  * to identify the object was allocated or not.
111  * It's okay to add the status bit in the least bit because
112  * header keeps handle which is 4byte-aligned address so we
113  * have room for two bit at least.
114  */
115 #define OBJ_ALLOCATED_TAG 1
116 #define OBJ_TAG_BITS 1
117 #define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
118 #define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
119 
120 #define HUGE_BITS	1
121 #define FULLNESS_BITS	2
122 #define CLASS_BITS	8
123 #define ISOLATED_BITS	3
124 #define MAGIC_VAL_BITS	8
125 
126 #define MAX(a, b) ((a) >= (b) ? (a) : (b))
127 /* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
128 #define ZS_MIN_ALLOC_SIZE \
129 	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
130 /* each chunk includes extra space to keep handle */
131 #define ZS_MAX_ALLOC_SIZE	PAGE_SIZE
132 
133 /*
134  * On systems with 4K page size, this gives 255 size classes! There is a
135  * trader-off here:
136  *  - Large number of size classes is potentially wasteful as free page are
137  *    spread across these classes
138  *  - Small number of size classes causes large internal fragmentation
139  *  - Probably its better to use specific size classes (empirically
140  *    determined). NOTE: all those class sizes must be set as multiple of
141  *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
142  *
143  *  ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
144  *  (reason above)
145  */
146 #define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> CLASS_BITS)
147 #define ZS_SIZE_CLASSES	(DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
148 				      ZS_SIZE_CLASS_DELTA) + 1)
149 
150 enum fullness_group {
151 	ZS_EMPTY,
152 	ZS_ALMOST_EMPTY,
153 	ZS_ALMOST_FULL,
154 	ZS_FULL,
155 	NR_ZS_FULLNESS,
156 };
157 
158 enum class_stat_type {
159 	CLASS_EMPTY,
160 	CLASS_ALMOST_EMPTY,
161 	CLASS_ALMOST_FULL,
162 	CLASS_FULL,
163 	OBJ_ALLOCATED,
164 	OBJ_USED,
165 	NR_ZS_STAT_TYPE,
166 };
167 
168 struct zs_size_stat {
169 	unsigned long objs[NR_ZS_STAT_TYPE];
170 };
171 
172 #ifdef CONFIG_ZSMALLOC_STAT
173 static struct dentry *zs_stat_root;
174 #endif
175 
176 /*
177  * We assign a page to ZS_ALMOST_EMPTY fullness group when:
178  *	n <= N / f, where
179  * n = number of allocated objects
180  * N = total number of objects zspage can store
181  * f = fullness_threshold_frac
182  *
183  * Similarly, we assign zspage to:
184  *	ZS_ALMOST_FULL	when n > N / f
185  *	ZS_EMPTY	when n == 0
186  *	ZS_FULL		when n == N
187  *
188  * (see: fix_fullness_group())
189  */
190 static const int fullness_threshold_frac = 4;
191 static size_t huge_class_size;
192 
193 struct size_class {
194 	struct list_head fullness_list[NR_ZS_FULLNESS];
195 	/*
196 	 * Size of objects stored in this class. Must be multiple
197 	 * of ZS_ALIGN.
198 	 */
199 	int size;
200 	int objs_per_zspage;
201 	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
202 	int pages_per_zspage;
203 
204 	unsigned int index;
205 	struct zs_size_stat stats;
206 };
207 
208 /*
209  * Placed within free objects to form a singly linked list.
210  * For every zspage, zspage->freeobj gives head of this list.
211  *
212  * This must be power of 2 and less than or equal to ZS_ALIGN
213  */
214 struct link_free {
215 	union {
216 		/*
217 		 * Free object index;
218 		 * It's valid for non-allocated object
219 		 */
220 		unsigned long next;
221 		/*
222 		 * Handle of allocated object.
223 		 */
224 		unsigned long handle;
225 	};
226 };
227 
228 struct zs_pool {
229 	const char *name;
230 
231 	struct size_class *size_class[ZS_SIZE_CLASSES];
232 	struct kmem_cache *handle_cachep;
233 	struct kmem_cache *zspage_cachep;
234 
235 	atomic_long_t pages_allocated;
236 
237 	struct zs_pool_stats stats;
238 
239 	/* Compact classes */
240 	struct shrinker shrinker;
241 
242 #ifdef CONFIG_ZPOOL
243 	/* List tracking the zspages in LRU order by most recently added object */
244 	struct list_head lru;
245 	struct zpool *zpool;
246 	const struct zpool_ops *zpool_ops;
247 #endif
248 
249 #ifdef CONFIG_ZSMALLOC_STAT
250 	struct dentry *stat_dentry;
251 #endif
252 #ifdef CONFIG_COMPACTION
253 	struct work_struct free_work;
254 #endif
255 	spinlock_t lock;
256 };
257 
258 struct zspage {
259 	struct {
260 		unsigned int huge:HUGE_BITS;
261 		unsigned int fullness:FULLNESS_BITS;
262 		unsigned int class:CLASS_BITS + 1;
263 		unsigned int isolated:ISOLATED_BITS;
264 		unsigned int magic:MAGIC_VAL_BITS;
265 	};
266 	unsigned int inuse;
267 	unsigned int freeobj;
268 	struct page *first_page;
269 	struct list_head list; /* fullness list */
270 
271 #ifdef CONFIG_ZPOOL
272 	/* links the zspage to the lru list in the pool */
273 	struct list_head lru;
274 	bool under_reclaim;
275 	/* list of unfreed handles whose objects have been reclaimed */
276 	unsigned long *deferred_handles;
277 #endif
278 
279 	struct zs_pool *pool;
280 	rwlock_t lock;
281 };
282 
283 struct mapping_area {
284 	local_lock_t lock;
285 	char *vm_buf; /* copy buffer for objects that span pages */
286 	char *vm_addr; /* address of kmap_atomic()'ed pages */
287 	enum zs_mapmode vm_mm; /* mapping mode */
288 };
289 
290 /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
291 static void SetZsHugePage(struct zspage *zspage)
292 {
293 	zspage->huge = 1;
294 }
295 
296 static bool ZsHugePage(struct zspage *zspage)
297 {
298 	return zspage->huge;
299 }
300 
301 static void migrate_lock_init(struct zspage *zspage);
302 static void migrate_read_lock(struct zspage *zspage);
303 static void migrate_read_unlock(struct zspage *zspage);
304 
305 #ifdef CONFIG_COMPACTION
306 static void migrate_write_lock(struct zspage *zspage);
307 static void migrate_write_lock_nested(struct zspage *zspage);
308 static void migrate_write_unlock(struct zspage *zspage);
309 static void kick_deferred_free(struct zs_pool *pool);
310 static void init_deferred_free(struct zs_pool *pool);
311 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
312 #else
313 static void migrate_write_lock(struct zspage *zspage) {}
314 static void migrate_write_lock_nested(struct zspage *zspage) {}
315 static void migrate_write_unlock(struct zspage *zspage) {}
316 static void kick_deferred_free(struct zs_pool *pool) {}
317 static void init_deferred_free(struct zs_pool *pool) {}
318 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
319 #endif
320 
321 static int create_cache(struct zs_pool *pool)
322 {
323 	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
324 					0, 0, NULL);
325 	if (!pool->handle_cachep)
326 		return 1;
327 
328 	pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
329 					0, 0, NULL);
330 	if (!pool->zspage_cachep) {
331 		kmem_cache_destroy(pool->handle_cachep);
332 		pool->handle_cachep = NULL;
333 		return 1;
334 	}
335 
336 	return 0;
337 }
338 
339 static void destroy_cache(struct zs_pool *pool)
340 {
341 	kmem_cache_destroy(pool->handle_cachep);
342 	kmem_cache_destroy(pool->zspage_cachep);
343 }
344 
345 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
346 {
347 	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
348 			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
349 }
350 
351 static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
352 {
353 	kmem_cache_free(pool->handle_cachep, (void *)handle);
354 }
355 
356 static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
357 {
358 	return kmem_cache_zalloc(pool->zspage_cachep,
359 			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
360 }
361 
362 static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
363 {
364 	kmem_cache_free(pool->zspage_cachep, zspage);
365 }
366 
367 /* pool->lock(which owns the handle) synchronizes races */
368 static void record_obj(unsigned long handle, unsigned long obj)
369 {
370 	*(unsigned long *)handle = obj;
371 }
372 
373 /* zpool driver */
374 
375 #ifdef CONFIG_ZPOOL
376 
377 static void *zs_zpool_create(const char *name, gfp_t gfp,
378 			     const struct zpool_ops *zpool_ops,
379 			     struct zpool *zpool)
380 {
381 	/*
382 	 * Ignore global gfp flags: zs_malloc() may be invoked from
383 	 * different contexts and its caller must provide a valid
384 	 * gfp mask.
385 	 */
386 	struct zs_pool *pool = zs_create_pool(name);
387 
388 	if (pool) {
389 		pool->zpool = zpool;
390 		pool->zpool_ops = zpool_ops;
391 	}
392 
393 	return pool;
394 }
395 
396 static void zs_zpool_destroy(void *pool)
397 {
398 	zs_destroy_pool(pool);
399 }
400 
401 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
402 			unsigned long *handle)
403 {
404 	*handle = zs_malloc(pool, size, gfp);
405 
406 	if (IS_ERR_VALUE(*handle))
407 		return PTR_ERR((void *)*handle);
408 	return 0;
409 }
410 static void zs_zpool_free(void *pool, unsigned long handle)
411 {
412 	zs_free(pool, handle);
413 }
414 
415 static int zs_reclaim_page(struct zs_pool *pool, unsigned int retries);
416 
417 static int zs_zpool_shrink(void *pool, unsigned int pages,
418 			unsigned int *reclaimed)
419 {
420 	unsigned int total = 0;
421 	int ret = -EINVAL;
422 
423 	while (total < pages) {
424 		ret = zs_reclaim_page(pool, 8);
425 		if (ret < 0)
426 			break;
427 		total++;
428 	}
429 
430 	if (reclaimed)
431 		*reclaimed = total;
432 
433 	return ret;
434 }
435 
436 static void *zs_zpool_map(void *pool, unsigned long handle,
437 			enum zpool_mapmode mm)
438 {
439 	enum zs_mapmode zs_mm;
440 
441 	switch (mm) {
442 	case ZPOOL_MM_RO:
443 		zs_mm = ZS_MM_RO;
444 		break;
445 	case ZPOOL_MM_WO:
446 		zs_mm = ZS_MM_WO;
447 		break;
448 	case ZPOOL_MM_RW:
449 	default:
450 		zs_mm = ZS_MM_RW;
451 		break;
452 	}
453 
454 	return zs_map_object(pool, handle, zs_mm);
455 }
456 static void zs_zpool_unmap(void *pool, unsigned long handle)
457 {
458 	zs_unmap_object(pool, handle);
459 }
460 
461 static u64 zs_zpool_total_size(void *pool)
462 {
463 	return zs_get_total_pages(pool) << PAGE_SHIFT;
464 }
465 
466 static struct zpool_driver zs_zpool_driver = {
467 	.type =			  "zsmalloc",
468 	.owner =		  THIS_MODULE,
469 	.create =		  zs_zpool_create,
470 	.destroy =		  zs_zpool_destroy,
471 	.malloc_support_movable = true,
472 	.malloc =		  zs_zpool_malloc,
473 	.free =			  zs_zpool_free,
474 	.shrink =		  zs_zpool_shrink,
475 	.map =			  zs_zpool_map,
476 	.unmap =		  zs_zpool_unmap,
477 	.total_size =		  zs_zpool_total_size,
478 };
479 
480 MODULE_ALIAS("zpool-zsmalloc");
481 #endif /* CONFIG_ZPOOL */
482 
483 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
484 static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
485 	.lock	= INIT_LOCAL_LOCK(lock),
486 };
487 
488 static __maybe_unused int is_first_page(struct page *page)
489 {
490 	return PagePrivate(page);
491 }
492 
493 /* Protected by pool->lock */
494 static inline int get_zspage_inuse(struct zspage *zspage)
495 {
496 	return zspage->inuse;
497 }
498 
499 
500 static inline void mod_zspage_inuse(struct zspage *zspage, int val)
501 {
502 	zspage->inuse += val;
503 }
504 
505 static inline struct page *get_first_page(struct zspage *zspage)
506 {
507 	struct page *first_page = zspage->first_page;
508 
509 	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
510 	return first_page;
511 }
512 
513 static inline unsigned int get_first_obj_offset(struct page *page)
514 {
515 	return page->page_type;
516 }
517 
518 static inline void set_first_obj_offset(struct page *page, unsigned int offset)
519 {
520 	page->page_type = offset;
521 }
522 
523 static inline unsigned int get_freeobj(struct zspage *zspage)
524 {
525 	return zspage->freeobj;
526 }
527 
528 static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
529 {
530 	zspage->freeobj = obj;
531 }
532 
533 static void get_zspage_mapping(struct zspage *zspage,
534 				unsigned int *class_idx,
535 				enum fullness_group *fullness)
536 {
537 	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
538 
539 	*fullness = zspage->fullness;
540 	*class_idx = zspage->class;
541 }
542 
543 static struct size_class *zspage_class(struct zs_pool *pool,
544 					     struct zspage *zspage)
545 {
546 	return pool->size_class[zspage->class];
547 }
548 
549 static void set_zspage_mapping(struct zspage *zspage,
550 				unsigned int class_idx,
551 				enum fullness_group fullness)
552 {
553 	zspage->class = class_idx;
554 	zspage->fullness = fullness;
555 }
556 
557 /*
558  * zsmalloc divides the pool into various size classes where each
559  * class maintains a list of zspages where each zspage is divided
560  * into equal sized chunks. Each allocation falls into one of these
561  * classes depending on its size. This function returns index of the
562  * size class which has chunk size big enough to hold the given size.
563  */
564 static int get_size_class_index(int size)
565 {
566 	int idx = 0;
567 
568 	if (likely(size > ZS_MIN_ALLOC_SIZE))
569 		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
570 				ZS_SIZE_CLASS_DELTA);
571 
572 	return min_t(int, ZS_SIZE_CLASSES - 1, idx);
573 }
574 
575 /* type can be of enum type class_stat_type or fullness_group */
576 static inline void class_stat_inc(struct size_class *class,
577 				int type, unsigned long cnt)
578 {
579 	class->stats.objs[type] += cnt;
580 }
581 
582 /* type can be of enum type class_stat_type or fullness_group */
583 static inline void class_stat_dec(struct size_class *class,
584 				int type, unsigned long cnt)
585 {
586 	class->stats.objs[type] -= cnt;
587 }
588 
589 /* type can be of enum type class_stat_type or fullness_group */
590 static inline unsigned long zs_stat_get(struct size_class *class,
591 				int type)
592 {
593 	return class->stats.objs[type];
594 }
595 
596 #ifdef CONFIG_ZSMALLOC_STAT
597 
598 static void __init zs_stat_init(void)
599 {
600 	if (!debugfs_initialized()) {
601 		pr_warn("debugfs not available, stat dir not created\n");
602 		return;
603 	}
604 
605 	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
606 }
607 
608 static void __exit zs_stat_exit(void)
609 {
610 	debugfs_remove_recursive(zs_stat_root);
611 }
612 
613 static unsigned long zs_can_compact(struct size_class *class);
614 
615 static int zs_stats_size_show(struct seq_file *s, void *v)
616 {
617 	int i;
618 	struct zs_pool *pool = s->private;
619 	struct size_class *class;
620 	int objs_per_zspage;
621 	unsigned long class_almost_full, class_almost_empty;
622 	unsigned long obj_allocated, obj_used, pages_used, freeable;
623 	unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
624 	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
625 	unsigned long total_freeable = 0;
626 
627 	seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
628 			"class", "size", "almost_full", "almost_empty",
629 			"obj_allocated", "obj_used", "pages_used",
630 			"pages_per_zspage", "freeable");
631 
632 	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
633 		class = pool->size_class[i];
634 
635 		if (class->index != i)
636 			continue;
637 
638 		spin_lock(&pool->lock);
639 		class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
640 		class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
641 		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
642 		obj_used = zs_stat_get(class, OBJ_USED);
643 		freeable = zs_can_compact(class);
644 		spin_unlock(&pool->lock);
645 
646 		objs_per_zspage = class->objs_per_zspage;
647 		pages_used = obj_allocated / objs_per_zspage *
648 				class->pages_per_zspage;
649 
650 		seq_printf(s, " %5u %5u %11lu %12lu %13lu"
651 				" %10lu %10lu %16d %8lu\n",
652 			i, class->size, class_almost_full, class_almost_empty,
653 			obj_allocated, obj_used, pages_used,
654 			class->pages_per_zspage, freeable);
655 
656 		total_class_almost_full += class_almost_full;
657 		total_class_almost_empty += class_almost_empty;
658 		total_objs += obj_allocated;
659 		total_used_objs += obj_used;
660 		total_pages += pages_used;
661 		total_freeable += freeable;
662 	}
663 
664 	seq_puts(s, "\n");
665 	seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
666 			"Total", "", total_class_almost_full,
667 			total_class_almost_empty, total_objs,
668 			total_used_objs, total_pages, "", total_freeable);
669 
670 	return 0;
671 }
672 DEFINE_SHOW_ATTRIBUTE(zs_stats_size);
673 
674 static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
675 {
676 	if (!zs_stat_root) {
677 		pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
678 		return;
679 	}
680 
681 	pool->stat_dentry = debugfs_create_dir(name, zs_stat_root);
682 
683 	debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool,
684 			    &zs_stats_size_fops);
685 }
686 
687 static void zs_pool_stat_destroy(struct zs_pool *pool)
688 {
689 	debugfs_remove_recursive(pool->stat_dentry);
690 }
691 
692 #else /* CONFIG_ZSMALLOC_STAT */
693 static void __init zs_stat_init(void)
694 {
695 }
696 
697 static void __exit zs_stat_exit(void)
698 {
699 }
700 
701 static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
702 {
703 }
704 
705 static inline void zs_pool_stat_destroy(struct zs_pool *pool)
706 {
707 }
708 #endif
709 
710 
711 /*
712  * For each size class, zspages are divided into different groups
713  * depending on how "full" they are. This was done so that we could
714  * easily find empty or nearly empty zspages when we try to shrink
715  * the pool (not yet implemented). This function returns fullness
716  * status of the given page.
717  */
718 static enum fullness_group get_fullness_group(struct size_class *class,
719 						struct zspage *zspage)
720 {
721 	int inuse, objs_per_zspage;
722 	enum fullness_group fg;
723 
724 	inuse = get_zspage_inuse(zspage);
725 	objs_per_zspage = class->objs_per_zspage;
726 
727 	if (inuse == 0)
728 		fg = ZS_EMPTY;
729 	else if (inuse == objs_per_zspage)
730 		fg = ZS_FULL;
731 	else if (inuse <= 3 * objs_per_zspage / fullness_threshold_frac)
732 		fg = ZS_ALMOST_EMPTY;
733 	else
734 		fg = ZS_ALMOST_FULL;
735 
736 	return fg;
737 }
738 
739 /*
740  * Each size class maintains various freelists and zspages are assigned
741  * to one of these freelists based on the number of live objects they
742  * have. This functions inserts the given zspage into the freelist
743  * identified by <class, fullness_group>.
744  */
745 static void insert_zspage(struct size_class *class,
746 				struct zspage *zspage,
747 				enum fullness_group fullness)
748 {
749 	struct zspage *head;
750 
751 	class_stat_inc(class, fullness, 1);
752 	head = list_first_entry_or_null(&class->fullness_list[fullness],
753 					struct zspage, list);
754 	/*
755 	 * We want to see more ZS_FULL pages and less almost empty/full.
756 	 * Put pages with higher ->inuse first.
757 	 */
758 	if (head && get_zspage_inuse(zspage) < get_zspage_inuse(head))
759 		list_add(&zspage->list, &head->list);
760 	else
761 		list_add(&zspage->list, &class->fullness_list[fullness]);
762 }
763 
764 /*
765  * This function removes the given zspage from the freelist identified
766  * by <class, fullness_group>.
767  */
768 static void remove_zspage(struct size_class *class,
769 				struct zspage *zspage,
770 				enum fullness_group fullness)
771 {
772 	VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
773 
774 	list_del_init(&zspage->list);
775 	class_stat_dec(class, fullness, 1);
776 }
777 
778 /*
779  * Each size class maintains zspages in different fullness groups depending
780  * on the number of live objects they contain. When allocating or freeing
781  * objects, the fullness status of the page can change, say, from ALMOST_FULL
782  * to ALMOST_EMPTY when freeing an object. This function checks if such
783  * a status change has occurred for the given page and accordingly moves the
784  * page from the freelist of the old fullness group to that of the new
785  * fullness group.
786  */
787 static enum fullness_group fix_fullness_group(struct size_class *class,
788 						struct zspage *zspage)
789 {
790 	int class_idx;
791 	enum fullness_group currfg, newfg;
792 
793 	get_zspage_mapping(zspage, &class_idx, &currfg);
794 	newfg = get_fullness_group(class, zspage);
795 	if (newfg == currfg)
796 		goto out;
797 
798 	remove_zspage(class, zspage, currfg);
799 	insert_zspage(class, zspage, newfg);
800 	set_zspage_mapping(zspage, class_idx, newfg);
801 out:
802 	return newfg;
803 }
804 
805 /*
806  * We have to decide on how many pages to link together
807  * to form a zspage for each size class. This is important
808  * to reduce wastage due to unusable space left at end of
809  * each zspage which is given as:
810  *     wastage = Zp % class_size
811  *     usage = Zp - wastage
812  * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
813  *
814  * For example, for size class of 3/8 * PAGE_SIZE, we should
815  * link together 3 PAGE_SIZE sized pages to form a zspage
816  * since then we can perfectly fit in 8 such objects.
817  */
818 static int get_pages_per_zspage(int class_size)
819 {
820 	int i, max_usedpc = 0;
821 	/* zspage order which gives maximum used size per KB */
822 	int max_usedpc_order = 1;
823 
824 	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
825 		int zspage_size;
826 		int waste, usedpc;
827 
828 		zspage_size = i * PAGE_SIZE;
829 		waste = zspage_size % class_size;
830 		usedpc = (zspage_size - waste) * 100 / zspage_size;
831 
832 		if (usedpc > max_usedpc) {
833 			max_usedpc = usedpc;
834 			max_usedpc_order = i;
835 		}
836 	}
837 
838 	return max_usedpc_order;
839 }
840 
841 static struct zspage *get_zspage(struct page *page)
842 {
843 	struct zspage *zspage = (struct zspage *)page_private(page);
844 
845 	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
846 	return zspage;
847 }
848 
849 static struct page *get_next_page(struct page *page)
850 {
851 	struct zspage *zspage = get_zspage(page);
852 
853 	if (unlikely(ZsHugePage(zspage)))
854 		return NULL;
855 
856 	return (struct page *)page->index;
857 }
858 
859 /**
860  * obj_to_location - get (<page>, <obj_idx>) from encoded object value
861  * @obj: the encoded object value
862  * @page: page object resides in zspage
863  * @obj_idx: object index
864  */
865 static void obj_to_location(unsigned long obj, struct page **page,
866 				unsigned int *obj_idx)
867 {
868 	obj >>= OBJ_TAG_BITS;
869 	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
870 	*obj_idx = (obj & OBJ_INDEX_MASK);
871 }
872 
873 static void obj_to_page(unsigned long obj, struct page **page)
874 {
875 	obj >>= OBJ_TAG_BITS;
876 	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
877 }
878 
879 /**
880  * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
881  * @page: page object resides in zspage
882  * @obj_idx: object index
883  */
884 static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
885 {
886 	unsigned long obj;
887 
888 	obj = page_to_pfn(page) << OBJ_INDEX_BITS;
889 	obj |= obj_idx & OBJ_INDEX_MASK;
890 	obj <<= OBJ_TAG_BITS;
891 
892 	return obj;
893 }
894 
895 static unsigned long handle_to_obj(unsigned long handle)
896 {
897 	return *(unsigned long *)handle;
898 }
899 
900 static bool obj_allocated(struct page *page, void *obj, unsigned long *phandle)
901 {
902 	unsigned long handle;
903 	struct zspage *zspage = get_zspage(page);
904 
905 	if (unlikely(ZsHugePage(zspage))) {
906 		VM_BUG_ON_PAGE(!is_first_page(page), page);
907 		handle = page->index;
908 	} else
909 		handle = *(unsigned long *)obj;
910 
911 	if (!(handle & OBJ_ALLOCATED_TAG))
912 		return false;
913 
914 	*phandle = handle & ~OBJ_ALLOCATED_TAG;
915 	return true;
916 }
917 
918 static void reset_page(struct page *page)
919 {
920 	__ClearPageMovable(page);
921 	ClearPagePrivate(page);
922 	set_page_private(page, 0);
923 	page_mapcount_reset(page);
924 	page->index = 0;
925 }
926 
927 static int trylock_zspage(struct zspage *zspage)
928 {
929 	struct page *cursor, *fail;
930 
931 	for (cursor = get_first_page(zspage); cursor != NULL; cursor =
932 					get_next_page(cursor)) {
933 		if (!trylock_page(cursor)) {
934 			fail = cursor;
935 			goto unlock;
936 		}
937 	}
938 
939 	return 1;
940 unlock:
941 	for (cursor = get_first_page(zspage); cursor != fail; cursor =
942 					get_next_page(cursor))
943 		unlock_page(cursor);
944 
945 	return 0;
946 }
947 
948 #ifdef CONFIG_ZPOOL
949 /*
950  * Free all the deferred handles whose objects are freed in zs_free.
951  */
952 static void free_handles(struct zs_pool *pool, struct zspage *zspage)
953 {
954 	unsigned long handle = (unsigned long)zspage->deferred_handles;
955 
956 	while (handle) {
957 		unsigned long nxt_handle = handle_to_obj(handle);
958 
959 		cache_free_handle(pool, handle);
960 		handle = nxt_handle;
961 	}
962 }
963 #else
964 static inline void free_handles(struct zs_pool *pool, struct zspage *zspage) {}
965 #endif
966 
967 static void __free_zspage(struct zs_pool *pool, struct size_class *class,
968 				struct zspage *zspage)
969 {
970 	struct page *page, *next;
971 	enum fullness_group fg;
972 	unsigned int class_idx;
973 
974 	get_zspage_mapping(zspage, &class_idx, &fg);
975 
976 	assert_spin_locked(&pool->lock);
977 
978 	VM_BUG_ON(get_zspage_inuse(zspage));
979 	VM_BUG_ON(fg != ZS_EMPTY);
980 
981 	/* Free all deferred handles from zs_free */
982 	free_handles(pool, zspage);
983 
984 	next = page = get_first_page(zspage);
985 	do {
986 		VM_BUG_ON_PAGE(!PageLocked(page), page);
987 		next = get_next_page(page);
988 		reset_page(page);
989 		unlock_page(page);
990 		dec_zone_page_state(page, NR_ZSPAGES);
991 		put_page(page);
992 		page = next;
993 	} while (page != NULL);
994 
995 	cache_free_zspage(pool, zspage);
996 
997 	class_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage);
998 	atomic_long_sub(class->pages_per_zspage,
999 					&pool->pages_allocated);
1000 }
1001 
1002 static void free_zspage(struct zs_pool *pool, struct size_class *class,
1003 				struct zspage *zspage)
1004 {
1005 	VM_BUG_ON(get_zspage_inuse(zspage));
1006 	VM_BUG_ON(list_empty(&zspage->list));
1007 
1008 	/*
1009 	 * Since zs_free couldn't be sleepable, this function cannot call
1010 	 * lock_page. The page locks trylock_zspage got will be released
1011 	 * by __free_zspage.
1012 	 */
1013 	if (!trylock_zspage(zspage)) {
1014 		kick_deferred_free(pool);
1015 		return;
1016 	}
1017 
1018 	remove_zspage(class, zspage, ZS_EMPTY);
1019 #ifdef CONFIG_ZPOOL
1020 	list_del(&zspage->lru);
1021 #endif
1022 	__free_zspage(pool, class, zspage);
1023 }
1024 
1025 /* Initialize a newly allocated zspage */
1026 static void init_zspage(struct size_class *class, struct zspage *zspage)
1027 {
1028 	unsigned int freeobj = 1;
1029 	unsigned long off = 0;
1030 	struct page *page = get_first_page(zspage);
1031 
1032 	while (page) {
1033 		struct page *next_page;
1034 		struct link_free *link;
1035 		void *vaddr;
1036 
1037 		set_first_obj_offset(page, off);
1038 
1039 		vaddr = kmap_atomic(page);
1040 		link = (struct link_free *)vaddr + off / sizeof(*link);
1041 
1042 		while ((off += class->size) < PAGE_SIZE) {
1043 			link->next = freeobj++ << OBJ_TAG_BITS;
1044 			link += class->size / sizeof(*link);
1045 		}
1046 
1047 		/*
1048 		 * We now come to the last (full or partial) object on this
1049 		 * page, which must point to the first object on the next
1050 		 * page (if present)
1051 		 */
1052 		next_page = get_next_page(page);
1053 		if (next_page) {
1054 			link->next = freeobj++ << OBJ_TAG_BITS;
1055 		} else {
1056 			/*
1057 			 * Reset OBJ_TAG_BITS bit to last link to tell
1058 			 * whether it's allocated object or not.
1059 			 */
1060 			link->next = -1UL << OBJ_TAG_BITS;
1061 		}
1062 		kunmap_atomic(vaddr);
1063 		page = next_page;
1064 		off %= PAGE_SIZE;
1065 	}
1066 
1067 #ifdef CONFIG_ZPOOL
1068 	INIT_LIST_HEAD(&zspage->lru);
1069 	zspage->under_reclaim = false;
1070 	zspage->deferred_handles = NULL;
1071 #endif
1072 
1073 	set_freeobj(zspage, 0);
1074 }
1075 
1076 static void create_page_chain(struct size_class *class, struct zspage *zspage,
1077 				struct page *pages[])
1078 {
1079 	int i;
1080 	struct page *page;
1081 	struct page *prev_page = NULL;
1082 	int nr_pages = class->pages_per_zspage;
1083 
1084 	/*
1085 	 * Allocate individual pages and link them together as:
1086 	 * 1. all pages are linked together using page->index
1087 	 * 2. each sub-page point to zspage using page->private
1088 	 *
1089 	 * we set PG_private to identify the first page (i.e. no other sub-page
1090 	 * has this flag set).
1091 	 */
1092 	for (i = 0; i < nr_pages; i++) {
1093 		page = pages[i];
1094 		set_page_private(page, (unsigned long)zspage);
1095 		page->index = 0;
1096 		if (i == 0) {
1097 			zspage->first_page = page;
1098 			SetPagePrivate(page);
1099 			if (unlikely(class->objs_per_zspage == 1 &&
1100 					class->pages_per_zspage == 1))
1101 				SetZsHugePage(zspage);
1102 		} else {
1103 			prev_page->index = (unsigned long)page;
1104 		}
1105 		prev_page = page;
1106 	}
1107 }
1108 
1109 /*
1110  * Allocate a zspage for the given size class
1111  */
1112 static struct zspage *alloc_zspage(struct zs_pool *pool,
1113 					struct size_class *class,
1114 					gfp_t gfp)
1115 {
1116 	int i;
1117 	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
1118 	struct zspage *zspage = cache_alloc_zspage(pool, gfp);
1119 
1120 	if (!zspage)
1121 		return NULL;
1122 
1123 	zspage->magic = ZSPAGE_MAGIC;
1124 	migrate_lock_init(zspage);
1125 
1126 	for (i = 0; i < class->pages_per_zspage; i++) {
1127 		struct page *page;
1128 
1129 		page = alloc_page(gfp);
1130 		if (!page) {
1131 			while (--i >= 0) {
1132 				dec_zone_page_state(pages[i], NR_ZSPAGES);
1133 				__free_page(pages[i]);
1134 			}
1135 			cache_free_zspage(pool, zspage);
1136 			return NULL;
1137 		}
1138 
1139 		inc_zone_page_state(page, NR_ZSPAGES);
1140 		pages[i] = page;
1141 	}
1142 
1143 	create_page_chain(class, zspage, pages);
1144 	init_zspage(class, zspage);
1145 	zspage->pool = pool;
1146 
1147 	return zspage;
1148 }
1149 
1150 static struct zspage *find_get_zspage(struct size_class *class)
1151 {
1152 	int i;
1153 	struct zspage *zspage;
1154 
1155 	for (i = ZS_ALMOST_FULL; i >= ZS_EMPTY; i--) {
1156 		zspage = list_first_entry_or_null(&class->fullness_list[i],
1157 				struct zspage, list);
1158 		if (zspage)
1159 			break;
1160 	}
1161 
1162 	return zspage;
1163 }
1164 
1165 static inline int __zs_cpu_up(struct mapping_area *area)
1166 {
1167 	/*
1168 	 * Make sure we don't leak memory if a cpu UP notification
1169 	 * and zs_init() race and both call zs_cpu_up() on the same cpu
1170 	 */
1171 	if (area->vm_buf)
1172 		return 0;
1173 	area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
1174 	if (!area->vm_buf)
1175 		return -ENOMEM;
1176 	return 0;
1177 }
1178 
1179 static inline void __zs_cpu_down(struct mapping_area *area)
1180 {
1181 	kfree(area->vm_buf);
1182 	area->vm_buf = NULL;
1183 }
1184 
1185 static void *__zs_map_object(struct mapping_area *area,
1186 			struct page *pages[2], int off, int size)
1187 {
1188 	int sizes[2];
1189 	void *addr;
1190 	char *buf = area->vm_buf;
1191 
1192 	/* disable page faults to match kmap_atomic() return conditions */
1193 	pagefault_disable();
1194 
1195 	/* no read fastpath */
1196 	if (area->vm_mm == ZS_MM_WO)
1197 		goto out;
1198 
1199 	sizes[0] = PAGE_SIZE - off;
1200 	sizes[1] = size - sizes[0];
1201 
1202 	/* copy object to per-cpu buffer */
1203 	addr = kmap_atomic(pages[0]);
1204 	memcpy(buf, addr + off, sizes[0]);
1205 	kunmap_atomic(addr);
1206 	addr = kmap_atomic(pages[1]);
1207 	memcpy(buf + sizes[0], addr, sizes[1]);
1208 	kunmap_atomic(addr);
1209 out:
1210 	return area->vm_buf;
1211 }
1212 
1213 static void __zs_unmap_object(struct mapping_area *area,
1214 			struct page *pages[2], int off, int size)
1215 {
1216 	int sizes[2];
1217 	void *addr;
1218 	char *buf;
1219 
1220 	/* no write fastpath */
1221 	if (area->vm_mm == ZS_MM_RO)
1222 		goto out;
1223 
1224 	buf = area->vm_buf;
1225 	buf = buf + ZS_HANDLE_SIZE;
1226 	size -= ZS_HANDLE_SIZE;
1227 	off += ZS_HANDLE_SIZE;
1228 
1229 	sizes[0] = PAGE_SIZE - off;
1230 	sizes[1] = size - sizes[0];
1231 
1232 	/* copy per-cpu buffer to object */
1233 	addr = kmap_atomic(pages[0]);
1234 	memcpy(addr + off, buf, sizes[0]);
1235 	kunmap_atomic(addr);
1236 	addr = kmap_atomic(pages[1]);
1237 	memcpy(addr, buf + sizes[0], sizes[1]);
1238 	kunmap_atomic(addr);
1239 
1240 out:
1241 	/* enable page faults to match kunmap_atomic() return conditions */
1242 	pagefault_enable();
1243 }
1244 
1245 static int zs_cpu_prepare(unsigned int cpu)
1246 {
1247 	struct mapping_area *area;
1248 
1249 	area = &per_cpu(zs_map_area, cpu);
1250 	return __zs_cpu_up(area);
1251 }
1252 
1253 static int zs_cpu_dead(unsigned int cpu)
1254 {
1255 	struct mapping_area *area;
1256 
1257 	area = &per_cpu(zs_map_area, cpu);
1258 	__zs_cpu_down(area);
1259 	return 0;
1260 }
1261 
1262 static bool can_merge(struct size_class *prev, int pages_per_zspage,
1263 					int objs_per_zspage)
1264 {
1265 	if (prev->pages_per_zspage == pages_per_zspage &&
1266 		prev->objs_per_zspage == objs_per_zspage)
1267 		return true;
1268 
1269 	return false;
1270 }
1271 
1272 static bool zspage_full(struct size_class *class, struct zspage *zspage)
1273 {
1274 	return get_zspage_inuse(zspage) == class->objs_per_zspage;
1275 }
1276 
1277 /**
1278  * zs_lookup_class_index() - Returns index of the zsmalloc &size_class
1279  * that hold objects of the provided size.
1280  * @pool: zsmalloc pool to use
1281  * @size: object size
1282  *
1283  * Context: Any context.
1284  *
1285  * Return: the index of the zsmalloc &size_class that hold objects of the
1286  * provided size.
1287  */
1288 unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size)
1289 {
1290 	struct size_class *class;
1291 
1292 	class = pool->size_class[get_size_class_index(size)];
1293 
1294 	return class->index;
1295 }
1296 EXPORT_SYMBOL_GPL(zs_lookup_class_index);
1297 
1298 unsigned long zs_get_total_pages(struct zs_pool *pool)
1299 {
1300 	return atomic_long_read(&pool->pages_allocated);
1301 }
1302 EXPORT_SYMBOL_GPL(zs_get_total_pages);
1303 
1304 /**
1305  * zs_map_object - get address of allocated object from handle.
1306  * @pool: pool from which the object was allocated
1307  * @handle: handle returned from zs_malloc
1308  * @mm: mapping mode to use
1309  *
1310  * Before using an object allocated from zs_malloc, it must be mapped using
1311  * this function. When done with the object, it must be unmapped using
1312  * zs_unmap_object.
1313  *
1314  * Only one object can be mapped per cpu at a time. There is no protection
1315  * against nested mappings.
1316  *
1317  * This function returns with preemption and page faults disabled.
1318  */
1319 void *zs_map_object(struct zs_pool *pool, unsigned long handle,
1320 			enum zs_mapmode mm)
1321 {
1322 	struct zspage *zspage;
1323 	struct page *page;
1324 	unsigned long obj, off;
1325 	unsigned int obj_idx;
1326 
1327 	struct size_class *class;
1328 	struct mapping_area *area;
1329 	struct page *pages[2];
1330 	void *ret;
1331 
1332 	/*
1333 	 * Because we use per-cpu mapping areas shared among the
1334 	 * pools/users, we can't allow mapping in interrupt context
1335 	 * because it can corrupt another users mappings.
1336 	 */
1337 	BUG_ON(in_interrupt());
1338 
1339 	/* It guarantees it can get zspage from handle safely */
1340 	spin_lock(&pool->lock);
1341 	obj = handle_to_obj(handle);
1342 	obj_to_location(obj, &page, &obj_idx);
1343 	zspage = get_zspage(page);
1344 
1345 #ifdef CONFIG_ZPOOL
1346 	/*
1347 	 * Move the zspage to front of pool's LRU.
1348 	 *
1349 	 * Note that this is swap-specific, so by definition there are no ongoing
1350 	 * accesses to the memory while the page is swapped out that would make
1351 	 * it "hot". A new entry is hot, then ages to the tail until it gets either
1352 	 * written back or swaps back in.
1353 	 *
1354 	 * Furthermore, map is also called during writeback. We must not put an
1355 	 * isolated page on the LRU mid-reclaim.
1356 	 *
1357 	 * As a result, only update the LRU when the page is mapped for write
1358 	 * when it's first instantiated.
1359 	 *
1360 	 * This is a deviation from the other backends, which perform this update
1361 	 * in the allocation function (zbud_alloc, z3fold_alloc).
1362 	 */
1363 	if (mm == ZS_MM_WO) {
1364 		if (!list_empty(&zspage->lru))
1365 			list_del(&zspage->lru);
1366 		list_add(&zspage->lru, &pool->lru);
1367 	}
1368 #endif
1369 
1370 	/*
1371 	 * migration cannot move any zpages in this zspage. Here, pool->lock
1372 	 * is too heavy since callers would take some time until they calls
1373 	 * zs_unmap_object API so delegate the locking from class to zspage
1374 	 * which is smaller granularity.
1375 	 */
1376 	migrate_read_lock(zspage);
1377 	spin_unlock(&pool->lock);
1378 
1379 	class = zspage_class(pool, zspage);
1380 	off = (class->size * obj_idx) & ~PAGE_MASK;
1381 
1382 	local_lock(&zs_map_area.lock);
1383 	area = this_cpu_ptr(&zs_map_area);
1384 	area->vm_mm = mm;
1385 	if (off + class->size <= PAGE_SIZE) {
1386 		/* this object is contained entirely within a page */
1387 		area->vm_addr = kmap_atomic(page);
1388 		ret = area->vm_addr + off;
1389 		goto out;
1390 	}
1391 
1392 	/* this object spans two pages */
1393 	pages[0] = page;
1394 	pages[1] = get_next_page(page);
1395 	BUG_ON(!pages[1]);
1396 
1397 	ret = __zs_map_object(area, pages, off, class->size);
1398 out:
1399 	if (likely(!ZsHugePage(zspage)))
1400 		ret += ZS_HANDLE_SIZE;
1401 
1402 	return ret;
1403 }
1404 EXPORT_SYMBOL_GPL(zs_map_object);
1405 
1406 void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
1407 {
1408 	struct zspage *zspage;
1409 	struct page *page;
1410 	unsigned long obj, off;
1411 	unsigned int obj_idx;
1412 
1413 	struct size_class *class;
1414 	struct mapping_area *area;
1415 
1416 	obj = handle_to_obj(handle);
1417 	obj_to_location(obj, &page, &obj_idx);
1418 	zspage = get_zspage(page);
1419 	class = zspage_class(pool, zspage);
1420 	off = (class->size * obj_idx) & ~PAGE_MASK;
1421 
1422 	area = this_cpu_ptr(&zs_map_area);
1423 	if (off + class->size <= PAGE_SIZE)
1424 		kunmap_atomic(area->vm_addr);
1425 	else {
1426 		struct page *pages[2];
1427 
1428 		pages[0] = page;
1429 		pages[1] = get_next_page(page);
1430 		BUG_ON(!pages[1]);
1431 
1432 		__zs_unmap_object(area, pages, off, class->size);
1433 	}
1434 	local_unlock(&zs_map_area.lock);
1435 
1436 	migrate_read_unlock(zspage);
1437 }
1438 EXPORT_SYMBOL_GPL(zs_unmap_object);
1439 
1440 /**
1441  * zs_huge_class_size() - Returns the size (in bytes) of the first huge
1442  *                        zsmalloc &size_class.
1443  * @pool: zsmalloc pool to use
1444  *
1445  * The function returns the size of the first huge class - any object of equal
1446  * or bigger size will be stored in zspage consisting of a single physical
1447  * page.
1448  *
1449  * Context: Any context.
1450  *
1451  * Return: the size (in bytes) of the first huge zsmalloc &size_class.
1452  */
1453 size_t zs_huge_class_size(struct zs_pool *pool)
1454 {
1455 	return huge_class_size;
1456 }
1457 EXPORT_SYMBOL_GPL(zs_huge_class_size);
1458 
1459 static unsigned long obj_malloc(struct zs_pool *pool,
1460 				struct zspage *zspage, unsigned long handle)
1461 {
1462 	int i, nr_page, offset;
1463 	unsigned long obj;
1464 	struct link_free *link;
1465 	struct size_class *class;
1466 
1467 	struct page *m_page;
1468 	unsigned long m_offset;
1469 	void *vaddr;
1470 
1471 	class = pool->size_class[zspage->class];
1472 	handle |= OBJ_ALLOCATED_TAG;
1473 	obj = get_freeobj(zspage);
1474 
1475 	offset = obj * class->size;
1476 	nr_page = offset >> PAGE_SHIFT;
1477 	m_offset = offset & ~PAGE_MASK;
1478 	m_page = get_first_page(zspage);
1479 
1480 	for (i = 0; i < nr_page; i++)
1481 		m_page = get_next_page(m_page);
1482 
1483 	vaddr = kmap_atomic(m_page);
1484 	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
1485 	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
1486 	if (likely(!ZsHugePage(zspage)))
1487 		/* record handle in the header of allocated chunk */
1488 		link->handle = handle;
1489 	else
1490 		/* record handle to page->index */
1491 		zspage->first_page->index = handle;
1492 
1493 	kunmap_atomic(vaddr);
1494 	mod_zspage_inuse(zspage, 1);
1495 
1496 	obj = location_to_obj(m_page, obj);
1497 
1498 	return obj;
1499 }
1500 
1501 
1502 /**
1503  * zs_malloc - Allocate block of given size from pool.
1504  * @pool: pool to allocate from
1505  * @size: size of block to allocate
1506  * @gfp: gfp flags when allocating object
1507  *
1508  * On success, handle to the allocated object is returned,
1509  * otherwise an ERR_PTR().
1510  * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
1511  */
1512 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
1513 {
1514 	unsigned long handle, obj;
1515 	struct size_class *class;
1516 	enum fullness_group newfg;
1517 	struct zspage *zspage;
1518 
1519 	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
1520 		return (unsigned long)ERR_PTR(-EINVAL);
1521 
1522 	handle = cache_alloc_handle(pool, gfp);
1523 	if (!handle)
1524 		return (unsigned long)ERR_PTR(-ENOMEM);
1525 
1526 	/* extra space in chunk to keep the handle */
1527 	size += ZS_HANDLE_SIZE;
1528 	class = pool->size_class[get_size_class_index(size)];
1529 
1530 	/* pool->lock effectively protects the zpage migration */
1531 	spin_lock(&pool->lock);
1532 	zspage = find_get_zspage(class);
1533 	if (likely(zspage)) {
1534 		obj = obj_malloc(pool, zspage, handle);
1535 		/* Now move the zspage to another fullness group, if required */
1536 		fix_fullness_group(class, zspage);
1537 		record_obj(handle, obj);
1538 		class_stat_inc(class, OBJ_USED, 1);
1539 		spin_unlock(&pool->lock);
1540 
1541 		return handle;
1542 	}
1543 
1544 	spin_unlock(&pool->lock);
1545 
1546 	zspage = alloc_zspage(pool, class, gfp);
1547 	if (!zspage) {
1548 		cache_free_handle(pool, handle);
1549 		return (unsigned long)ERR_PTR(-ENOMEM);
1550 	}
1551 
1552 	spin_lock(&pool->lock);
1553 	obj = obj_malloc(pool, zspage, handle);
1554 	newfg = get_fullness_group(class, zspage);
1555 	insert_zspage(class, zspage, newfg);
1556 	set_zspage_mapping(zspage, class->index, newfg);
1557 	record_obj(handle, obj);
1558 	atomic_long_add(class->pages_per_zspage,
1559 				&pool->pages_allocated);
1560 	class_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);
1561 	class_stat_inc(class, OBJ_USED, 1);
1562 
1563 	/* We completely set up zspage so mark them as movable */
1564 	SetZsPageMovable(pool, zspage);
1565 	spin_unlock(&pool->lock);
1566 
1567 	return handle;
1568 }
1569 EXPORT_SYMBOL_GPL(zs_malloc);
1570 
1571 static void obj_free(int class_size, unsigned long obj)
1572 {
1573 	struct link_free *link;
1574 	struct zspage *zspage;
1575 	struct page *f_page;
1576 	unsigned long f_offset;
1577 	unsigned int f_objidx;
1578 	void *vaddr;
1579 
1580 	obj_to_location(obj, &f_page, &f_objidx);
1581 	f_offset = (class_size * f_objidx) & ~PAGE_MASK;
1582 	zspage = get_zspage(f_page);
1583 
1584 	vaddr = kmap_atomic(f_page);
1585 
1586 	/* Insert this object in containing zspage's freelist */
1587 	link = (struct link_free *)(vaddr + f_offset);
1588 	if (likely(!ZsHugePage(zspage)))
1589 		link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
1590 	else
1591 		f_page->index = 0;
1592 	kunmap_atomic(vaddr);
1593 	set_freeobj(zspage, f_objidx);
1594 	mod_zspage_inuse(zspage, -1);
1595 }
1596 
1597 void zs_free(struct zs_pool *pool, unsigned long handle)
1598 {
1599 	struct zspage *zspage;
1600 	struct page *f_page;
1601 	unsigned long obj;
1602 	struct size_class *class;
1603 	enum fullness_group fullness;
1604 
1605 	if (IS_ERR_OR_NULL((void *)handle))
1606 		return;
1607 
1608 	/*
1609 	 * The pool->lock protects the race with zpage's migration
1610 	 * so it's safe to get the page from handle.
1611 	 */
1612 	spin_lock(&pool->lock);
1613 	obj = handle_to_obj(handle);
1614 	obj_to_page(obj, &f_page);
1615 	zspage = get_zspage(f_page);
1616 	class = zspage_class(pool, zspage);
1617 
1618 	obj_free(class->size, obj);
1619 	class_stat_dec(class, OBJ_USED, 1);
1620 
1621 #ifdef CONFIG_ZPOOL
1622 	if (zspage->under_reclaim) {
1623 		/*
1624 		 * Reclaim needs the handles during writeback. It'll free
1625 		 * them along with the zspage when it's done with them.
1626 		 *
1627 		 * Record current deferred handle at the memory location
1628 		 * whose address is given by handle.
1629 		 */
1630 		record_obj(handle, (unsigned long)zspage->deferred_handles);
1631 		zspage->deferred_handles = (unsigned long *)handle;
1632 		spin_unlock(&pool->lock);
1633 		return;
1634 	}
1635 #endif
1636 	fullness = fix_fullness_group(class, zspage);
1637 	if (fullness == ZS_EMPTY)
1638 		free_zspage(pool, class, zspage);
1639 
1640 	spin_unlock(&pool->lock);
1641 	cache_free_handle(pool, handle);
1642 }
1643 EXPORT_SYMBOL_GPL(zs_free);
1644 
1645 static void zs_object_copy(struct size_class *class, unsigned long dst,
1646 				unsigned long src)
1647 {
1648 	struct page *s_page, *d_page;
1649 	unsigned int s_objidx, d_objidx;
1650 	unsigned long s_off, d_off;
1651 	void *s_addr, *d_addr;
1652 	int s_size, d_size, size;
1653 	int written = 0;
1654 
1655 	s_size = d_size = class->size;
1656 
1657 	obj_to_location(src, &s_page, &s_objidx);
1658 	obj_to_location(dst, &d_page, &d_objidx);
1659 
1660 	s_off = (class->size * s_objidx) & ~PAGE_MASK;
1661 	d_off = (class->size * d_objidx) & ~PAGE_MASK;
1662 
1663 	if (s_off + class->size > PAGE_SIZE)
1664 		s_size = PAGE_SIZE - s_off;
1665 
1666 	if (d_off + class->size > PAGE_SIZE)
1667 		d_size = PAGE_SIZE - d_off;
1668 
1669 	s_addr = kmap_atomic(s_page);
1670 	d_addr = kmap_atomic(d_page);
1671 
1672 	while (1) {
1673 		size = min(s_size, d_size);
1674 		memcpy(d_addr + d_off, s_addr + s_off, size);
1675 		written += size;
1676 
1677 		if (written == class->size)
1678 			break;
1679 
1680 		s_off += size;
1681 		s_size -= size;
1682 		d_off += size;
1683 		d_size -= size;
1684 
1685 		/*
1686 		 * Calling kunmap_atomic(d_addr) is necessary. kunmap_atomic()
1687 		 * calls must occurs in reverse order of calls to kmap_atomic().
1688 		 * So, to call kunmap_atomic(s_addr) we should first call
1689 		 * kunmap_atomic(d_addr). For more details see
1690 		 * Documentation/mm/highmem.rst.
1691 		 */
1692 		if (s_off >= PAGE_SIZE) {
1693 			kunmap_atomic(d_addr);
1694 			kunmap_atomic(s_addr);
1695 			s_page = get_next_page(s_page);
1696 			s_addr = kmap_atomic(s_page);
1697 			d_addr = kmap_atomic(d_page);
1698 			s_size = class->size - written;
1699 			s_off = 0;
1700 		}
1701 
1702 		if (d_off >= PAGE_SIZE) {
1703 			kunmap_atomic(d_addr);
1704 			d_page = get_next_page(d_page);
1705 			d_addr = kmap_atomic(d_page);
1706 			d_size = class->size - written;
1707 			d_off = 0;
1708 		}
1709 	}
1710 
1711 	kunmap_atomic(d_addr);
1712 	kunmap_atomic(s_addr);
1713 }
1714 
1715 /*
1716  * Find alloced object in zspage from index object and
1717  * return handle.
1718  */
1719 static unsigned long find_alloced_obj(struct size_class *class,
1720 					struct page *page, int *obj_idx)
1721 {
1722 	unsigned int offset;
1723 	int index = *obj_idx;
1724 	unsigned long handle = 0;
1725 	void *addr = kmap_atomic(page);
1726 
1727 	offset = get_first_obj_offset(page);
1728 	offset += class->size * index;
1729 
1730 	while (offset < PAGE_SIZE) {
1731 		if (obj_allocated(page, addr + offset, &handle))
1732 			break;
1733 
1734 		offset += class->size;
1735 		index++;
1736 	}
1737 
1738 	kunmap_atomic(addr);
1739 
1740 	*obj_idx = index;
1741 
1742 	return handle;
1743 }
1744 
1745 struct zs_compact_control {
1746 	/* Source spage for migration which could be a subpage of zspage */
1747 	struct page *s_page;
1748 	/* Destination page for migration which should be a first page
1749 	 * of zspage. */
1750 	struct page *d_page;
1751 	 /* Starting object index within @s_page which used for live object
1752 	  * in the subpage. */
1753 	int obj_idx;
1754 };
1755 
1756 static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
1757 				struct zs_compact_control *cc)
1758 {
1759 	unsigned long used_obj, free_obj;
1760 	unsigned long handle;
1761 	struct page *s_page = cc->s_page;
1762 	struct page *d_page = cc->d_page;
1763 	int obj_idx = cc->obj_idx;
1764 	int ret = 0;
1765 
1766 	while (1) {
1767 		handle = find_alloced_obj(class, s_page, &obj_idx);
1768 		if (!handle) {
1769 			s_page = get_next_page(s_page);
1770 			if (!s_page)
1771 				break;
1772 			obj_idx = 0;
1773 			continue;
1774 		}
1775 
1776 		/* Stop if there is no more space */
1777 		if (zspage_full(class, get_zspage(d_page))) {
1778 			ret = -ENOMEM;
1779 			break;
1780 		}
1781 
1782 		used_obj = handle_to_obj(handle);
1783 		free_obj = obj_malloc(pool, get_zspage(d_page), handle);
1784 		zs_object_copy(class, free_obj, used_obj);
1785 		obj_idx++;
1786 		record_obj(handle, free_obj);
1787 		obj_free(class->size, used_obj);
1788 	}
1789 
1790 	/* Remember last position in this iteration */
1791 	cc->s_page = s_page;
1792 	cc->obj_idx = obj_idx;
1793 
1794 	return ret;
1795 }
1796 
1797 static struct zspage *isolate_zspage(struct size_class *class, bool source)
1798 {
1799 	int i;
1800 	struct zspage *zspage;
1801 	enum fullness_group fg[2] = {ZS_ALMOST_EMPTY, ZS_ALMOST_FULL};
1802 
1803 	if (!source) {
1804 		fg[0] = ZS_ALMOST_FULL;
1805 		fg[1] = ZS_ALMOST_EMPTY;
1806 	}
1807 
1808 	for (i = 0; i < 2; i++) {
1809 		zspage = list_first_entry_or_null(&class->fullness_list[fg[i]],
1810 							struct zspage, list);
1811 		if (zspage) {
1812 			remove_zspage(class, zspage, fg[i]);
1813 			return zspage;
1814 		}
1815 	}
1816 
1817 	return zspage;
1818 }
1819 
1820 /*
1821  * putback_zspage - add @zspage into right class's fullness list
1822  * @class: destination class
1823  * @zspage: target page
1824  *
1825  * Return @zspage's fullness_group
1826  */
1827 static enum fullness_group putback_zspage(struct size_class *class,
1828 			struct zspage *zspage)
1829 {
1830 	enum fullness_group fullness;
1831 
1832 	fullness = get_fullness_group(class, zspage);
1833 	insert_zspage(class, zspage, fullness);
1834 	set_zspage_mapping(zspage, class->index, fullness);
1835 
1836 	return fullness;
1837 }
1838 
1839 #if defined(CONFIG_ZPOOL) || defined(CONFIG_COMPACTION)
1840 /*
1841  * To prevent zspage destroy during migration, zspage freeing should
1842  * hold locks of all pages in the zspage.
1843  */
1844 static void lock_zspage(struct zspage *zspage)
1845 {
1846 	struct page *curr_page, *page;
1847 
1848 	/*
1849 	 * Pages we haven't locked yet can be migrated off the list while we're
1850 	 * trying to lock them, so we need to be careful and only attempt to
1851 	 * lock each page under migrate_read_lock(). Otherwise, the page we lock
1852 	 * may no longer belong to the zspage. This means that we may wait for
1853 	 * the wrong page to unlock, so we must take a reference to the page
1854 	 * prior to waiting for it to unlock outside migrate_read_lock().
1855 	 */
1856 	while (1) {
1857 		migrate_read_lock(zspage);
1858 		page = get_first_page(zspage);
1859 		if (trylock_page(page))
1860 			break;
1861 		get_page(page);
1862 		migrate_read_unlock(zspage);
1863 		wait_on_page_locked(page);
1864 		put_page(page);
1865 	}
1866 
1867 	curr_page = page;
1868 	while ((page = get_next_page(curr_page))) {
1869 		if (trylock_page(page)) {
1870 			curr_page = page;
1871 		} else {
1872 			get_page(page);
1873 			migrate_read_unlock(zspage);
1874 			wait_on_page_locked(page);
1875 			put_page(page);
1876 			migrate_read_lock(zspage);
1877 		}
1878 	}
1879 	migrate_read_unlock(zspage);
1880 }
1881 #endif /* defined(CONFIG_ZPOOL) || defined(CONFIG_COMPACTION) */
1882 
1883 #ifdef CONFIG_ZPOOL
1884 /*
1885  * Unlocks all the pages of the zspage.
1886  *
1887  * pool->lock must be held before this function is called
1888  * to prevent the underlying pages from migrating.
1889  */
1890 static void unlock_zspage(struct zspage *zspage)
1891 {
1892 	struct page *page = get_first_page(zspage);
1893 
1894 	do {
1895 		unlock_page(page);
1896 	} while ((page = get_next_page(page)) != NULL);
1897 }
1898 #endif /* CONFIG_ZPOOL */
1899 
1900 static void migrate_lock_init(struct zspage *zspage)
1901 {
1902 	rwlock_init(&zspage->lock);
1903 }
1904 
1905 static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock)
1906 {
1907 	read_lock(&zspage->lock);
1908 }
1909 
1910 static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
1911 {
1912 	read_unlock(&zspage->lock);
1913 }
1914 
1915 #ifdef CONFIG_COMPACTION
1916 static void migrate_write_lock(struct zspage *zspage)
1917 {
1918 	write_lock(&zspage->lock);
1919 }
1920 
1921 static void migrate_write_lock_nested(struct zspage *zspage)
1922 {
1923 	write_lock_nested(&zspage->lock, SINGLE_DEPTH_NESTING);
1924 }
1925 
1926 static void migrate_write_unlock(struct zspage *zspage)
1927 {
1928 	write_unlock(&zspage->lock);
1929 }
1930 
1931 /* Number of isolated subpage for *page migration* in this zspage */
1932 static void inc_zspage_isolation(struct zspage *zspage)
1933 {
1934 	zspage->isolated++;
1935 }
1936 
1937 static void dec_zspage_isolation(struct zspage *zspage)
1938 {
1939 	VM_BUG_ON(zspage->isolated == 0);
1940 	zspage->isolated--;
1941 }
1942 
1943 static const struct movable_operations zsmalloc_mops;
1944 
1945 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
1946 				struct page *newpage, struct page *oldpage)
1947 {
1948 	struct page *page;
1949 	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
1950 	int idx = 0;
1951 
1952 	page = get_first_page(zspage);
1953 	do {
1954 		if (page == oldpage)
1955 			pages[idx] = newpage;
1956 		else
1957 			pages[idx] = page;
1958 		idx++;
1959 	} while ((page = get_next_page(page)) != NULL);
1960 
1961 	create_page_chain(class, zspage, pages);
1962 	set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
1963 	if (unlikely(ZsHugePage(zspage)))
1964 		newpage->index = oldpage->index;
1965 	__SetPageMovable(newpage, &zsmalloc_mops);
1966 }
1967 
1968 static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
1969 {
1970 	struct zspage *zspage;
1971 
1972 	/*
1973 	 * Page is locked so zspage couldn't be destroyed. For detail, look at
1974 	 * lock_zspage in free_zspage.
1975 	 */
1976 	VM_BUG_ON_PAGE(!PageMovable(page), page);
1977 	VM_BUG_ON_PAGE(PageIsolated(page), page);
1978 
1979 	zspage = get_zspage(page);
1980 	migrate_write_lock(zspage);
1981 	inc_zspage_isolation(zspage);
1982 	migrate_write_unlock(zspage);
1983 
1984 	return true;
1985 }
1986 
1987 static int zs_page_migrate(struct page *newpage, struct page *page,
1988 		enum migrate_mode mode)
1989 {
1990 	struct zs_pool *pool;
1991 	struct size_class *class;
1992 	struct zspage *zspage;
1993 	struct page *dummy;
1994 	void *s_addr, *d_addr, *addr;
1995 	unsigned int offset;
1996 	unsigned long handle;
1997 	unsigned long old_obj, new_obj;
1998 	unsigned int obj_idx;
1999 
2000 	/*
2001 	 * We cannot support the _NO_COPY case here, because copy needs to
2002 	 * happen under the zs lock, which does not work with
2003 	 * MIGRATE_SYNC_NO_COPY workflow.
2004 	 */
2005 	if (mode == MIGRATE_SYNC_NO_COPY)
2006 		return -EINVAL;
2007 
2008 	VM_BUG_ON_PAGE(!PageMovable(page), page);
2009 	VM_BUG_ON_PAGE(!PageIsolated(page), page);
2010 
2011 	/* The page is locked, so this pointer must remain valid */
2012 	zspage = get_zspage(page);
2013 	pool = zspage->pool;
2014 
2015 	/*
2016 	 * The pool's lock protects the race between zpage migration
2017 	 * and zs_free.
2018 	 */
2019 	spin_lock(&pool->lock);
2020 	class = zspage_class(pool, zspage);
2021 
2022 	/* the migrate_write_lock protects zpage access via zs_map_object */
2023 	migrate_write_lock(zspage);
2024 
2025 	offset = get_first_obj_offset(page);
2026 	s_addr = kmap_atomic(page);
2027 
2028 	/*
2029 	 * Here, any user cannot access all objects in the zspage so let's move.
2030 	 */
2031 	d_addr = kmap_atomic(newpage);
2032 	memcpy(d_addr, s_addr, PAGE_SIZE);
2033 	kunmap_atomic(d_addr);
2034 
2035 	for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
2036 					addr += class->size) {
2037 		if (obj_allocated(page, addr, &handle)) {
2038 
2039 			old_obj = handle_to_obj(handle);
2040 			obj_to_location(old_obj, &dummy, &obj_idx);
2041 			new_obj = (unsigned long)location_to_obj(newpage,
2042 								obj_idx);
2043 			record_obj(handle, new_obj);
2044 		}
2045 	}
2046 	kunmap_atomic(s_addr);
2047 
2048 	replace_sub_page(class, zspage, newpage, page);
2049 	/*
2050 	 * Since we complete the data copy and set up new zspage structure,
2051 	 * it's okay to release the pool's lock.
2052 	 */
2053 	spin_unlock(&pool->lock);
2054 	dec_zspage_isolation(zspage);
2055 	migrate_write_unlock(zspage);
2056 
2057 	get_page(newpage);
2058 	if (page_zone(newpage) != page_zone(page)) {
2059 		dec_zone_page_state(page, NR_ZSPAGES);
2060 		inc_zone_page_state(newpage, NR_ZSPAGES);
2061 	}
2062 
2063 	reset_page(page);
2064 	put_page(page);
2065 
2066 	return MIGRATEPAGE_SUCCESS;
2067 }
2068 
2069 static void zs_page_putback(struct page *page)
2070 {
2071 	struct zspage *zspage;
2072 
2073 	VM_BUG_ON_PAGE(!PageMovable(page), page);
2074 	VM_BUG_ON_PAGE(!PageIsolated(page), page);
2075 
2076 	zspage = get_zspage(page);
2077 	migrate_write_lock(zspage);
2078 	dec_zspage_isolation(zspage);
2079 	migrate_write_unlock(zspage);
2080 }
2081 
2082 static const struct movable_operations zsmalloc_mops = {
2083 	.isolate_page = zs_page_isolate,
2084 	.migrate_page = zs_page_migrate,
2085 	.putback_page = zs_page_putback,
2086 };
2087 
2088 /*
2089  * Caller should hold page_lock of all pages in the zspage
2090  * In here, we cannot use zspage meta data.
2091  */
2092 static void async_free_zspage(struct work_struct *work)
2093 {
2094 	int i;
2095 	struct size_class *class;
2096 	unsigned int class_idx;
2097 	enum fullness_group fullness;
2098 	struct zspage *zspage, *tmp;
2099 	LIST_HEAD(free_pages);
2100 	struct zs_pool *pool = container_of(work, struct zs_pool,
2101 					free_work);
2102 
2103 	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
2104 		class = pool->size_class[i];
2105 		if (class->index != i)
2106 			continue;
2107 
2108 		spin_lock(&pool->lock);
2109 		list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages);
2110 		spin_unlock(&pool->lock);
2111 	}
2112 
2113 	list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
2114 		list_del(&zspage->list);
2115 		lock_zspage(zspage);
2116 
2117 		get_zspage_mapping(zspage, &class_idx, &fullness);
2118 		VM_BUG_ON(fullness != ZS_EMPTY);
2119 		class = pool->size_class[class_idx];
2120 		spin_lock(&pool->lock);
2121 #ifdef CONFIG_ZPOOL
2122 		list_del(&zspage->lru);
2123 #endif
2124 		__free_zspage(pool, class, zspage);
2125 		spin_unlock(&pool->lock);
2126 	}
2127 };
2128 
2129 static void kick_deferred_free(struct zs_pool *pool)
2130 {
2131 	schedule_work(&pool->free_work);
2132 }
2133 
2134 static void zs_flush_migration(struct zs_pool *pool)
2135 {
2136 	flush_work(&pool->free_work);
2137 }
2138 
2139 static void init_deferred_free(struct zs_pool *pool)
2140 {
2141 	INIT_WORK(&pool->free_work, async_free_zspage);
2142 }
2143 
2144 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
2145 {
2146 	struct page *page = get_first_page(zspage);
2147 
2148 	do {
2149 		WARN_ON(!trylock_page(page));
2150 		__SetPageMovable(page, &zsmalloc_mops);
2151 		unlock_page(page);
2152 	} while ((page = get_next_page(page)) != NULL);
2153 }
2154 #else
2155 static inline void zs_flush_migration(struct zs_pool *pool) { }
2156 #endif
2157 
2158 /*
2159  *
2160  * Based on the number of unused allocated objects calculate
2161  * and return the number of pages that we can free.
2162  */
2163 static unsigned long zs_can_compact(struct size_class *class)
2164 {
2165 	unsigned long obj_wasted;
2166 	unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
2167 	unsigned long obj_used = zs_stat_get(class, OBJ_USED);
2168 
2169 	if (obj_allocated <= obj_used)
2170 		return 0;
2171 
2172 	obj_wasted = obj_allocated - obj_used;
2173 	obj_wasted /= class->objs_per_zspage;
2174 
2175 	return obj_wasted * class->pages_per_zspage;
2176 }
2177 
2178 static unsigned long __zs_compact(struct zs_pool *pool,
2179 				  struct size_class *class)
2180 {
2181 	struct zs_compact_control cc;
2182 	struct zspage *src_zspage;
2183 	struct zspage *dst_zspage = NULL;
2184 	unsigned long pages_freed = 0;
2185 
2186 	/*
2187 	 * protect the race between zpage migration and zs_free
2188 	 * as well as zpage allocation/free
2189 	 */
2190 	spin_lock(&pool->lock);
2191 	while ((src_zspage = isolate_zspage(class, true))) {
2192 		/* protect someone accessing the zspage(i.e., zs_map_object) */
2193 		migrate_write_lock(src_zspage);
2194 
2195 		if (!zs_can_compact(class))
2196 			break;
2197 
2198 		cc.obj_idx = 0;
2199 		cc.s_page = get_first_page(src_zspage);
2200 
2201 		while ((dst_zspage = isolate_zspage(class, false))) {
2202 			migrate_write_lock_nested(dst_zspage);
2203 
2204 			cc.d_page = get_first_page(dst_zspage);
2205 			/*
2206 			 * If there is no more space in dst_page, resched
2207 			 * and see if anyone had allocated another zspage.
2208 			 */
2209 			if (!migrate_zspage(pool, class, &cc))
2210 				break;
2211 
2212 			putback_zspage(class, dst_zspage);
2213 			migrate_write_unlock(dst_zspage);
2214 			dst_zspage = NULL;
2215 			if (spin_is_contended(&pool->lock))
2216 				break;
2217 		}
2218 
2219 		/* Stop if we couldn't find slot */
2220 		if (dst_zspage == NULL)
2221 			break;
2222 
2223 		putback_zspage(class, dst_zspage);
2224 		migrate_write_unlock(dst_zspage);
2225 
2226 		if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
2227 			migrate_write_unlock(src_zspage);
2228 			free_zspage(pool, class, src_zspage);
2229 			pages_freed += class->pages_per_zspage;
2230 		} else
2231 			migrate_write_unlock(src_zspage);
2232 		spin_unlock(&pool->lock);
2233 		cond_resched();
2234 		spin_lock(&pool->lock);
2235 	}
2236 
2237 	if (src_zspage) {
2238 		putback_zspage(class, src_zspage);
2239 		migrate_write_unlock(src_zspage);
2240 	}
2241 
2242 	spin_unlock(&pool->lock);
2243 
2244 	return pages_freed;
2245 }
2246 
2247 unsigned long zs_compact(struct zs_pool *pool)
2248 {
2249 	int i;
2250 	struct size_class *class;
2251 	unsigned long pages_freed = 0;
2252 
2253 	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
2254 		class = pool->size_class[i];
2255 		if (class->index != i)
2256 			continue;
2257 		pages_freed += __zs_compact(pool, class);
2258 	}
2259 	atomic_long_add(pages_freed, &pool->stats.pages_compacted);
2260 
2261 	return pages_freed;
2262 }
2263 EXPORT_SYMBOL_GPL(zs_compact);
2264 
2265 void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
2266 {
2267 	memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
2268 }
2269 EXPORT_SYMBOL_GPL(zs_pool_stats);
2270 
2271 static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
2272 		struct shrink_control *sc)
2273 {
2274 	unsigned long pages_freed;
2275 	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
2276 			shrinker);
2277 
2278 	/*
2279 	 * Compact classes and calculate compaction delta.
2280 	 * Can run concurrently with a manually triggered
2281 	 * (by user) compaction.
2282 	 */
2283 	pages_freed = zs_compact(pool);
2284 
2285 	return pages_freed ? pages_freed : SHRINK_STOP;
2286 }
2287 
2288 static unsigned long zs_shrinker_count(struct shrinker *shrinker,
2289 		struct shrink_control *sc)
2290 {
2291 	int i;
2292 	struct size_class *class;
2293 	unsigned long pages_to_free = 0;
2294 	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
2295 			shrinker);
2296 
2297 	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
2298 		class = pool->size_class[i];
2299 		if (class->index != i)
2300 			continue;
2301 
2302 		pages_to_free += zs_can_compact(class);
2303 	}
2304 
2305 	return pages_to_free;
2306 }
2307 
2308 static void zs_unregister_shrinker(struct zs_pool *pool)
2309 {
2310 	unregister_shrinker(&pool->shrinker);
2311 }
2312 
2313 static int zs_register_shrinker(struct zs_pool *pool)
2314 {
2315 	pool->shrinker.scan_objects = zs_shrinker_scan;
2316 	pool->shrinker.count_objects = zs_shrinker_count;
2317 	pool->shrinker.batch = 0;
2318 	pool->shrinker.seeks = DEFAULT_SEEKS;
2319 
2320 	return register_shrinker(&pool->shrinker, "mm-zspool:%s",
2321 				 pool->name);
2322 }
2323 
2324 /**
2325  * zs_create_pool - Creates an allocation pool to work from.
2326  * @name: pool name to be created
2327  *
2328  * This function must be called before anything when using
2329  * the zsmalloc allocator.
2330  *
2331  * On success, a pointer to the newly created pool is returned,
2332  * otherwise NULL.
2333  */
2334 struct zs_pool *zs_create_pool(const char *name)
2335 {
2336 	int i;
2337 	struct zs_pool *pool;
2338 	struct size_class *prev_class = NULL;
2339 
2340 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
2341 	if (!pool)
2342 		return NULL;
2343 
2344 	init_deferred_free(pool);
2345 	spin_lock_init(&pool->lock);
2346 
2347 	pool->name = kstrdup(name, GFP_KERNEL);
2348 	if (!pool->name)
2349 		goto err;
2350 
2351 	if (create_cache(pool))
2352 		goto err;
2353 
2354 	/*
2355 	 * Iterate reversely, because, size of size_class that we want to use
2356 	 * for merging should be larger or equal to current size.
2357 	 */
2358 	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
2359 		int size;
2360 		int pages_per_zspage;
2361 		int objs_per_zspage;
2362 		struct size_class *class;
2363 		int fullness = 0;
2364 
2365 		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
2366 		if (size > ZS_MAX_ALLOC_SIZE)
2367 			size = ZS_MAX_ALLOC_SIZE;
2368 		pages_per_zspage = get_pages_per_zspage(size);
2369 		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
2370 
2371 		/*
2372 		 * We iterate from biggest down to smallest classes,
2373 		 * so huge_class_size holds the size of the first huge
2374 		 * class. Any object bigger than or equal to that will
2375 		 * endup in the huge class.
2376 		 */
2377 		if (pages_per_zspage != 1 && objs_per_zspage != 1 &&
2378 				!huge_class_size) {
2379 			huge_class_size = size;
2380 			/*
2381 			 * The object uses ZS_HANDLE_SIZE bytes to store the
2382 			 * handle. We need to subtract it, because zs_malloc()
2383 			 * unconditionally adds handle size before it performs
2384 			 * size class search - so object may be smaller than
2385 			 * huge class size, yet it still can end up in the huge
2386 			 * class because it grows by ZS_HANDLE_SIZE extra bytes
2387 			 * right before class lookup.
2388 			 */
2389 			huge_class_size -= (ZS_HANDLE_SIZE - 1);
2390 		}
2391 
2392 		/*
2393 		 * size_class is used for normal zsmalloc operation such
2394 		 * as alloc/free for that size. Although it is natural that we
2395 		 * have one size_class for each size, there is a chance that we
2396 		 * can get more memory utilization if we use one size_class for
2397 		 * many different sizes whose size_class have same
2398 		 * characteristics. So, we makes size_class point to
2399 		 * previous size_class if possible.
2400 		 */
2401 		if (prev_class) {
2402 			if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
2403 				pool->size_class[i] = prev_class;
2404 				continue;
2405 			}
2406 		}
2407 
2408 		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
2409 		if (!class)
2410 			goto err;
2411 
2412 		class->size = size;
2413 		class->index = i;
2414 		class->pages_per_zspage = pages_per_zspage;
2415 		class->objs_per_zspage = objs_per_zspage;
2416 		pool->size_class[i] = class;
2417 		for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
2418 							fullness++)
2419 			INIT_LIST_HEAD(&class->fullness_list[fullness]);
2420 
2421 		prev_class = class;
2422 	}
2423 
2424 	/* debug only, don't abort if it fails */
2425 	zs_pool_stat_create(pool, name);
2426 
2427 	/*
2428 	 * Not critical since shrinker is only used to trigger internal
2429 	 * defragmentation of the pool which is pretty optional thing.  If
2430 	 * registration fails we still can use the pool normally and user can
2431 	 * trigger compaction manually. Thus, ignore return code.
2432 	 */
2433 	zs_register_shrinker(pool);
2434 
2435 #ifdef CONFIG_ZPOOL
2436 	INIT_LIST_HEAD(&pool->lru);
2437 #endif
2438 
2439 	return pool;
2440 
2441 err:
2442 	zs_destroy_pool(pool);
2443 	return NULL;
2444 }
2445 EXPORT_SYMBOL_GPL(zs_create_pool);
2446 
2447 void zs_destroy_pool(struct zs_pool *pool)
2448 {
2449 	int i;
2450 
2451 	zs_unregister_shrinker(pool);
2452 	zs_flush_migration(pool);
2453 	zs_pool_stat_destroy(pool);
2454 
2455 	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
2456 		int fg;
2457 		struct size_class *class = pool->size_class[i];
2458 
2459 		if (!class)
2460 			continue;
2461 
2462 		if (class->index != i)
2463 			continue;
2464 
2465 		for (fg = ZS_EMPTY; fg < NR_ZS_FULLNESS; fg++) {
2466 			if (!list_empty(&class->fullness_list[fg])) {
2467 				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
2468 					class->size, fg);
2469 			}
2470 		}
2471 		kfree(class);
2472 	}
2473 
2474 	destroy_cache(pool);
2475 	kfree(pool->name);
2476 	kfree(pool);
2477 }
2478 EXPORT_SYMBOL_GPL(zs_destroy_pool);
2479 
2480 #ifdef CONFIG_ZPOOL
2481 static int zs_reclaim_page(struct zs_pool *pool, unsigned int retries)
2482 {
2483 	int i, obj_idx, ret = 0;
2484 	unsigned long handle;
2485 	struct zspage *zspage;
2486 	struct page *page;
2487 	enum fullness_group fullness;
2488 
2489 	/* Lock LRU and fullness list */
2490 	spin_lock(&pool->lock);
2491 	if (list_empty(&pool->lru)) {
2492 		spin_unlock(&pool->lock);
2493 		return -EINVAL;
2494 	}
2495 
2496 	for (i = 0; i < retries; i++) {
2497 		struct size_class *class;
2498 
2499 		zspage = list_last_entry(&pool->lru, struct zspage, lru);
2500 		list_del(&zspage->lru);
2501 
2502 		/* zs_free may free objects, but not the zspage and handles */
2503 		zspage->under_reclaim = true;
2504 
2505 		class = zspage_class(pool, zspage);
2506 		fullness = get_fullness_group(class, zspage);
2507 
2508 		/* Lock out object allocations and object compaction */
2509 		remove_zspage(class, zspage, fullness);
2510 
2511 		spin_unlock(&pool->lock);
2512 		cond_resched();
2513 
2514 		/* Lock backing pages into place */
2515 		lock_zspage(zspage);
2516 
2517 		obj_idx = 0;
2518 		page = get_first_page(zspage);
2519 		while (1) {
2520 			handle = find_alloced_obj(class, page, &obj_idx);
2521 			if (!handle) {
2522 				page = get_next_page(page);
2523 				if (!page)
2524 					break;
2525 				obj_idx = 0;
2526 				continue;
2527 			}
2528 
2529 			/*
2530 			 * This will write the object and call zs_free.
2531 			 *
2532 			 * zs_free will free the object, but the
2533 			 * under_reclaim flag prevents it from freeing
2534 			 * the zspage altogether. This is necessary so
2535 			 * that we can continue working with the
2536 			 * zspage potentially after the last object
2537 			 * has been freed.
2538 			 */
2539 			ret = pool->zpool_ops->evict(pool->zpool, handle);
2540 			if (ret)
2541 				goto next;
2542 
2543 			obj_idx++;
2544 		}
2545 
2546 next:
2547 		/* For freeing the zspage, or putting it back in the pool and LRU list. */
2548 		spin_lock(&pool->lock);
2549 		zspage->under_reclaim = false;
2550 
2551 		if (!get_zspage_inuse(zspage)) {
2552 			/*
2553 			 * Fullness went stale as zs_free() won't touch it
2554 			 * while the page is removed from the pool. Fix it
2555 			 * up for the check in __free_zspage().
2556 			 */
2557 			zspage->fullness = ZS_EMPTY;
2558 
2559 			__free_zspage(pool, class, zspage);
2560 			spin_unlock(&pool->lock);
2561 			return 0;
2562 		}
2563 
2564 		putback_zspage(class, zspage);
2565 		list_add(&zspage->lru, &pool->lru);
2566 		unlock_zspage(zspage);
2567 	}
2568 
2569 	spin_unlock(&pool->lock);
2570 	return -EAGAIN;
2571 }
2572 #endif /* CONFIG_ZPOOL */
2573 
2574 static int __init zs_init(void)
2575 {
2576 	int ret;
2577 
2578 	ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
2579 				zs_cpu_prepare, zs_cpu_dead);
2580 	if (ret)
2581 		goto out;
2582 
2583 #ifdef CONFIG_ZPOOL
2584 	zpool_register_driver(&zs_zpool_driver);
2585 #endif
2586 
2587 	zs_stat_init();
2588 
2589 	return 0;
2590 
2591 out:
2592 	return ret;
2593 }
2594 
2595 static void __exit zs_exit(void)
2596 {
2597 #ifdef CONFIG_ZPOOL
2598 	zpool_unregister_driver(&zs_zpool_driver);
2599 #endif
2600 	cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);
2601 
2602 	zs_stat_exit();
2603 }
2604 
2605 module_init(zs_init);
2606 module_exit(zs_exit);
2607 
2608 MODULE_LICENSE("Dual BSD/GPL");
2609 MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
2610