xref: /openbmc/linux/mm/zsmalloc.c (revision 609e478b)
1 /*
2  * zsmalloc memory allocator
3  *
4  * Copyright (C) 2011  Nitin Gupta
5  * Copyright (C) 2012, 2013 Minchan Kim
6  *
7  * This code is released using a dual license strategy: BSD/GPL
8  * You can choose the license that better fits your requirements.
9  *
10  * Released under the terms of 3-clause BSD License
11  * Released under the terms of GNU General Public License Version 2.0
12  */
13 
14 /*
15  * This allocator is designed for use with zram. Thus, the allocator is
16  * supposed to work well under low memory conditions. In particular, it
17  * never attempts higher order page allocation which is very likely to
18  * fail under memory pressure. On the other hand, if we just use single
19  * (0-order) pages, it would suffer from very high fragmentation --
20  * any object of size PAGE_SIZE/2 or larger would occupy an entire page.
21  * This was one of the major issues with its predecessor (xvmalloc).
22  *
23  * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
24  * and links them together using various 'struct page' fields. These linked
25  * pages act as a single higher-order page i.e. an object can span 0-order
26  * page boundaries. The code refers to these linked pages as a single entity
27  * called zspage.
28  *
29  * For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE
30  * since this satisfies the requirements of all its current users (in the
31  * worst case, a page is incompressible and is thus stored "as-is", i.e. in
32  * uncompressed form). For allocation requests larger than this size, failure
33  * is returned (see zs_malloc).
34  *
35  * Additionally, zs_malloc() does not return a dereferenceable pointer.
36  * Instead, it returns an opaque handle (unsigned long) which encodes actual
37  * location of the allocated object. The reason for this indirection is that
38  * zsmalloc does not keep zspages permanently mapped since that would cause
39  * issues on 32-bit systems where the VA region for kernel space mappings
40  * is very small. So, before using the allocated memory, the object has to
41  * be mapped using zs_map_object() to get a usable pointer and subsequently
42  * unmapped using zs_unmap_object().
43  *
44  * Following is how we use various fields and flags of underlying
45  * struct page(s) to form a zspage.
46  *
47  * Usage of struct page fields:
48  *	page->first_page: points to the first component (0-order) page
49  *	page->index (union with page->freelist): offset of the first object
50  *		starting in this page. For the first page, this is
51  *		always 0, so we use this field (aka freelist) to point
52  *		to the first free object in zspage.
53  *	page->lru: links together all component pages (except the first page)
54  *		of a zspage
55  *
56  *	For _first_ page only:
57  *
58  *	page->private (union with page->first_page): refers to the
59  *		component page after the first page
60  *	page->freelist: points to the first free object in zspage.
61  *		Free objects are linked together using in-place
62  *		metadata.
63  *	page->objects: maximum number of objects we can store in this
64  *		zspage (class->pages_per_zspage * PAGE_SIZE / class->size)
65  *	page->lru: links together first pages of various zspages.
66  *		Basically forming list of zspages in a fullness group.
67  *	page->mapping: class index and fullness group of the zspage
68  *
69  * Usage of struct page flags:
70  *	PG_private: identifies the first component page
71  *	PG_private2: identifies the last component page
72  *
73  */
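
/*
 * Illustrative usage sketch (not part of the allocator; error handling is
 * elided, the GFP flags are only an example, and the names src_buf/len are
 * made up for the example):
 *
 *	struct zs_pool *pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
 *	unsigned long handle = zs_malloc(pool, len);	(len <= PAGE_SIZE)
 *
 *	if (handle) {
 *		void *dst = zs_map_object(pool, handle, ZS_MM_WO);
 *
 *		memcpy(dst, src_buf, len);
 *		zs_unmap_object(pool, handle);
 *		...
 *		zs_free(pool, handle);
 *	}
 *	zs_destroy_pool(pool);
 *
 * This mirrors how a compressed-RAM user such as zram drives the API; the
 * handle is opaque and must never be dereferenced directly.
 */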
74 
75 #ifdef CONFIG_ZSMALLOC_DEBUG
76 #define DEBUG
77 #endif
78 
79 #include <linux/module.h>
80 #include <linux/kernel.h>
81 #include <linux/bitops.h>
82 #include <linux/errno.h>
83 #include <linux/highmem.h>
84 #include <linux/string.h>
85 #include <linux/slab.h>
86 #include <asm/tlbflush.h>
87 #include <asm/pgtable.h>
88 #include <linux/cpumask.h>
89 #include <linux/cpu.h>
90 #include <linux/vmalloc.h>
91 #include <linux/hardirq.h>
92 #include <linux/spinlock.h>
93 #include <linux/types.h>
94 #include <linux/zsmalloc.h>
95 #include <linux/zpool.h>
96 
97 /*
98  * This must be a power of 2 and greater than or equal to sizeof(link_free).
99  * These two conditions ensure that any 'struct link_free' itself doesn't
100  * span more than 1 page, which avoids the complex case of mapping 2 pages simply
101  * to restore link_free pointer values.
102  */
103 #define ZS_ALIGN		8
104 
105 /*
106  * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
107  * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
108  */
109 #define ZS_MAX_ZSPAGE_ORDER 2
110 #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
111 
112 /*
113  * Object location (<PFN>, <obj_idx>) is encoded as
114  * a single (unsigned long) handle value.
115  *
116  * Note that object index <obj_idx> is relative to system
117  * page <PFN> it is stored in, so for each sub-page belonging
118  * to a zspage, obj_idx starts with 0.
119  *
120  * This is made more complicated by various memory models and PAE.
121  */
122 
123 #ifndef MAX_PHYSMEM_BITS
124 #ifdef CONFIG_HIGHMEM64G
125 #define MAX_PHYSMEM_BITS 36
126 #else /* !CONFIG_HIGHMEM64G */
127 /*
128  * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
129  * be PAGE_SHIFT
130  */
131 #define MAX_PHYSMEM_BITS BITS_PER_LONG
132 #endif
133 #endif
134 #define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
135 #define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS)
136 #define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
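
/*
 * Worked example (assuming the common !CONFIG_HIGHMEM64G case on a 64-bit
 * system with PAGE_SHIFT == 12): MAX_PHYSMEM_BITS == BITS_PER_LONG == 64,
 * so _PFN_BITS == 52, OBJ_INDEX_BITS == 12 == PAGE_SHIFT and
 * OBJ_INDEX_MASK == 0xfff.  A handle is then
 *
 *	handle = (pfn << 12) | ((obj_idx + 1) & 0xfff)
 *
 * e.g. pfn == 0x1234 and obj_idx == 5 encode to 0x1234006; the "+ 1"
 * (see obj_location_to_handle()) guarantees a handle of 0 is never produced,
 * even for pfn == 0 and obj_idx == 0.
 */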
137 
138 #define MAX(a, b) ((a) >= (b) ? (a) : (b))
139 /* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
140 #define ZS_MIN_ALLOC_SIZE \
141 	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
142 #define ZS_MAX_ALLOC_SIZE	PAGE_SIZE
143 
144 /*
145  * On systems with 4K page size, this gives 255 size classes! There is a
146  * trade-off here:
147  *  - Large number of size classes is potentially wasteful as free pages are
148  *    spread across these classes
149  *  - Small number of size classes causes large internal fragmentation
150  *  - Probably it is better to use specific size classes (empirically
151  *    determined). NOTE: all those class sizes must be set as multiple of
152  *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
153  *
154  *  ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
155  *  (reason above)
156  */
157 #define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)
158 #define ZS_SIZE_CLASSES		((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
159 					ZS_SIZE_CLASS_DELTA + 1)
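
/*
 * Worked example (again assuming 4K pages and OBJ_INDEX_BITS == PAGE_SHIFT):
 * ZS_MIN_ALLOC_SIZE == 32 and ZS_SIZE_CLASS_DELTA == 16, so
 * ZS_SIZE_CLASSES == (4096 - 32) / 16 + 1 == 255, i.e. class sizes
 * 32, 48, 64, ..., 4096 as noted above.
 */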
160 
161 /*
162  * We do not maintain any list for completely empty or full pages
163  */
164 enum fullness_group {
165 	ZS_ALMOST_FULL,
166 	ZS_ALMOST_EMPTY,
167 	_ZS_NR_FULLNESS_GROUPS,
168 
169 	ZS_EMPTY,
170 	ZS_FULL
171 };
172 
173 /*
174  * We assign a page to ZS_ALMOST_EMPTY fullness group when:
175  *	n <= N / f, where
176  * n = number of allocated objects
177  * N = total number of objects zspage can store
178  * f = fullness_threshold_frac
179  *
180  * Similarly, we assign zspage to:
181  *	ZS_ALMOST_FULL	when n > N / f
182  *	ZS_EMPTY	when n == 0
183  *	ZS_FULL		when n == N
184  *
185  * (see: fix_fullness_group())
186  */
187 static const int fullness_threshold_frac = 4;
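
/*
 * Example: a zspage of class size 1536 spanning 3 pages holds N == 8
 * objects, so with f == 4 it is ZS_EMPTY for n == 0, ZS_ALMOST_EMPTY for
 * n in 1..2 (n <= 8 / 4), ZS_ALMOST_FULL for n in 3..7 and ZS_FULL for
 * n == 8 (see get_fullness_group()).
 */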
188 
189 struct size_class {
190 	/*
191 	 * Size of objects stored in this class. Must be multiple
192 	 * of ZS_ALIGN.
193 	 */
194 	int size;
195 	unsigned int index;
196 
197 	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
198 	int pages_per_zspage;
199 
200 	spinlock_t lock;
201 
202 	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
203 };
204 
205 /*
206  * Placed within free objects to form a singly linked list.
207  * For every zspage, first_page->freelist gives head of this list.
208  *
209  * This must be a power of 2 and less than or equal to ZS_ALIGN
210  */
211 struct link_free {
212 	/* Handle of next free chunk (encodes <PFN, obj_idx>) */
213 	void *next;
214 };
215 
216 struct zs_pool {
217 	struct size_class size_class[ZS_SIZE_CLASSES];
218 
219 	gfp_t flags;	/* allocation flags used when growing pool */
220 	atomic_long_t pages_allocated;
221 };
222 
223 /*
224  * A zspage's class index and fullness group
225  * are encoded in its (first)page->mapping
226  */
227 #define CLASS_IDX_BITS	28
228 #define FULLNESS_BITS	4
229 #define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
230 #define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)
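
/*
 * Example: a zspage in size class 23 that is ZS_ALMOST_EMPTY (== 1) stores
 * (23 << FULLNESS_BITS) | 1 == 0x171 in first_page->mapping; see
 * set_zspage_mapping() and get_zspage_mapping() below.
 */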
231 
232 struct mapping_area {
233 #ifdef CONFIG_PGTABLE_MAPPING
234 	struct vm_struct *vm; /* vm area for mapping object that span pages */
235 #else
236 	char *vm_buf; /* copy buffer for objects that span pages */
237 #endif
238 	char *vm_addr; /* address of kmap_atomic()'ed pages */
239 	enum zs_mapmode vm_mm; /* mapping mode */
240 };
241 
242 /* zpool driver */
243 
244 #ifdef CONFIG_ZPOOL
245 
246 static void *zs_zpool_create(gfp_t gfp, struct zpool_ops *zpool_ops)
247 {
248 	return zs_create_pool(gfp);
249 }
250 
251 static void zs_zpool_destroy(void *pool)
252 {
253 	zs_destroy_pool(pool);
254 }
255 
256 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
257 			unsigned long *handle)
258 {
259 	*handle = zs_malloc(pool, size);
260 	return *handle ? 0 : -1;
261 }
262 static void zs_zpool_free(void *pool, unsigned long handle)
263 {
264 	zs_free(pool, handle);
265 }
266 
267 static int zs_zpool_shrink(void *pool, unsigned int pages,
268 			unsigned int *reclaimed)
269 {
270 	return -EINVAL;
271 }
272 
273 static void *zs_zpool_map(void *pool, unsigned long handle,
274 			enum zpool_mapmode mm)
275 {
276 	enum zs_mapmode zs_mm;
277 
278 	switch (mm) {
279 	case ZPOOL_MM_RO:
280 		zs_mm = ZS_MM_RO;
281 		break;
282 	case ZPOOL_MM_WO:
283 		zs_mm = ZS_MM_WO;
284 		break;
285 	case ZPOOL_MM_RW: /* fallthru */
286 	default:
287 		zs_mm = ZS_MM_RW;
288 		break;
289 	}
290 
291 	return zs_map_object(pool, handle, zs_mm);
292 }
293 static void zs_zpool_unmap(void *pool, unsigned long handle)
294 {
295 	zs_unmap_object(pool, handle);
296 }
297 
298 static u64 zs_zpool_total_size(void *pool)
299 {
300 	return zs_get_total_pages(pool) << PAGE_SHIFT;
301 }
302 
303 static struct zpool_driver zs_zpool_driver = {
304 	.type =		"zsmalloc",
305 	.owner =	THIS_MODULE,
306 	.create =	zs_zpool_create,
307 	.destroy =	zs_zpool_destroy,
308 	.malloc =	zs_zpool_malloc,
309 	.free =		zs_zpool_free,
310 	.shrink =	zs_zpool_shrink,
311 	.map =		zs_zpool_map,
312 	.unmap =	zs_zpool_unmap,
313 	.total_size =	zs_zpool_total_size,
314 };
315 
316 MODULE_ALIAS("zpool-zsmalloc");
317 #endif /* CONFIG_ZPOOL */
318 
319 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
320 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
321 
322 static int is_first_page(struct page *page)
323 {
324 	return PagePrivate(page);
325 }
326 
327 static int is_last_page(struct page *page)
328 {
329 	return PagePrivate2(page);
330 }
331 
332 static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
333 				enum fullness_group *fullness)
334 {
335 	unsigned long m;
336 	BUG_ON(!is_first_page(page));
337 
338 	m = (unsigned long)page->mapping;
339 	*fullness = m & FULLNESS_MASK;
340 	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
341 }
342 
343 static void set_zspage_mapping(struct page *page, unsigned int class_idx,
344 				enum fullness_group fullness)
345 {
346 	unsigned long m;
347 	BUG_ON(!is_first_page(page));
348 
349 	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
350 			(fullness & FULLNESS_MASK);
351 	page->mapping = (struct address_space *)m;
352 }
353 
354 /*
355  * zsmalloc divides the pool into various size classes where each
356  * class maintains a list of zspages where each zspage is divided
357  * into equal sized chunks. Each allocation falls into one of these
358  * classes depending on its size. This function returns the index of the
359  * size class whose chunk size is big enough to hold the given size.
360  */
361 static int get_size_class_index(int size)
362 {
363 	int idx = 0;
364 
365 	if (likely(size > ZS_MIN_ALLOC_SIZE))
366 		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
367 				ZS_SIZE_CLASS_DELTA);
368 
369 	return idx;
370 }
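
/*
 * Example (with the 4K-page constants above): a 405-byte request maps to
 * index DIV_ROUND_UP(405 - 32, 16) == 24, i.e. the class of size
 * 32 + 24 * 16 == 416, the smallest class that can hold 405 bytes.
 */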
371 
372 /*
373  * For each size class, zspages are divided into different groups
374  * depending on how "full" they are. This was done so that we could
375  * easily find empty or nearly empty zspages when we try to shrink
376  * the pool (not yet implemented). This function returns the fullness
377  * status of the given page.
378  */
379 static enum fullness_group get_fullness_group(struct page *page)
380 {
381 	int inuse, max_objects;
382 	enum fullness_group fg;
383 	BUG_ON(!is_first_page(page));
384 
385 	inuse = page->inuse;
386 	max_objects = page->objects;
387 
388 	if (inuse == 0)
389 		fg = ZS_EMPTY;
390 	else if (inuse == max_objects)
391 		fg = ZS_FULL;
392 	else if (inuse <= max_objects / fullness_threshold_frac)
393 		fg = ZS_ALMOST_EMPTY;
394 	else
395 		fg = ZS_ALMOST_FULL;
396 
397 	return fg;
398 }
399 
400 /*
401  * Each size class maintains various freelists and zspages are assigned
402  * to one of these freelists based on the number of live objects they
403  * have. This function inserts the given zspage into the freelist
404  * identified by <class, fullness_group>.
405  */
406 static void insert_zspage(struct page *page, struct size_class *class,
407 				enum fullness_group fullness)
408 {
409 	struct page **head;
410 
411 	BUG_ON(!is_first_page(page));
412 
413 	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
414 		return;
415 
416 	head = &class->fullness_list[fullness];
417 	if (*head)
418 		list_add_tail(&page->lru, &(*head)->lru);
419 
420 	*head = page;
421 }
422 
423 /*
424  * This function removes the given zspage from the freelist identified
425  * by <class, fullness_group>.
426  */
427 static void remove_zspage(struct page *page, struct size_class *class,
428 				enum fullness_group fullness)
429 {
430 	struct page **head;
431 
432 	BUG_ON(!is_first_page(page));
433 
434 	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
435 		return;
436 
437 	head = &class->fullness_list[fullness];
438 	BUG_ON(!*head);
439 	if (list_empty(&(*head)->lru))
440 		*head = NULL;
441 	else if (*head == page)
442 		*head = (struct page *)list_entry((*head)->lru.next,
443 					struct page, lru);
444 
445 	list_del_init(&page->lru);
446 }
447 
448 /*
449  * Each size class maintains zspages in different fullness groups depending
450  * on the number of live objects they contain. When allocating or freeing
451  * objects, the fullness status of the page can change, say, from ALMOST_FULL
452  * to ALMOST_EMPTY when freeing an object. This function checks if such
453  * a status change has occurred for the given page and accordingly moves the
454  * page from the freelist of the old fullness group to that of the new
455  * fullness group.
456  */
457 static enum fullness_group fix_fullness_group(struct zs_pool *pool,
458 						struct page *page)
459 {
460 	int class_idx;
461 	struct size_class *class;
462 	enum fullness_group currfg, newfg;
463 
464 	BUG_ON(!is_first_page(page));
465 
466 	get_zspage_mapping(page, &class_idx, &currfg);
467 	newfg = get_fullness_group(page);
468 	if (newfg == currfg)
469 		goto out;
470 
471 	class = &pool->size_class[class_idx];
472 	remove_zspage(page, class, currfg);
473 	insert_zspage(page, class, newfg);
474 	set_zspage_mapping(page, class_idx, newfg);
475 
476 out:
477 	return newfg;
478 }
479 
480 /*
481  * We have to decide on how many pages to link together
482  * to form a zspage for each size class. This is important
483  * to reduce wastage due to unusable space left at the end of
484  * each zspage, which is given as:
485  *	wastage = Zp % class_size
486  * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
487  *
488  * For example, for size class of 3/8 * PAGE_SIZE, we should
489  * link together 3 PAGE_SIZE sized pages to form a zspage
490  * since then we can perfectly fit in 8 such objects.
491  */
492 static int get_pages_per_zspage(int class_size)
493 {
494 	int i, max_usedpc = 0;
495 	/* zspage order which gives maximum used size per KB */
496 	int max_usedpc_order = 1;
497 
498 	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
499 		int zspage_size;
500 		int waste, usedpc;
501 
502 		zspage_size = i * PAGE_SIZE;
503 		waste = zspage_size % class_size;
504 		usedpc = (zspage_size - waste) * 100 / zspage_size;
505 
506 		if (usedpc > max_usedpc) {
507 			max_usedpc = usedpc;
508 			max_usedpc_order = i;
509 		}
510 	}
511 
512 	return max_usedpc_order;
513 }
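
/*
 * Example: for class_size == 1536 (3/8 of a 4K page) the loop above sees
 * waste of 1024, 512, 0 and 1024 bytes for zspages of 1, 2, 3 and 4 pages
 * (75%, 93%, 100% and 93% used), so 3 pages per zspage is chosen, matching
 * the 3/8 * PAGE_SIZE example in the comment above.
 */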
514 
515 /*
516  * A single 'zspage' is composed of many system pages which are
517  * linked together using fields in struct page. This function finds
518  * the first/head page, given any component page of a zspage.
519  */
520 static struct page *get_first_page(struct page *page)
521 {
522 	if (is_first_page(page))
523 		return page;
524 	else
525 		return page->first_page;
526 }
527 
528 static struct page *get_next_page(struct page *page)
529 {
530 	struct page *next;
531 
532 	if (is_last_page(page))
533 		next = NULL;
534 	else if (is_first_page(page))
535 		next = (struct page *)page_private(page);
536 	else
537 		next = list_entry(page->lru.next, struct page, lru);
538 
539 	return next;
540 }
541 
542 /*
543  * Encode <page, obj_idx> as a single handle value.
544  * On hardware platforms with physical memory starting at 0x0 the pfn
545  * could be 0, so we ensure that the handle will never be 0 by adjusting
546  * the obj_idx value (adding 1) before encoding.
547  */
548 static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
549 {
550 	unsigned long handle;
551 
552 	if (!page) {
553 		BUG_ON(obj_idx);
554 		return NULL;
555 	}
556 
557 	handle = page_to_pfn(page) << OBJ_INDEX_BITS;
558 	handle |= ((obj_idx + 1) & OBJ_INDEX_MASK);
559 
560 	return (void *)handle;
561 }
562 
563 /*
564  * Decode <page, obj_idx> pair from the given object handle. We adjust the
565  * decoded obj_idx back to its original value since it was adjusted in
566  * obj_location_to_handle().
567  */
568 static void obj_handle_to_location(unsigned long handle, struct page **page,
569 				unsigned long *obj_idx)
570 {
571 	*page = pfn_to_page(handle >> OBJ_INDEX_BITS);
572 	*obj_idx = (handle & OBJ_INDEX_MASK) - 1;
573 }
574 
575 static unsigned long obj_idx_to_offset(struct page *page,
576 				unsigned long obj_idx, int class_size)
577 {
578 	unsigned long off = 0;
579 
580 	if (!is_first_page(page))
581 		off = page->index;
582 
583 	return off + obj_idx * class_size;
584 }
585 
586 static void reset_page(struct page *page)
587 {
588 	clear_bit(PG_private, &page->flags);
589 	clear_bit(PG_private_2, &page->flags);
590 	set_page_private(page, 0);
591 	page->mapping = NULL;
592 	page->freelist = NULL;
593 	page_mapcount_reset(page);
594 }
595 
596 static void free_zspage(struct page *first_page)
597 {
598 	struct page *nextp, *tmp, *head_extra;
599 
600 	BUG_ON(!is_first_page(first_page));
601 	BUG_ON(first_page->inuse);
602 
603 	head_extra = (struct page *)page_private(first_page);
604 
605 	reset_page(first_page);
606 	__free_page(first_page);
607 
608 	/* zspage with only 1 system page */
609 	if (!head_extra)
610 		return;
611 
612 	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
613 		list_del(&nextp->lru);
614 		reset_page(nextp);
615 		__free_page(nextp);
616 	}
617 	reset_page(head_extra);
618 	__free_page(head_extra);
619 }
620 
621 /* Initialize a newly allocated zspage */
622 static void init_zspage(struct page *first_page, struct size_class *class)
623 {
624 	unsigned long off = 0;
625 	struct page *page = first_page;
626 
627 	BUG_ON(!is_first_page(first_page));
628 	while (page) {
629 		struct page *next_page;
630 		struct link_free *link;
631 		unsigned int i = 1;
632 
633 		/*
634 		 * page->index stores offset of first object starting
635 		 * in the page. For the first page, this is always 0,
636 		 * so we use first_page->index (aka ->freelist) to store
637 		 * head of corresponding zspage's freelist.
638 		 */
639 		if (page != first_page)
640 			page->index = off;
641 
642 		link = (struct link_free *)kmap_atomic(page) +
643 						off / sizeof(*link);
644 
645 		while ((off += class->size) < PAGE_SIZE) {
646 			link->next = obj_location_to_handle(page, i++);
647 			link += class->size / sizeof(*link);
648 		}
649 
650 		/*
651 		 * We now come to the last (full or partial) object on this
652 		 * page, which must point to the first object on the next
653 		 * page (if present)
654 		 */
655 		next_page = get_next_page(page);
656 		link->next = obj_location_to_handle(next_page, 0);
657 		kunmap_atomic(link);
658 		page = next_page;
659 		off %= PAGE_SIZE;
660 	}
661 }
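
/*
 * Example of the layout produced above for class->size == 1536 and 3 pages
 * per zspage: objects start at offsets 0, 1536 and 3072 in page 0 (the last
 * one spilling 512 bytes into page 1), page 1 gets ->index == 512 and hosts
 * the objects starting at 512, 2048 and 3584, and page 2 gets ->index == 1024
 * with the final two objects.  Each free slot's link_free holds the handle
 * of the next free slot, and the very last link is set to NULL to terminate
 * the freelist.
 */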
662 
663 /*
664  * Allocate a zspage for the given size class
665  */
666 static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
667 {
668 	int i, error;
669 	struct page *first_page = NULL, *uninitialized_var(prev_page);
670 
671 	/*
672 	 * Allocate individual pages and link them together as:
673 	 * 1. first page->private = first sub-page
674 	 * 2. all sub-pages are linked together using page->lru
675 	 * 3. each sub-page is linked to the first page using page->first_page
676 	 *
677 	 * For each size class, First/Head pages are linked together using
678 	 * page->lru. Also, we set PG_private to identify the first page
679 	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
680 	 * identify the last page.
681 	 */
682 	error = -ENOMEM;
683 	for (i = 0; i < class->pages_per_zspage; i++) {
684 		struct page *page;
685 
686 		page = alloc_page(flags);
687 		if (!page)
688 			goto cleanup;
689 
690 		INIT_LIST_HEAD(&page->lru);
691 		if (i == 0) {	/* first page */
692 			SetPagePrivate(page);
693 			set_page_private(page, 0);
694 			first_page = page;
695 			first_page->inuse = 0;
696 		}
697 		if (i == 1)
698 			set_page_private(first_page, (unsigned long)page);
699 		if (i >= 1)
700 			page->first_page = first_page;
701 		if (i >= 2)
702 			list_add(&page->lru, &prev_page->lru);
703 		if (i == class->pages_per_zspage - 1)	/* last page */
704 			SetPagePrivate2(page);
705 		prev_page = page;
706 	}
707 
708 	init_zspage(first_page, class);
709 
710 	first_page->freelist = obj_location_to_handle(first_page, 0);
711 	/* Maximum number of objects we can store in this zspage */
712 	first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;
713 
714 	error = 0; /* Success */
715 
716 cleanup:
717 	if (unlikely(error) && first_page) {
718 		free_zspage(first_page);
719 		first_page = NULL;
720 	}
721 
722 	return first_page;
723 }
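
/*
 * Example: with pages_per_zspage == 4 the loop above allocates P0..P3 and
 * links them so that P0 has PG_private set and page_private(P0) == P1,
 * P1..P3 all have ->first_page == P0 and are chained P1 -> P2 -> P3 on
 * their ->lru lists, and P3 additionally has PG_private_2 set;
 * get_next_page() therefore walks P0, P1, P2, P3 and then stops.
 */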
724 
725 static struct page *find_get_zspage(struct size_class *class)
726 {
727 	int i;
728 	struct page *page;
729 
730 	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
731 		page = class->fullness_list[i];
732 		if (page)
733 			break;
734 	}
735 
736 	return page;
737 }
738 
739 #ifdef CONFIG_PGTABLE_MAPPING
740 static inline int __zs_cpu_up(struct mapping_area *area)
741 {
742 	/*
743 	 * Make sure we don't leak memory if a cpu UP notification
744 	 * and zs_init() race and both call zs_cpu_up() on the same cpu
745 	 */
746 	if (area->vm)
747 		return 0;
748 	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
749 	if (!area->vm)
750 		return -ENOMEM;
751 	return 0;
752 }
753 
754 static inline void __zs_cpu_down(struct mapping_area *area)
755 {
756 	if (area->vm)
757 		free_vm_area(area->vm);
758 	area->vm = NULL;
759 }
760 
761 static inline void *__zs_map_object(struct mapping_area *area,
762 				struct page *pages[2], int off, int size)
763 {
764 	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
765 	area->vm_addr = area->vm->addr;
766 	return area->vm_addr + off;
767 }
768 
769 static inline void __zs_unmap_object(struct mapping_area *area,
770 				struct page *pages[2], int off, int size)
771 {
772 	unsigned long addr = (unsigned long)area->vm_addr;
773 
774 	unmap_kernel_range(addr, PAGE_SIZE * 2);
775 }
776 
777 #else /* CONFIG_PGTABLE_MAPPING */
778 
779 static inline int __zs_cpu_up(struct mapping_area *area)
780 {
781 	/*
782 	 * Make sure we don't leak memory if a cpu UP notification
783 	 * and zs_init() race and both call zs_cpu_up() on the same cpu
784 	 */
785 	if (area->vm_buf)
786 		return 0;
787 	area->vm_buf = (char *)__get_free_page(GFP_KERNEL);
788 	if (!area->vm_buf)
789 		return -ENOMEM;
790 	return 0;
791 }
792 
793 static inline void __zs_cpu_down(struct mapping_area *area)
794 {
795 	if (area->vm_buf)
796 		free_page((unsigned long)area->vm_buf);
797 	area->vm_buf = NULL;
798 }
799 
800 static void *__zs_map_object(struct mapping_area *area,
801 			struct page *pages[2], int off, int size)
802 {
803 	int sizes[2];
804 	void *addr;
805 	char *buf = area->vm_buf;
806 
807 	/* disable page faults to match kmap_atomic() return conditions */
808 	pagefault_disable();
809 
810 	/* no read fastpath */
811 	if (area->vm_mm == ZS_MM_WO)
812 		goto out;
813 
814 	sizes[0] = PAGE_SIZE - off;
815 	sizes[1] = size - sizes[0];
816 
817 	/* copy object to per-cpu buffer */
818 	addr = kmap_atomic(pages[0]);
819 	memcpy(buf, addr + off, sizes[0]);
820 	kunmap_atomic(addr);
821 	addr = kmap_atomic(pages[1]);
822 	memcpy(buf + sizes[0], addr, sizes[1]);
823 	kunmap_atomic(addr);
824 out:
825 	return area->vm_buf;
826 }
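
/*
 * Example: an object of class size 3264 starting at offset 3264 in a page
 * occupies the last 832 bytes of that page and the first 2432 bytes of the
 * next one, so the two memcpy()s above assemble it contiguously in vm_buf;
 * __zs_unmap_object() below writes any modifications back the same way.
 */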
827 
828 static void __zs_unmap_object(struct mapping_area *area,
829 			struct page *pages[2], int off, int size)
830 {
831 	int sizes[2];
832 	void *addr;
833 	char *buf = area->vm_buf;
834 
835 	/* no write fastpath */
836 	if (area->vm_mm == ZS_MM_RO)
837 		goto out;
838 
839 	sizes[0] = PAGE_SIZE - off;
840 	sizes[1] = size - sizes[0];
841 
842 	/* copy per-cpu buffer to object */
843 	addr = kmap_atomic(pages[0]);
844 	memcpy(addr + off, buf, sizes[0]);
845 	kunmap_atomic(addr);
846 	addr = kmap_atomic(pages[1]);
847 	memcpy(addr, buf + sizes[0], sizes[1]);
848 	kunmap_atomic(addr);
849 
850 out:
851 	/* enable page faults to match kunmap_atomic() return conditions */
852 	pagefault_enable();
853 }
854 
855 #endif /* CONFIG_PGTABLE_MAPPING */
856 
857 static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
858 				void *pcpu)
859 {
860 	int ret, cpu = (long)pcpu;
861 	struct mapping_area *area;
862 
863 	switch (action) {
864 	case CPU_UP_PREPARE:
865 		area = &per_cpu(zs_map_area, cpu);
866 		ret = __zs_cpu_up(area);
867 		if (ret)
868 			return notifier_from_errno(ret);
869 		break;
870 	case CPU_DEAD:
871 	case CPU_UP_CANCELED:
872 		area = &per_cpu(zs_map_area, cpu);
873 		__zs_cpu_down(area);
874 		break;
875 	}
876 
877 	return NOTIFY_OK;
878 }
879 
880 static struct notifier_block zs_cpu_nb = {
881 	.notifier_call = zs_cpu_notifier
882 };
883 
884 static void zs_exit(void)
885 {
886 	int cpu;
887 
888 #ifdef CONFIG_ZPOOL
889 	zpool_unregister_driver(&zs_zpool_driver);
890 #endif
891 
892 	cpu_notifier_register_begin();
893 
894 	for_each_online_cpu(cpu)
895 		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
896 	__unregister_cpu_notifier(&zs_cpu_nb);
897 
898 	cpu_notifier_register_done();
899 }
900 
901 static int zs_init(void)
902 {
903 	int cpu, ret;
904 
905 	cpu_notifier_register_begin();
906 
907 	__register_cpu_notifier(&zs_cpu_nb);
908 	for_each_online_cpu(cpu) {
909 		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
910 		if (notifier_to_errno(ret)) {
911 			cpu_notifier_register_done();
912 			goto fail;
913 		}
914 	}
915 
916 	cpu_notifier_register_done();
917 
918 #ifdef CONFIG_ZPOOL
919 	zpool_register_driver(&zs_zpool_driver);
920 #endif
921 
922 	return 0;
923 fail:
924 	zs_exit();
925 	return notifier_to_errno(ret);
926 }
927 
928 /**
929  * zs_create_pool - Creates an allocation pool to work from.
930  * @flags: GFP flags used when growing the pool (i.e. allocating new zspages)
931  *
932  * This function must be called before anything else when using
933  * the zsmalloc allocator.
934  *
935  * On success, a pointer to the newly created pool is returned,
936  * otherwise NULL.
937  */
938 struct zs_pool *zs_create_pool(gfp_t flags)
939 {
940 	int i, ovhd_size;
941 	struct zs_pool *pool;
942 
943 	ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
944 	pool = kzalloc(ovhd_size, GFP_KERNEL);
945 	if (!pool)
946 		return NULL;
947 
948 	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
949 		int size;
950 		struct size_class *class;
951 
952 		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
953 		if (size > ZS_MAX_ALLOC_SIZE)
954 			size = ZS_MAX_ALLOC_SIZE;
955 
956 		class = &pool->size_class[i];
957 		class->size = size;
958 		class->index = i;
959 		spin_lock_init(&class->lock);
960 		class->pages_per_zspage = get_pages_per_zspage(size);
961 
962 	}
963 
964 	pool->flags = flags;
965 
966 	return pool;
967 }
968 EXPORT_SYMBOL_GPL(zs_create_pool);
969 
970 void zs_destroy_pool(struct zs_pool *pool)
971 {
972 	int i;
973 
974 	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
975 		int fg;
976 		struct size_class *class = &pool->size_class[i];
977 
978 		for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
979 			if (class->fullness_list[fg]) {
980 				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
981 					class->size, fg);
982 			}
983 		}
984 	}
985 	kfree(pool);
986 }
987 EXPORT_SYMBOL_GPL(zs_destroy_pool);
988 
989 /**
990  * zs_malloc - Allocate block of given size from pool.
991  * @pool: pool to allocate from
992  * @size: size of block to allocate
993  *
994  * On success, handle to the allocated object is returned,
995  * otherwise 0.
996  * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
997  */
998 unsigned long zs_malloc(struct zs_pool *pool, size_t size)
999 {
1000 	unsigned long obj;
1001 	struct link_free *link;
1002 	int class_idx;
1003 	struct size_class *class;
1004 
1005 	struct page *first_page, *m_page;
1006 	unsigned long m_objidx, m_offset;
1007 
1008 	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
1009 		return 0;
1010 
1011 	class_idx = get_size_class_index(size);
1012 	class = &pool->size_class[class_idx];
1013 	BUG_ON(class_idx != class->index);
1014 
1015 	spin_lock(&class->lock);
1016 	first_page = find_get_zspage(class);
1017 
1018 	if (!first_page) {
1019 		spin_unlock(&class->lock);
1020 		first_page = alloc_zspage(class, pool->flags);
1021 		if (unlikely(!first_page))
1022 			return 0;
1023 
1024 		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
1025 		atomic_long_add(class->pages_per_zspage,
1026 					&pool->pages_allocated);
1027 		spin_lock(&class->lock);
1028 	}
1029 
1030 	obj = (unsigned long)first_page->freelist;
1031 	obj_handle_to_location(obj, &m_page, &m_objidx);
1032 	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
1033 
1034 	link = (struct link_free *)kmap_atomic(m_page) +
1035 					m_offset / sizeof(*link);
1036 	first_page->freelist = link->next;
1037 	memset(link, POISON_INUSE, sizeof(*link));
1038 	kunmap_atomic(link);
1039 
1040 	first_page->inuse++;
1041 	/* Now move the zspage to another fullness group, if required */
1042 	fix_fullness_group(pool, first_page);
1043 	spin_unlock(&class->lock);
1044 
1045 	return obj;
1046 }
1047 EXPORT_SYMBOL_GPL(zs_malloc);
1048 
1049 void zs_free(struct zs_pool *pool, unsigned long obj)
1050 {
1051 	struct link_free *link;
1052 	struct page *first_page, *f_page;
1053 	unsigned long f_objidx, f_offset;
1054 
1055 	int class_idx;
1056 	struct size_class *class;
1057 	enum fullness_group fullness;
1058 
1059 	if (unlikely(!obj))
1060 		return;
1061 
1062 	obj_handle_to_location(obj, &f_page, &f_objidx);
1063 	first_page = get_first_page(f_page);
1064 
1065 	get_zspage_mapping(first_page, &class_idx, &fullness);
1066 	class = &pool->size_class[class_idx];
1067 	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
1068 
1069 	spin_lock(&class->lock);
1070 
1071 	/* Insert this object in containing zspage's freelist */
1072 	link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
1073 							+ f_offset);
1074 	link->next = first_page->freelist;
1075 	kunmap_atomic(link);
1076 	first_page->freelist = (void *)obj;
1077 
1078 	first_page->inuse--;
1079 	fullness = fix_fullness_group(pool, first_page);
1080 	spin_unlock(&class->lock);
1081 
1082 	if (fullness == ZS_EMPTY) {
1083 		atomic_long_sub(class->pages_per_zspage,
1084 				&pool->pages_allocated);
1085 		free_zspage(first_page);
1086 	}
1087 }
1088 EXPORT_SYMBOL_GPL(zs_free);
1089 
1090 /**
1091  * zs_map_object - get address of allocated object from handle.
1092  * @pool: pool from which the object was allocated
1093  * @handle: handle returned from zs_malloc
1094  *
1095  * Before using an object allocated from zs_malloc, it must be mapped using
1096  * this function. When done with the object, it must be unmapped using
1097  * zs_unmap_object.
1098  *
1099  * Only one object can be mapped per cpu at a time. There is no protection
1100  * against nested mappings.
1101  *
1102  * This function returns with preemption and page faults disabled.
1103  */
1104 void *zs_map_object(struct zs_pool *pool, unsigned long handle,
1105 			enum zs_mapmode mm)
1106 {
1107 	struct page *page;
1108 	unsigned long obj_idx, off;
1109 
1110 	unsigned int class_idx;
1111 	enum fullness_group fg;
1112 	struct size_class *class;
1113 	struct mapping_area *area;
1114 	struct page *pages[2];
1115 
1116 	BUG_ON(!handle);
1117 
1118 	/*
1119 	 * Because we use per-cpu mapping areas shared among the
1120 	 * pools/users, we can't allow mapping in interrupt context
1121  * because it can corrupt another user's mappings.
1122 	 */
1123 	BUG_ON(in_interrupt());
1124 
1125 	obj_handle_to_location(handle, &page, &obj_idx);
1126 	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
1127 	class = &pool->size_class[class_idx];
1128 	off = obj_idx_to_offset(page, obj_idx, class->size);
1129 
1130 	area = &get_cpu_var(zs_map_area);
1131 	area->vm_mm = mm;
1132 	if (off + class->size <= PAGE_SIZE) {
1133 		/* this object is contained entirely within a page */
1134 		area->vm_addr = kmap_atomic(page);
1135 		return area->vm_addr + off;
1136 	}
1137 
1138 	/* this object spans two pages */
1139 	pages[0] = page;
1140 	pages[1] = get_next_page(page);
1141 	BUG_ON(!pages[1]);
1142 
1143 	return __zs_map_object(area, pages, off, class->size);
1144 }
1145 EXPORT_SYMBOL_GPL(zs_map_object);
1146 
1147 void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
1148 {
1149 	struct page *page;
1150 	unsigned long obj_idx, off;
1151 
1152 	unsigned int class_idx;
1153 	enum fullness_group fg;
1154 	struct size_class *class;
1155 	struct mapping_area *area;
1156 
1157 	BUG_ON(!handle);
1158 
1159 	obj_handle_to_location(handle, &page, &obj_idx);
1160 	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
1161 	class = &pool->size_class[class_idx];
1162 	off = obj_idx_to_offset(page, obj_idx, class->size);
1163 
1164 	area = this_cpu_ptr(&zs_map_area);
1165 	if (off + class->size <= PAGE_SIZE)
1166 		kunmap_atomic(area->vm_addr);
1167 	else {
1168 		struct page *pages[2];
1169 
1170 		pages[0] = page;
1171 		pages[1] = get_next_page(page);
1172 		BUG_ON(!pages[1]);
1173 
1174 		__zs_unmap_object(area, pages, off, class->size);
1175 	}
1176 	put_cpu_var(zs_map_area);
1177 }
1178 EXPORT_SYMBOL_GPL(zs_unmap_object);
1179 
1180 unsigned long zs_get_total_pages(struct zs_pool *pool)
1181 {
1182 	return atomic_long_read(&pool->pages_allocated);
1183 }
1184 EXPORT_SYMBOL_GPL(zs_get_total_pages);
1185 
1186 module_init(zs_init);
1187 module_exit(zs_exit);
1188 
1189 MODULE_LICENSE("Dual BSD/GPL");
1190 MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
1191