// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/kmemleak.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS comes out to 63 (or 62 if CONFIG_DEBUG_SPINLOCK=y), which is
 * the maximum number of free chunks in a z3fold page; accordingly, each
 * pool maintains 63 (or 62, respectively) freelists.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		(TOTAL_CHUNKS - ZHDR_CHUNKS)

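/*
 * Worked example of the chunk math above, assuming PAGE_SIZE = 4096 and a
 * z3fold_header that fits in a single chunk (true unless spinlock debugging
 * grows the header past CHUNK_SIZE):
 *
 *	CHUNK_SHIFT  = 12 - 6 = 6	-> CHUNK_SIZE = 64 bytes
 *	TOTAL_CHUNKS = 4096 >> 6 = 64
 *	ZHDR_CHUNKS  = 1		-> NCHUNKS = 64 - 1 = 63
 *
 * i.e. a non-headless page has 63 chunks available for its buddies.
 */
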
#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

/*****************
 * Structures
*****************/
struct z3fold_pool;

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link */
	rwlock_t lock;
};
#define HANDLE_FLAG_MASK	(0x03)

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 * @foreign_handles:	the number of handles to this page's objects that are
 *			held in another page's slots
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
	unsigned short foreign_handles:2;
};

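/*
 * The resulting in-page layout for a non-headless z3fold page (a sketch;
 * actual chunk numbers depend on PAGE_SIZE and NCHUNKS_ORDER):
 *
 *	chunk 0 .. ZHDR_CHUNKS - 1:	struct z3fold_header
 *	chunk ZHDR_CHUNKS ..:		FIRST buddy, first_chunks long
 *	chunk start_middle ..:		MIDDLE buddy, middle_chunks long
 *	.. chunk TOTAL_CHUNKS - 1:	LAST buddy, last_chunks long,
 *					packed against the end of the page
 *
 * A HEADLESS page carries a single object spanning the whole page and has
 * no header; it is told apart by the PAGE_HEADLESS bit in page->private.
 */
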
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain at
 *		most two buddies; the list each z3fold page is added to
 *		depends on the size of its free region.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
	PAGE_MIGRATED, /* page is migrated and soon to be released */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
	HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

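/*
 * For example, with 64-byte chunks (PAGE_SIZE = 4096, NCHUNKS_ORDER = 6),
 * size_to_chunks() above behaves as a plain round-up to whole chunks:
 *
 *	size_to_chunks(1)    = (1 + 63) >> 6    = 1
 *	size_to_chunks(100)  = (100 + 63) >> 6  = 2
 *	size_to_chunks(2048) = (2048 + 63) >> 6 = 32
 */
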
#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle,
							     gfp);

	if (slots) {
		/* It will be freed separately in free_handle(). */
		kmemleak_not_leak(slots);
		slots->pool = (unsigned long)pool;
		rwlock_init(&slots->lock);
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int locked = 0;

	if (!(handle & (1 << PAGE_HEADLESS))) {
		slots = handle_to_slots(handle);
		do {
			unsigned long addr;

			read_lock(&slots->lock);
			addr = *(unsigned long *)handle;
			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
			locked = z3fold_page_trylock(zhdr);
			read_unlock(&slots->lock);
			if (locked) {
				struct page *page = virt_to_page(zhdr);

				if (!test_bit(PAGE_MIGRATED, &page->private))
					break;
				z3fold_page_unlock(zhdr);
			}
			cpu_relax();
		} while (true);
	} else {
		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
	}

	return zhdr;
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (!test_bit(PAGE_HEADLESS, &page->private))
		z3fold_page_unlock(zhdr);
}

static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;

	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
		write_unlock(&slots->lock);
		return; /* simple case, nothing else to do */
	}

	if (zhdr->slots != slots)
		zhdr->foreign_handles--;

	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	write_unlock(&slots->lock);

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		if (zhdr->slots == slots)
			zhdr->slots = NULL;
		kmem_cache_free(pool->c_handle, slots);
	}
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	clear_bit(PAGE_MIGRATED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	memset(zhdr, 0, sizeof(*zhdr));
	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	__free_page(page);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	write_lock(&slots->lock);
	slots->slot[idx] = h;
	write_unlock(&slots->lock);
	return (unsigned long)&slots->slot[idx];
}

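/*
 * Worked example for __encode_handle() above, with illustrative values:
 * for a LAST buddy of 10 chunks in a page whose header sits at
 * 0xffff888012340000 with first_num == 1, __idx() yields
 * (LAST + 1) & BUDDY_MASK == 0, so slot 0 receives
 *
 *	0xffff888012340000 + 0 + (10 << BUDDY_SHIFT) == 0xffff888012340028
 *
 * and the handle given out is the address of that slot.  Decoding reverses
 * this: addr & PAGE_MASK recovers the header, the sub-page bits above
 * BUDDY_SHIFT recover last_chunks (handle_to_chunks() below), and
 * (addr - first_num) & BUDDY_MASK recovers the buddy number
 * (handle_to_buddy() below).
 */
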
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	spin_unlock(&pool->lock);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);

	atomic64_dec(&pool->pages_nr);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static inline int put_z3fold_locked(struct z3fold_header *zhdr)
{
	return kref_put(&zhdr->refcount, release_z3fold_page_locked);
}

static inline int put_z3fold_locked_list(struct z3fold_header *zhdr)
{
	return kref_put(&zhdr->refcount, release_z3fold_page_locked_list);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}

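/*
 * Worked example for num_free_chunks() above (hypothetical layout, 64
 * chunks total, 1 header chunk): a page holding only a middle buddy of 20
 * chunks starting at chunk 30 has nfree_before = 30 - 1 = 29 and
 * nfree_after = 64 - (30 + 20) = 14, so it reports 29 free chunks and is
 * filed on unbuddied list 29.  A page with first_chunks = 10,
 * last_chunks = 5 and no middle buddy reports 63 - 10 - 5 = 48.
 */
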
/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied;
		int freechunks = num_free_chunks(zhdr);

		migrate_disable();
		unbuddied = this_cpu_ptr(pool->unbuddied);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		migrate_enable();
	}
}

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
	enum buddy bud = HEADLESS;

	if (zhdr->middle_chunks) {
		if (!zhdr->first_chunks &&
		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
	} else {
		if (!zhdr->first_chunks)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
		else
			bud = MIDDLE;
	}

	return bud;
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

static inline bool buddy_single(struct z3fold_header *zhdr)
{
	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
			(zhdr->first_chunks && zhdr->last_chunks) ||
			(zhdr->middle_chunks && zhdr->last_chunks));
}

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	void *p = zhdr;
	unsigned long old_handle = 0;
	size_t sz = 0;
	struct z3fold_header *new_zhdr = NULL;
	int first_idx = __idx(zhdr, FIRST);
	int middle_idx = __idx(zhdr, MIDDLE);
	int last_idx = __idx(zhdr, LAST);
	unsigned short *moved_chunks = NULL;

	/*
	 * No need to protect slots here -- all the slots are "local" and
	 * the page lock is already taken
	 */
	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
		p += ZHDR_SIZE_ALIGNED;
		sz = zhdr->first_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
		moved_chunks = &zhdr->first_chunks;
	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
		p += zhdr->start_middle << CHUNK_SHIFT;
		sz = zhdr->middle_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
		moved_chunks = &zhdr->middle_chunks;
	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		sz = zhdr->last_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
		moved_chunks = &zhdr->last_chunks;
	}

	if (sz > 0) {
		enum buddy new_bud = HEADLESS;
		short chunks = size_to_chunks(sz);
		void *q;

		new_zhdr = __z3fold_alloc(pool, sz, false);
		if (!new_zhdr)
			return NULL;

		if (WARN_ON(new_zhdr == zhdr))
			goto out_fail;

		new_bud = get_free_buddy(new_zhdr, chunks);
		q = new_zhdr;
		switch (new_bud) {
		case FIRST:
			new_zhdr->first_chunks = chunks;
			q += ZHDR_SIZE_ALIGNED;
			break;
		case MIDDLE:
			new_zhdr->middle_chunks = chunks;
			new_zhdr->start_middle =
				new_zhdr->first_chunks + ZHDR_CHUNKS;
			q += new_zhdr->start_middle << CHUNK_SHIFT;
			break;
		case LAST:
			new_zhdr->last_chunks = chunks;
			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
			break;
		default:
			goto out_fail;
		}
		new_zhdr->foreign_handles++;
		memcpy(q, p, sz);
		write_lock(&zhdr->slots->lock);
		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
			__idx(new_zhdr, new_bud);
		if (new_bud == LAST)
			*(unsigned long *)old_handle |=
					(new_zhdr->last_chunks << BUDDY_SHIFT);
		write_unlock(&zhdr->slots->lock);
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);

		*moved_chunks = 0;
	}

	return new_zhdr;

out_fail:
	if (new_zhdr && !put_z3fold_locked(new_zhdr)) {
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);
	}
	return NULL;
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}

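/*
 * Worked example for z3fold_compact_page() above (hypothetical numbers,
 * 64 chunks total, 1 header chunk): a page with first_chunks = 10 and a
 * middle buddy starting at chunk 30 has an inner gap of 30 - (10 + 1) = 19
 * chunks >= BIG_CHUNK_GAP, so the middle buddy is slid down to chunk 11,
 * merging the gap into the tail free region and growing it from
 * 64 - (30 + m) to 64 - (11 + m) chunks for a middle buddy of m chunks.
 */
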
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (put_z3fold_locked(zhdr))
		return;

	if (test_bit(PAGE_STALE, &page->private) ||
	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}

	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
		if (!put_z3fold_locked(zhdr)) {
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	migrate_disable();
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = this_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
						struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private) ||
		    test_bit(PAGE_CLAIMED, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	migrate_enable();

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private) ||
			    test_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	if (zhdr && !zhdr->slots) {
		zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
		if (!zhdr->slots)
			goto out_fail;
	}
	return zhdr;

out_fail:
	if (!put_z3fold_locked(zhdr)) {
		add_to_unbuddied(pool, zhdr);
		z3fold_page_unlock(zhdr);
	}
	return NULL;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
					 __alignof__(struct list_head));
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	INIT_WORK(&pool->work, free_pages_work);
	return pool;

out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */

	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	free_percpu(pool->unbuddied);
	kfree(pool);
}

static const struct movable_operations z3fold_mops;

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough
 * to satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the size exceeds PAGE_SIZE, or
 * -ENOMEM if the pool was unable to allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			bud = get_free_buddy(zhdr, chunks);
			if (bud == HEADLESS) {
				if (!put_z3fold_locked(zhdr))
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, &z3fold_mops);
		unlock_page(page);
	} else {
		WARN_ON(!trylock_page(page));
		__SetPageMovable(page, &z3fold_mops);
		unlock_page(page);
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

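/*
 * A note on the HEADLESS cut-off in z3fold_alloc() above: with
 * PAGE_SIZE = 4096 and a one-chunk header (ZHDR_SIZE_ALIGNED == CHUNK_SIZE
 * == 64), any request larger than 4096 - 64 - 64 = 3968 bytes could never
 * usefully share the page with another buddy, so it is stored headless and
 * occupies the whole page by itself.
 */
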
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|middle|last_chunks to 0.  The page is actually freed
 * once all buddies are evicted.
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;
	bool page_claimed;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);
	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!page_claimed) {
			put_z3fold_header(zhdr);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		put_z3fold_header(zhdr);
		return;
	}

	if (!page_claimed)
		free_handle(handle, zhdr);
	if (put_z3fold_locked_list(zhdr))
		return;
	if (page_claimed) {
		/* the page has not been claimed by us */
		put_z3fold_header(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		clear_bit(PAGE_CLAIMED, &page->private);
		put_z3fold_header(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		clear_bit(PAGE_CLAIMED, &page->private);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	clear_bit(PAGE_CLAIMED, &page->private);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	put_z3fold_header(zhdr);
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
out:
	put_z3fold_header(zhdr);
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	put_z3fold_header(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
		goto out;

	if (test_and_set_bit(PAGE_CLAIMED, &page->private))
		goto out;
	pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	kref_get(&zhdr->refcount);
	z3fold_page_unlock(zhdr);
	return true;

out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr))
		return -EAGAIN;
	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
		clear_bit(PAGE_CLAIMED, &page->private);
		z3fold_page_unlock(zhdr);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	set_bit(PAGE_MIGRATED, &page->private);
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	__ClearPageMovable(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	__SetPageMovable(newpage, &z3fold_mops);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	/* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
	page->private = 0;
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (put_z3fold_locked(zhdr))
		return;
	if (list_empty(&zhdr->buddy))
		add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static const struct movable_operations z3fold_mops = {
	.isolate_page = z3fold_page_isolate,
	.migrate_page = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static void *z3fold_zpool_create(const char *name, gfp_t gfp)
{
	return z3fold_create_pool(name, gfp);
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.sleep_mapped = true,
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

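/*
 * Sketch of how a zpool user such as zswap drives this driver through the
 * generic zpool API.  Illustrative only; the zpool signatures shown match
 * this kernel revision and may differ in others:
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "mypool", GFP_KERNEL);
 *	unsigned long handle;
 *
 *	if (zp && !zpool_malloc(zp, len, GFP_KERNEL, &handle)) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *
 *		memcpy(dst, src, len);
 *		zpool_unmap_handle(zp, handle);
 *		zpool_free(zp, handle);
 *	}
 *	zpool_destroy_pool(zp);
 */
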
MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	/*
	 * Make sure the z3fold header is not larger than the page size and
	 * that there is space remaining for the buddies.
	 */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");