// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and its simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */

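/*
 * Illustrative usage sketch only, not part of this file: callers reach
 * z3fold through the zpool layer (zswap being the typical user). The pool
 * name, ops, "src" buffer and "sz" below are made-up placeholders.
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "example",
 *					     GFP_KERNEL, &my_zpool_ops);
 *	unsigned long handle;
 *
 *	if (zp && zpool_malloc(zp, sz, GFP_KERNEL, &handle) == 0) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *
 *		memcpy(dst, src, sz);
 *		zpool_unmap_handle(zp, handle);
 *	}
 */
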
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/dcache.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * in the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS works out to 63 (or 62 if CONFIG_DEBUG_SPINLOCK=y), which is
 * the maximum number of free chunks in a z3fold page; accordingly, there
 * will be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

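/*
 * A worked example, assuming 4K pages (PAGE_SHIFT == 12) and a z3fold
 * header that fits into one chunk: CHUNK_SHIFT == 12 - 6 == 6, so
 * CHUNK_SIZE == 64 bytes and TOTAL_CHUNKS == 4096 >> 6 == 64. With
 * ZHDR_SIZE_ALIGNED == 64, ZHDR_CHUNKS == 1 and NCHUNKS ==
 * (4096 - 64) >> 6 == 63 chunks available for objects.
 */
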
/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * We use BUDDY_MASK in handle_to_buddy() etc., so there must be
	 * enough slots to hold all possible buddy indexes.
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link + flags */
};
#define HANDLE_FLAG_MASK	(0x03)

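/*
 * Note on SLOTS_ALIGN: handle_to_slots() below recovers the slots
 * structure by masking off the low six bits of a handle, so struct
 * z3fold_buddy_slots must be allocated 64-byte aligned; this is what the
 * SLOTS_ALIGN argument to kmem_cache_create() in z3fold_create_pool()
 * guarantees.
 */
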
/**
 * struct z3fold_header - z3fold page metadata occupying the first chunks of
 *			each z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the location of the middle buddy, in chunks
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain
 *		fewer than three buddies; the list each z3fold page is added
 *		to depends on the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

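/*
 * A quick worked example, assuming 4K pages (so CHUNK_SIZE == 64): a
 * 100-byte allocation gives size_to_chunks(100) == (100 + 63) >> 6 == 2
 * chunks, i.e. sizes are always rounded up to a whole number of chunks.
 */
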
#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool)
{
	struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle,
							GFP_KERNEL);

	if (slots) {
		memset(slots->slot, 0, sizeof(slots->slot));
		slots->pool = (unsigned long)pool;
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

static inline void free_handle(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	WARN_ON(*(unsigned long *)handle == 0);
	*(unsigned long *)handle = 0;
	slots = handle_to_slots(handle);
	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		kmem_cache_free(pool->c_handle, slots);
	}
}

static struct dentry *z3fold_do_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	static const struct dentry_operations ops = {
		.d_dname = simple_dname,
	};

	return mount_pseudo(fs_type, "z3fold:", NULL, &ops, 0x33);
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.mount		= z3fold_do_mount,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots = alloc_slots(pool);

	if (!slots)
		return NULL;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->slots = slots;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	struct z3fold_buddy_slots *slots;
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	slots = zhdr->slots;
	slots->slot[idx] = h;
	return (unsigned long)&slots->slot[idx];
}

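/*
 * Worked example of the handle layout (addresses are hypothetical): for a
 * z3fold page at 0xffff888012345000 with first_num == 0, encoding the LAST
 * buddy with last_chunks == 5 yields idx == 3 and stores
 * 0xffff888012345000 + 3 | (5 << BUDDY_SHIFT) == 0xffff888012345017 in
 * slots->slot[3]; the handle handed back to the caller is the address of
 * slots->slot[3] itself, which is why handle_to_slots() can recover the
 * slots structure by mere masking.
 */
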
/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	unsigned long addr = h;

	if (!(addr & (1 << PAGE_HEADLESS)))
		addr = *(unsigned long *)h;

	return (struct z3fold_header *)(addr & PAGE_MASK);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	unsigned long addr = *(unsigned long *)handle;

	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle(),
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	unsigned long addr;

	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

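/*
 * Decoding example for the wrap-around case (illustrative values): with
 * first_num == 3, the LAST buddy (3) encodes to idx == (3 + 3) &
 * BUDDY_MASK == 2, and decoding computes (2 - 3) & BUDDY_MASK == 3 ==
 * LAST again, so the "smaller than first_num" case is indeed harmless.
 */
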
static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return slots_to_pool(zhdr->slots);
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}

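/*
 * Example, using the 4K-page numbers from above (TOTAL_CHUNKS == 64,
 * ZHDR_CHUNKS == 1): a page holding only a middle buddy with
 * start_middle == 10 and middle_chunks == 5 has nfree_before == 10 - 1
 * == 9 and nfree_after == 64 - (10 + 5) == 49, so num_free_chunks()
 * returns 49.
 */
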
/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}

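/*
 * To illustrate the "first + middle" compaction case above (again with
 * the 4K-page numbers): first_chunks == 10, start_middle == 20 and
 * last_chunks == 0 leaves a gap of 20 - (10 + 1) == 9 >= BIG_CHUNK_GAP
 * chunks, so the middle buddy is moved down to start_middle == 11,
 * right after the first buddy:
 *
 *	before: | hdr | first (10) | gap (9) | middle | free ... |
 *	after:  | hdr | first (10) | middle  | free ...          |
 */
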
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (unlikely(PageIsolated(page) ||
		     test_bit(PAGE_STALE, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
						struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * This page could not have been removed from its unbuddied
		 * list while the pool lock was held, and we have since taken
		 * the page lock, so kref_put() could not have been called
		 * before we got here; it's therefore safe to just call
		 * kref_get().
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);
	z3fold_unregister_migration(pool);
	destroy_workqueue(pool->release_wq);
	destroy_workqueue(pool->compact_wq);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough
 * to satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the size exceeds what a z3fold page
 * can hold, or -ENOMEM if the pool was unable to allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep, so we
		 * limit this path to contexts where sleeping is allowed.
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, pool);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	__SetPageMovable(page, pool->inode->i_mapping);
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|last_chunks to 0.  The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	z3fold_page_lock(zhdr);
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		z3fold_page_unlock(zhdr);
		return;
	}

	free_handle(handle);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (unlikely(PageIsolated(page)) ||
	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle cannot be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user-defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private))
				continue;

			if (unlikely(PageIsolated(page)))
				continue;
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			zhdr = page_address(page);
			if (!z3fold_page_trylock(zhdr)) {
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free, which will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * If we are here, the page is still not completely
			 * free. Take the global pool lock so we can add it
			 * back to the LRU list.
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked, so we need to take the pool lock back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
	z3fold_page_unlock(zhdr);
out:
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	pool = zhdr_to_pool(zhdr);

	if (zhdr->mapped_count == 0) {
		kref_get(&zhdr->refcount);
		if (!list_empty(&zhdr->buddy))
			list_del_init(&zhdr->buddy);
		spin_lock(&pool->lock);
		if (!list_empty(&page->lru))
			list_del(&page->lru);
		spin_unlock(&pool->lock);
		z3fold_page_unlock(zhdr);
		return true;
	}
out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
			       struct page *page, enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!trylock_page(page))
		return -EAGAIN;

	if (!z3fold_page_trylock(zhdr)) {
		unlock_page(page);
		return -EAGAIN;
	}
	if (zhdr->mapped_count != 0) {
		z3fold_page_unlock(zhdr);
		unlock_page(page);
		return -EBUSY;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);
	ClearPagePrivate(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	page_mapcount_reset(page);
	unlock_page(page);
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	z3fold_page_unlock(zhdr);
}

static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	int ret;

	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");