/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX
};

/*
 * struct z3fold_header - z3fold page metadata occupying the first chunks of
 *			each z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @pool:		pointer to the pool which this page belongs to
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the starting chunk of the middle buddy
 * @first_num:		the starting number (for the first handle)
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
};

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * likewise be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)

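/*
 * A worked example of the chunk arithmetic above (a sketch, assuming the
 * common PAGE_SIZE of 4 KiB; exact values depend on architecture and
 * configuration):
 *
 *	CHUNK_SHIFT  = 12 - 6 = 6
 *	CHUNK_SIZE   = 1 << 6 = 64 bytes
 *	TOTAL_CHUNKS = 4096 >> 6 = 64
 *	ZHDR_CHUNKS  = sizeof(struct z3fold_header) rounded up to a chunk
 *		       boundary, expressed in chunks
 *	NCHUNKS      = TOTAL_CHUNKS - ZHDR_CHUNKS
 *
 * BUDDY_MASK covers the two low bits of a handle, which is enough to
 * distinguish the buddy values encoded by encode_handle() below.
 */
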
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain at
 *		most two buddies; the list each z3fold page is added to
 *		depends on the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE
};

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

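/*
 * E.g. with 64-byte chunks, size_to_chunks(100) = (100 + 63) >> 6 = 2:
 * sizes are always rounded up to a whole number of chunks.
 */
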
#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool)
{
	struct z3fold_header *zhdr = page_address(page);

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page)
{
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	handle = (unsigned long)zhdr;
	if (bud != HEADLESS)
		handle += (bud + zhdr->first_num) & BUDDY_MASK;
	return handle;
}

/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
	return (struct z3fold_header *)(handle & PAGE_MASK);
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
	return (handle - zhdr->first_num) & BUDDY_MASK;
}

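/*
 * A round-trip example (illustrative values): the header sits at the start
 * of its page, so the low PAGE_SHIFT bits of its address are zero.  With
 * first_num = 1, encoding MIDDLE (2) gives
 *
 *	handle = (unsigned long)zhdr + ((2 + 1) & 0x3) = zhdr | 0x3
 *
 * and decoding recovers both parts:
 *
 *	handle_to_z3fold_header(handle) = handle & PAGE_MASK = zhdr
 *	handle_to_buddy(handle) = (0x3 - 1) & 0x3 = 2 = MIDDLE
 */
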
static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr->pool;

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	spin_lock(&zhdr->pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&zhdr->pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}

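/*
 * Example (a sketch): with a free first buddy, a middle buddy at
 * start_middle = 20 and an allocated last buddy, nfree_before is
 * 20 - ZHDR_CHUNKS and nfree_after is 0, so num_free_chunks() reports
 * the hole between the header and the middle buddy.
 */
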
static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}

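/*
 * Illustrative numbers for the BIG_CHUNK_GAP test above (assuming
 * ZHDR_CHUNKS = 2): with first_chunks = 10 and the middle buddy at
 * start_middle = 30, the gap is 30 - (10 + 2) = 18 >= BIG_CHUNK_GAP,
 * so the middle buddy is moved down to chunk 12, merging the two free
 * regions into one larger region after it.
 */
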
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr->pool;
	struct page *page;
	struct list_head *unbuddied;
	int fchunks;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	z3fold_compact_page(zhdr);
	unbuddied = get_cpu_ptr(pool->unbuddied);
	fchunks = num_free_chunks(zhdr);
	if (fchunks < NCHUNKS &&
	    (!zhdr->first_chunks || !zhdr->middle_chunks ||
			!zhdr->last_chunks)) {
		/* the page's not completely free and it's unbuddied */
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[fchunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
	}
	put_cpu_ptr(pool->unbuddied);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}


/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	destroy_workqueue(pool->release_wq);
	destroy_workqueue(pool->compact_wq);
	kfree(pool);
}

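/*
 * Typical pool lifecycle, sketched with a hypothetical caller (z3fold does
 * not export these functions; real users go through the zpool glue at the
 * bottom of this file):
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		...write back or discard the object, then...
 *		z3fold_free(pool, handle);
 *		return 0;
 *	}
 *	static const struct z3fold_ops my_ops = { .evict = my_evict };
 *
 *	pool = z3fold_create_pool("mypool", GFP_KERNEL, &my_ops);
 *	err = z3fold_alloc(pool, len, GFP_KERNEL, &handle);
 *	...
 *	z3fold_destroy_pool(pool);
 */
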
/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = 0, i, freechunks;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
		struct list_head *unbuddied;
		chunks = size_to_chunks(size);

lookup:
		/* First, try to find an unbuddied z3fold page. */
		unbuddied = get_cpu_ptr(pool->unbuddied);
		for_each_unbuddied_list(i, chunks) {
			struct list_head *l = &unbuddied[i];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr)
				continue;

			/* Re-check under lock. */
			spin_lock(&pool->lock);
			l = &unbuddied[i];
			if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
					struct z3fold_header, buddy)) ||
			    !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				put_cpu_ptr(pool->unbuddied);
				goto lookup;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				put_cpu_ptr(pool->unbuddied);
				if (can_sleep)
					cond_resched();
				goto lookup;
			}

			/*
			 * This page could not have been removed from its
			 * unbuddied list while the pool lock was held, and
			 * we have since taken the page lock, so kref_put
			 * could not have been called before we got here;
			 * it is therefore safe to just call kref_get().
			 */
			kref_get(&zhdr->refcount);
			break;
		}
		put_cpu_ptr(pool->unbuddied);

		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto lookup;
			}
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	atomic64_inc(&pool->pages_nr);
	zhdr = init_z3fold_page(page, pool);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}

	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PG_reclaim flag being set, this function
 * only sets the first|last_chunks to 0.  The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* HEADLESS page stored */
		bud = HEADLESS;
	} else {
		z3fold_page_lock(zhdr);
		bud = handle_to_buddy(handle);

		switch (bud) {
		case FIRST:
			zhdr->first_chunks = 0;
			break;
		case MIDDLE:
			zhdr->middle_chunks = 0;
			zhdr->start_middle = 0;
			break;
		case LAST:
			zhdr->last_chunks = 0;
			break;
		default:
			pr_err("%s: unknown bud %d\n", __func__, bud);
			WARN_ON(1);
			z3fold_page_unlock(zhdr);
			return;
		}
	}

	if (bud == HEADLESS) {
		spin_lock(&pool->lock);
		list_del(&page->lru);
		spin_unlock(&pool->lock);
		free_z3fold_page(page);
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle cannot be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user-defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PG_reclaim flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);
			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private))
				/* candidate found */
				break;

			if (!z3fold_page_trylock(zhdr))
				continue; /* can't evict at this point */
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			/* locked candidate found; stop searching */
			break;
		}

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		spin_lock(&pool->lock);
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				spin_unlock(&pool->lock);
				free_z3fold_page(page);
				return 0;
			}
		} else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
			atomic64_dec(&pool->pages_nr);
			spin_unlock(&pool->lock);
			return 0;
		}

		/*
		 * Add to the beginning of LRU.
		 * Pool lock has to be kept here to ensure the page has
		 * not already been released
		 */
		list_add(&page->lru, &pool->lru);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	z3fold_page_unlock(zhdr);
out:
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	z3fold_page_unlock(zhdr);
}

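/*
 * The usual access pattern, sketched (handle comes from z3fold_alloc();
 * z3fold pages are never highmem, so mapping is plain address arithmetic,
 * and a MIDDLE mapping additionally pins the middle chunk against
 * compaction until z3fold_unmap() is called):
 *
 *	void *dst = z3fold_map(pool, handle);
 *	memcpy(dst, src, len);
 *	z3fold_unmap(pool, handle);
 */
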
/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

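/*
 * With the driver registered, zpool clients (e.g. zswap) select this
 * allocator by its type string.  A sketch, assuming the zpool API of this
 * kernel generation (signatures differ between versions; "myname" and
 * my_zpool_ops are placeholders):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "myname",
 *					     GFP_KERNEL, &my_zpool_ops);
 */
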
MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");