xref: /openbmc/linux/mm/zbud.c (revision 588b48ca)
/*
 * zbud.c
 *
 * Copyright (C) 2013, Seth Jennings, IBM
 *
 * Concepts based on zcache internal zbud allocator by Dan Magenheimer.
 *
 * zbud is a special purpose allocator for storing compressed pages.  Contrary
 * to what its name may suggest, zbud is not a buddy allocator, but rather an
 * allocator that "buddies" two compressed pages together in a single memory
 * page.
 *
 * While this design limits storage density, it has simple and deterministic
 * reclaim properties that make it preferable to a higher density approach when
 * reclaim will be used.
 *
 * zbud works by storing compressed pages, or "zpages", together in pairs in a
 * single memory page called a "zbud page".  The first buddy is "left
 * justified" at the beginning of the zbud page, and the last buddy is "right
 * justified" at the end of the zbud page.  The benefit is that if either
 * buddy is freed, the freed buddy space, coalesced with whatever slack space
 * existed between the buddies, results in the largest possible free region
 * within the zbud page.
 *
 * zbud also provides an attractive lower bound on density. The ratio of zpages
 * to zbud pages can not be less than 1.  This ensures that zbud can never "do
 * harm" by using more pages to store zpages than the uncompressed zpages would
 * have used on their own.
 *
 * zbud pages are divided into "chunks".  The size of the chunks is fixed at
 * compile time and determined by NCHUNKS_ORDER below.  Dividing zbud pages
 * into chunks allows organizing unbuddied zbud pages into a manageable number
 * of unbuddied lists according to the number of free chunks available in the
 * zbud page.
 *
 * The zbud API differs from that of conventional allocators in that the
 * allocation function, zbud_alloc(), returns an opaque handle to the user,
 * not a dereferenceable pointer.  The user must map the handle using
 * zbud_map() in order to get a usable pointer by which to access the
 * allocation data and unmap the handle with zbud_unmap() when operations
 * on the allocation data are complete.
 */
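
/*
 * A minimal illustrative sketch of the handle-based API described above.
 * src, src_len and my_zbud_ops are hypothetical and not part of this file;
 * my_zbud_ops would be a struct zbud_ops supplying an evict callback (it may
 * be NULL if reclaim is never used):
 *
 *	struct zbud_pool *pool;
 *	unsigned long handle;
 *	void *dst;
 *	int ret;
 *
 *	pool = zbud_create_pool(GFP_KERNEL, &my_zbud_ops);
 *	if (!pool)
 *		return -ENOMEM;
 *	ret = zbud_alloc(pool, src_len, GFP_KERNEL, &handle);
 *	if (ret)
 *		return ret;
 *	dst = zbud_map(pool, handle);
 *	memcpy(dst, src, src_len);
 *	zbud_unmap(pool, handle);
 *	...
 *	zbud_free(pool, handle);
 *	zbud_destroy_pool(pool);
 */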

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zbud.h>
#include <linux/zpool.h>

/*****************
 * Structures
*****************/
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64, and there
 * will be 64 freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define NCHUNKS		(PAGE_SIZE >> CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
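
/*
 * Worked example of the chunk math above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12): CHUNK_SHIFT == 6, so CHUNK_SIZE == 64 bytes and
 * NCHUNKS == 64.  The zbud header occupies the first chunk
 * (ZHDR_SIZE_ALIGNED == 64 bytes), leaving 63 chunks for the two buddies.
 * A 1000-byte allocation therefore rounds up to
 * size_to_chunks(1000) == (1000 + 63) >> 6 == 16 chunks (1024 bytes).
 */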

/**
 * struct zbud_pool - stores metadata for each zbud pool
 * @lock:	protects all pool fields and first|last_chunks fields of any
 *		zbud page in the pool
 * @unbuddied:	array of lists tracking zbud pages that only contain one buddy;
 *		the list a zbud page is added to depends on the size of
 *		its free region.
 * @buddied:	list tracking the zbud pages that contain two buddies;
 *		these zbud pages are full
 * @lru:	list tracking the zbud pages in LRU order by most recently
 *		added buddy.
 * @pages_nr:	number of zbud pages in the pool.
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular zbud pool.
 */
struct zbud_pool {
	spinlock_t lock;
	struct list_head unbuddied[NCHUNKS];
	struct list_head buddied;
	struct list_head lru;
	u64 pages_nr;
	struct zbud_ops *ops;
};

/*
 * struct zbud_header - zbud page metadata occupying the first chunk of each
 *			zbud page.
 * @buddy:	links the zbud page into the unbuddied/buddied lists in the pool
 * @lru:	links the zbud page into the lru list in the pool
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @under_reclaim:	true while the zbud page is being reclaimed; tells
 *			zbud_free() to defer freeing the page to the reclaim path
 */
struct zbud_header {
	struct list_head buddy;
	struct list_head lru;
	unsigned int first_chunks;
	unsigned int last_chunks;
	bool under_reclaim;
};

/*****************
 * zpool
 ****************/

#ifdef CONFIG_ZPOOL

static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle)
{
	return zpool_evict(pool, handle);
}

static struct zbud_ops zbud_zpool_ops = {
	.evict =	zbud_zpool_evict
};

static void *zbud_zpool_create(gfp_t gfp, struct zpool_ops *zpool_ops)
{
	return zbud_create_pool(gfp, &zbud_zpool_ops);
}

static void zbud_zpool_destroy(void *pool)
{
	zbud_destroy_pool(pool);
}

static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return zbud_alloc(pool, size, gfp, handle);
}

static void zbud_zpool_free(void *pool, unsigned long handle)
{
	zbud_free(pool, handle);
}

static int zbud_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = zbud_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *zbud_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return zbud_map(pool, handle);
}

static void zbud_zpool_unmap(void *pool, unsigned long handle)
{
	zbud_unmap(pool, handle);
}

static u64 zbud_zpool_total_size(void *pool)
{
	return zbud_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver zbud_zpool_driver = {
	.type =		"zbud",
	.owner =	THIS_MODULE,
	.create =	zbud_zpool_create,
	.destroy =	zbud_zpool_destroy,
	.malloc =	zbud_zpool_malloc,
	.free =		zbud_zpool_free,
	.shrink =	zbud_zpool_shrink,
	.map =		zbud_zpool_map,
	.unmap =	zbud_zpool_unmap,
	.total_size =	zbud_zpool_total_size,
};

#endif /* CONFIG_ZPOOL */

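/*
 * For context, a rough sketch of how a zpool user (e.g. zswap) reaches the
 * driver above.  The exact wrapper names are an assumption based on the
 * zpool API of this era and are shown only for illustration; my_zpool_ops is
 * a hypothetical struct zpool_ops supplying an evict callback for writeback:
 *
 *	struct zpool *zpool;
 *	unsigned long handle;
 *	void *buf;
 *
 *	zpool = zpool_create_pool("zbud", GFP_KERNEL, &my_zpool_ops);
 *	zpool_malloc(zpool, len, GFP_KERNEL, &handle);
 *	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_RW);
 *	...
 *	zpool_unmap_handle(zpool, handle);
 *	zpool_free(zpool, handle);
 *	zpool_destroy_pool(zpool);
 */
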
/*****************
 * Helpers
*****************/
/* Just to make the code easier to read */
enum buddy {
	FIRST,
	LAST
};

/* Converts an allocation size in bytes to size in zbud chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

/* Initializes the zbud header of a newly allocated zbud page */
static struct zbud_header *init_zbud_page(struct page *page)
{
	struct zbud_header *zhdr = page_address(page);

	zhdr->first_chunks = 0;
	zhdr->last_chunks = 0;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_LIST_HEAD(&zhdr->lru);
	zhdr->under_reclaim = false;
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_zbud_page(struct zbud_header *zhdr)
{
	__free_page(virt_to_page(zhdr));
}

/*
 * Encodes the handle of a particular buddy within a zbud page
 * Pool lock should be held as this function accesses first|last_chunks
 */
static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	/*
	 * For now, the encoded handle is actually just the pointer to the data
	 * but this might not always be the case.  A little information hiding.
	 * Add CHUNK_SIZE to the handle if it is the first allocation to jump
	 * over the zbud header in the first chunk.
	 */
	handle = (unsigned long)zhdr;
	if (bud == FIRST)
		/* skip over zbud header */
		handle += ZHDR_SIZE_ALIGNED;
	else /* bud == LAST */
		handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
	return handle;
}

/* Returns the zbud page where a given handle is stored */
static struct zbud_header *handle_to_zbud_header(unsigned long handle)
{
	return (struct zbud_header *)(handle & PAGE_MASK);
}
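
/*
 * Worked example of the handle encoding, with 4 KiB pages and a zbud page at
 * the (hypothetical) kernel virtual address 0xffff880012340000: the FIRST
 * handle is 0xffff880012340040 (base + ZHDR_SIZE_ALIGNED), and with
 * last_chunks == 16 the LAST handle is 0xffff880012340c00
 * (base + PAGE_SIZE - 16 * 64).  Either handle & PAGE_MASK recovers the
 * zbud_header address, which is what handle_to_zbud_header() relies on.
 */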

/* Returns the number of free chunks in a zbud page */
static int num_free_chunks(struct zbud_header *zhdr)
{
	/*
	 * Rather than branch for different situations, just use the fact that
	 * free buddies have a length of zero to simplify everything. -1 at the
	 * end for the zbud header.
	 */
	return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks - 1;
}
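
/*
 * For example, with NCHUNKS == 64, a zbud page holding only a 3-chunk first
 * buddy has 64 - 3 - 0 - 1 == 60 free chunks and therefore sits on
 * pool->unbuddied[60].  zbud_alloc() below starts its search at the list
 * index matching the requested size in chunks, so any list at or above that
 * index can satisfy the request.
 */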

/*****************
 * API Functions
*****************/
/**
 * zbud_create_pool() - create a new zbud pool
 * @gfp:	gfp flags when allocating the zbud pool structure
 * @ops:	user-defined operations for the zbud pool
 *
 * Return: pointer to the new zbud pool or NULL if the metadata allocation
 * failed.
 */
struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops)
{
	struct zbud_pool *pool;
	int i;

	pool = kmalloc(sizeof(struct zbud_pool), gfp);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	for_each_unbuddied_list(i, 0)
		INIT_LIST_HEAD(&pool->unbuddied[i]);
	INIT_LIST_HEAD(&pool->buddied);
	INIT_LIST_HEAD(&pool->lru);
	pool->pages_nr = 0;
	pool->ops = ops;
	return pool;
}

/**
 * zbud_destroy_pool() - destroys an existing zbud pool
 * @pool:	the zbud pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
void zbud_destroy_pool(struct zbud_pool *pool)
{
	kfree(pool);
}

/**
 * zbud_alloc() - allocates a region of a given size
 * @pool:	zbud pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as zbud pool pages.
 *
 * Return: 0 on success with *handle set, -EINVAL if the size or gfp arguments
 * are invalid, -ENOSPC if the size is too large to fit in a zbud page, or
 * -ENOMEM if the pool was unable to allocate a new page.
 */
int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks, i, freechunks;
	struct zbud_header *zhdr = NULL;
	enum buddy bud;
	struct page *page;

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;
	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		return -ENOSPC;
	chunks = size_to_chunks(size);
	spin_lock(&pool->lock);

	/* First, try to find an unbuddied zbud page. */
	zhdr = NULL;
	for_each_unbuddied_list(i, chunks) {
		if (!list_empty(&pool->unbuddied[i])) {
			zhdr = list_first_entry(&pool->unbuddied[i],
					struct zbud_header, buddy);
			list_del(&zhdr->buddy);
			if (zhdr->first_chunks == 0)
				bud = FIRST;
			else
				bud = LAST;
			goto found;
		}
	}

	/* Couldn't find unbuddied zbud page, create new one */
	spin_unlock(&pool->lock);
	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;
	spin_lock(&pool->lock);
	pool->pages_nr++;
	zhdr = init_zbud_page(page);
	bud = FIRST;

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else
		zhdr->last_chunks = chunks;

	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	} else {
		/* Add to buddied list */
		list_add(&zhdr->buddy, &pool->buddied);
	}

	/* Add/move zbud page to beginning of LRU */
	if (!list_empty(&zhdr->lru))
		list_del(&zhdr->lru);
	list_add(&zhdr->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);

	return 0;
}

/**
 * zbud_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by zbud_alloc()
 *
 * In the case that the zbud page in which the allocation resides is under
 * reclaim, as indicated by the under_reclaim flag in the zbud header being
 * set, this function only sets the first|last_chunks to 0.  The page is
 * actually freed once both buddies are evicted (see zbud_reclaim_page()
 * below).
 */
void zbud_free(struct zbud_pool *pool, unsigned long handle)
{
	struct zbud_header *zhdr;
	int freechunks;

	spin_lock(&pool->lock);
	zhdr = handle_to_zbud_header(handle);

	/* If first buddy, handle - ZHDR_SIZE_ALIGNED will be page aligned */
	if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK)
		zhdr->last_chunks = 0;
	else
		zhdr->first_chunks = 0;

	if (zhdr->under_reclaim) {
		/* zbud page is under reclaim, reclaim will free */
		spin_unlock(&pool->lock);
		return;
	}

	/* Remove from existing buddy list */
	list_del(&zhdr->buddy);

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* zbud page is empty, free */
		list_del(&zhdr->lru);
		free_zbud_page(zhdr);
		pool->pages_nr--;
	} else {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	}

	spin_unlock(&pool->lock);
}

#define list_tail_entry(ptr, type, member) \
	list_entry((ptr)->prev, type, member)

/**
 * zbud_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * zbud reclaim is different from normal system reclaim in that the reclaim is
 * done from the bottom, up.  This is because only the bottom layer, zbud, has
 * information on how the allocations are organized within each zbud page. This
 * has the potential to create interesting locking situations between zbud and
 * the user, however.
 *
 * To avoid these, this is how zbud_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls zbud_reclaim_page().
 * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call
 * the user-defined eviction handler with the pool and handle as arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. zbud_reclaim_page() will add the zbud page back to the
 * appropriate list and try the next zbud page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called zbud_free() on the handle. zbud_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the under_reclaim flag in the zbud header.
 *
 * If all buddies in the zbud page are successfully evicted, then the
 * zbud page can be freed.  (An illustrative eviction handler sketch follows
 * the function body below.)
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
{
	int i, ret, freechunks;
	struct zbud_header *zhdr;
	unsigned long first_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
			retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		zhdr = list_tail_entry(&pool->lru, struct zbud_header, lru);
		list_del(&zhdr->lru);
		list_del(&zhdr->buddy);
		/* Protect zbud page against free */
		zhdr->under_reclaim = true;
		/*
		 * We need to encode the handles before unlocking, since we can
		 * race with free that will set (first|last)_chunks to 0
		 */
		first_handle = 0;
		last_handle = 0;
		if (zhdr->first_chunks)
			first_handle = encode_handle(zhdr, FIRST);
		if (zhdr->last_chunks)
			last_handle = encode_handle(zhdr, LAST);
		spin_unlock(&pool->lock);

		/* Issue the eviction callback(s) */
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		spin_lock(&pool->lock);
		zhdr->under_reclaim = false;
		if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
			/*
			 * Both buddies are now free, free the zbud page and
			 * return success.
			 */
			free_zbud_page(zhdr);
			pool->pages_nr--;
			spin_unlock(&pool->lock);
			return 0;
		} else if (zhdr->first_chunks == 0 ||
				zhdr->last_chunks == 0) {
			/* add to unbuddied list */
			freechunks = num_free_chunks(zhdr);
			list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
		} else {
			/* add to buddied list */
			list_add(&zhdr->buddy, &pool->buddied);
		}

		/* add to beginning of LRU */
		list_add(&zhdr->lru, &pool->lru);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
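
/*
 * Illustrative sketch of an eviction handler honoring the protocol described
 * above.  write_back_compressed_page() is a hypothetical stand-in for
 * whatever the user does to persist the data (e.g. decompress and write it
 * back to swap):
 *
 *	static int my_evict(struct zbud_pool *pool, unsigned long handle)
 *	{
 *		void *data = zbud_map(pool, handle);
 *		int ret = write_back_compressed_page(data);
 *
 *		zbud_unmap(pool, handle);
 *		if (ret)
 *			return ret;
 *		zbud_free(pool, handle);
 *		return 0;
 *	}
 *
 * Returning non-zero makes zbud_reclaim_page() put the zbud page back on its
 * lists and move on to the next LRU page; returning 0 after calling
 * zbud_free() lets the page itself be freed once both buddies are gone.
 */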

/**
 * zbud_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * While trivial for zbud, the mapping functions for other allocators
 * implementing this allocation API could have more complex information encoded
 * in the handle and could create temporary mappings to make the data
 * accessible to the user.
 *
 * Returns: a pointer to the mapped allocation
 */
void *zbud_map(struct zbud_pool *pool, unsigned long handle)
{
	return (void *)(handle);
}

/**
 * zbud_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
{
}

/**
 * zbud_get_pool_size() - gets the zbud pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.  The pool lock need not be
 * taken to access pages_nr.
 */
u64 zbud_get_pool_size(struct zbud_pool *pool)
{
	return pool->pages_nr;
}

static int __init init_zbud(void)
{
	/* Make sure the zbud header will fit in one chunk */
	BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED);
	pr_info("loaded\n");

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zbud_zpool_driver);
#endif

	return 0;
}

static void __exit exit_zbud(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zbud_zpool_driver);
#endif

	pr_info("unloaded\n");
}

module_init(init_zbud);
module_exit(exit_zbud);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjenning@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");