109c434b8SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
24e2e2770SSeth Jennings /*
34e2e2770SSeth Jennings * zbud.c
44e2e2770SSeth Jennings *
54e2e2770SSeth Jennings * Copyright (C) 2013, Seth Jennings, IBM
64e2e2770SSeth Jennings *
74e2e2770SSeth Jennings * Concepts based on zcache internal zbud allocator by Dan Magenheimer.
84e2e2770SSeth Jennings *
 * zbud is a special purpose allocator for storing compressed pages. Contrary
104e2e2770SSeth Jennings * to what its name may suggest, zbud is not a buddy allocator, but rather an
114e2e2770SSeth Jennings * allocator that "buddies" two compressed pages together in a single memory
124e2e2770SSeth Jennings * page.
134e2e2770SSeth Jennings *
144e2e2770SSeth Jennings * While this design limits storage density, it has simple and deterministic
154e2e2770SSeth Jennings * reclaim properties that make it preferable to a higher density approach when
164e2e2770SSeth Jennings * reclaim will be used.
174e2e2770SSeth Jennings *
184e2e2770SSeth Jennings * zbud works by storing compressed pages, or "zpages", together in pairs in a
194e2e2770SSeth Jennings * single memory page called a "zbud page". The first buddy is "left
20eee87e17SJianguo Wu * justified" at the beginning of the zbud page, and the last buddy is "right
214e2e2770SSeth Jennings * justified" at the end of the zbud page. The benefit is that if either
224e2e2770SSeth Jennings * buddy is freed, the freed buddy space, coalesced with whatever slack space
234e2e2770SSeth Jennings * that existed between the buddies, results in the largest possible free region
244e2e2770SSeth Jennings * within the zbud page.
254e2e2770SSeth Jennings *
264e2e2770SSeth Jennings * zbud also provides an attractive lower bound on density. The ratio of zpages
274e2e2770SSeth Jennings * to zbud pages can not be less than 1. This ensures that zbud can never "do
284e2e2770SSeth Jennings * harm" by using more pages to store zpages than the uncompressed zpages would
294e2e2770SSeth Jennings * have used on their own.
304e2e2770SSeth Jennings *
314e2e2770SSeth Jennings * zbud pages are divided into "chunks". The size of the chunks is fixed at
324e2e2770SSeth Jennings * compile time and determined by NCHUNKS_ORDER below. Dividing zbud pages
334e2e2770SSeth Jennings * into chunks allows organizing unbuddied zbud pages into a manageable number
344e2e2770SSeth Jennings * of unbuddied lists according to the number of free chunks available in the
354e2e2770SSeth Jennings * zbud page.
364e2e2770SSeth Jennings *
374e2e2770SSeth Jennings * The zbud API differs from that of conventional allocators in that the
384e2e2770SSeth Jennings * allocation function, zbud_alloc(), returns an opaque handle to the user,
394e2e2770SSeth Jennings * not a dereferenceable pointer. The user must map the handle using
404e2e2770SSeth Jennings * zbud_map() in order to get a usable pointer by which to access the
414e2e2770SSeth Jennings * allocation data and unmap the handle with zbud_unmap() when operations
424e2e2770SSeth Jennings * on the allocation data are complete.
434e2e2770SSeth Jennings */
444e2e2770SSeth Jennings
454e2e2770SSeth Jennings #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
464e2e2770SSeth Jennings
474e2e2770SSeth Jennings #include <linux/atomic.h>
484e2e2770SSeth Jennings #include <linux/list.h>
494e2e2770SSeth Jennings #include <linux/mm.h>
504e2e2770SSeth Jennings #include <linux/module.h>
514e2e2770SSeth Jennings #include <linux/preempt.h>
524e2e2770SSeth Jennings #include <linux/slab.h>
534e2e2770SSeth Jennings #include <linux/spinlock.h>
54c795779dSDan Streetman #include <linux/zpool.h>
554e2e2770SSeth Jennings
564e2e2770SSeth Jennings /*****************
574e2e2770SSeth Jennings * Structures
584e2e2770SSeth Jennings *****************/
594e2e2770SSeth Jennings /*
604e2e2770SSeth Jennings * NCHUNKS_ORDER determines the internal allocation granularity, effectively
614e2e2770SSeth Jennings * adjusting internal fragmentation. It also determines the number of
624e2e2770SSeth Jennings * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
63f203c3b3SChao Yu * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk
64f203c3b3SChao Yu * in allocated page is occupied by zbud header, NCHUNKS will be calculated to
65f203c3b3SChao Yu * 63 which shows the max number of free chunks in zbud page, also there will be
66f203c3b3SChao Yu * 63 freelists per pool.
674e2e2770SSeth Jennings */
684e2e2770SSeth Jennings #define NCHUNKS_ORDER 6
694e2e2770SSeth Jennings
704e2e2770SSeth Jennings #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
714e2e2770SSeth Jennings #define CHUNK_SIZE (1 << CHUNK_SHIFT)
724e2e2770SSeth Jennings #define ZHDR_SIZE_ALIGNED CHUNK_SIZE
73f203c3b3SChao Yu #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
744e2e2770SSeth Jennings
752a03085cSMiaohe Lin struct zbud_pool;
762a03085cSMiaohe Lin
774e2e2770SSeth Jennings /**
784e2e2770SSeth Jennings * struct zbud_pool - stores metadata for each zbud pool
794e2e2770SSeth Jennings * @lock: protects all pool fields and first|last_chunk fields of any
804e2e2770SSeth Jennings * zbud page in the pool
814e2e2770SSeth Jennings * @unbuddied: array of lists tracking zbud pages that only contain one buddy;
824e2e2770SSeth Jennings * the lists each zbud page is added to depends on the size of
834e2e2770SSeth Jennings * its free region.
844e2e2770SSeth Jennings * @buddied: list tracking the zbud pages that contain two buddies;
854e2e2770SSeth Jennings * these zbud pages are full
864e2e2770SSeth Jennings * @pages_nr: number of zbud pages in the pool.
874e2e2770SSeth Jennings *
884e2e2770SSeth Jennings * This structure is allocated at pool creation time and maintains metadata
894e2e2770SSeth Jennings * pertaining to a particular zbud pool.
904e2e2770SSeth Jennings */
struct zbud_pool {
	spinlock_t lock;	/* serializes all list/field updates below */
	union {
		/*
		 * Reuse unbuddied[0] as buddied on the ground that
		 * unbuddied[0] is unused.
		 */
		struct list_head buddied;
		struct list_head unbuddied[NCHUNKS];
	};
	u64 pages_nr;		/* number of zbud pages currently in the pool */
};
1034e2e2770SSeth Jennings
1044e2e2770SSeth Jennings /*
1054e2e2770SSeth Jennings * struct zbud_header - zbud page metadata occupying the first chunk of each
1064e2e2770SSeth Jennings * zbud page.
1074e2e2770SSeth Jennings * @buddy: links the zbud page into the unbuddied/buddied lists in the pool
1084e2e2770SSeth Jennings * @first_chunks: the size of the first buddy in chunks, 0 if free
1094e2e2770SSeth Jennings * @last_chunks: the size of the last buddy in chunks, 0 if free
1104e2e2770SSeth Jennings */
struct zbud_header {
	struct list_head buddy;		/* link into pool's unbuddied/buddied list */
	unsigned int first_chunks;	/* size of first buddy in chunks, 0 if free */
	unsigned int last_chunks;	/* size of last buddy in chunks, 0 if free */
};
1164e2e2770SSeth Jennings
1174e2e2770SSeth Jennings /*****************
1182a03085cSMiaohe Lin * Helpers
1192a03085cSMiaohe Lin *****************/
1202a03085cSMiaohe Lin /* Just to make the code easier to read */
enum buddy {
	FIRST,	/* buddy "left justified" at the start of the zbud page */
	LAST	/* buddy "right justified" at the end of the zbud page */
};
1252a03085cSMiaohe Lin
1262a03085cSMiaohe Lin /* Converts an allocation size in bytes to size in zbud chunks */
static int size_to_chunks(size_t size)
{
	/* round up to a whole number of chunks; CHUNK_SIZE is a power of two */
	return (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
}
1312a03085cSMiaohe Lin
1322a03085cSMiaohe Lin #define for_each_unbuddied_list(_iter, _begin) \
1332a03085cSMiaohe Lin for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1342a03085cSMiaohe Lin
1352a03085cSMiaohe Lin /* Initializes the zbud header of a newly allocated zbud page */
init_zbud_page(struct page * page)1362a03085cSMiaohe Lin static struct zbud_header *init_zbud_page(struct page *page)
1372a03085cSMiaohe Lin {
1382a03085cSMiaohe Lin struct zbud_header *zhdr = page_address(page);
1392a03085cSMiaohe Lin zhdr->first_chunks = 0;
1402a03085cSMiaohe Lin zhdr->last_chunks = 0;
1412a03085cSMiaohe Lin INIT_LIST_HEAD(&zhdr->buddy);
1422a03085cSMiaohe Lin return zhdr;
1432a03085cSMiaohe Lin }
1442a03085cSMiaohe Lin
1452a03085cSMiaohe Lin /* Resets the struct page fields and frees the page */
static void free_zbud_page(struct zbud_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	__free_page(page);
}
1502a03085cSMiaohe Lin
1512a03085cSMiaohe Lin /*
1522a03085cSMiaohe Lin * Encodes the handle of a particular buddy within a zbud page
1532a03085cSMiaohe Lin * Pool lock should be held as this function accesses first|last_chunks
1542a03085cSMiaohe Lin */
encode_handle(struct zbud_header * zhdr,enum buddy bud)1552a03085cSMiaohe Lin static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud)
1562a03085cSMiaohe Lin {
1572a03085cSMiaohe Lin unsigned long handle;
1582a03085cSMiaohe Lin
1592a03085cSMiaohe Lin /*
1602a03085cSMiaohe Lin * For now, the encoded handle is actually just the pointer to the data
1612a03085cSMiaohe Lin * but this might not always be the case. A little information hiding.
1622a03085cSMiaohe Lin * Add CHUNK_SIZE to the handle if it is the first allocation to jump
1632a03085cSMiaohe Lin * over the zbud header in the first chunk.
1642a03085cSMiaohe Lin */
1652a03085cSMiaohe Lin handle = (unsigned long)zhdr;
1662a03085cSMiaohe Lin if (bud == FIRST)
1672a03085cSMiaohe Lin /* skip over zbud header */
1682a03085cSMiaohe Lin handle += ZHDR_SIZE_ALIGNED;
1692a03085cSMiaohe Lin else /* bud == LAST */
1702a03085cSMiaohe Lin handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
1712a03085cSMiaohe Lin return handle;
1722a03085cSMiaohe Lin }
1732a03085cSMiaohe Lin
1742a03085cSMiaohe Lin /* Returns the zbud page where a given handle is stored */
handle_to_zbud_header(unsigned long handle)1752a03085cSMiaohe Lin static struct zbud_header *handle_to_zbud_header(unsigned long handle)
1762a03085cSMiaohe Lin {
1772a03085cSMiaohe Lin return (struct zbud_header *)(handle & PAGE_MASK);
1782a03085cSMiaohe Lin }
1792a03085cSMiaohe Lin
1802a03085cSMiaohe Lin /* Returns the number of free chunks in a zbud page */
num_free_chunks(struct zbud_header * zhdr)1812a03085cSMiaohe Lin static int num_free_chunks(struct zbud_header *zhdr)
1822a03085cSMiaohe Lin {
1832a03085cSMiaohe Lin /*
1842a03085cSMiaohe Lin * Rather than branch for different situations, just use the fact that
1852a03085cSMiaohe Lin * free buddies have a length of zero to simplify everything.
1862a03085cSMiaohe Lin */
1872a03085cSMiaohe Lin return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
1882a03085cSMiaohe Lin }
1892a03085cSMiaohe Lin
1902a03085cSMiaohe Lin /*****************
1912a03085cSMiaohe Lin * API Functions
1922a03085cSMiaohe Lin *****************/
1932a03085cSMiaohe Lin /**
1942a03085cSMiaohe Lin * zbud_create_pool() - create a new zbud pool
1952a03085cSMiaohe Lin * @gfp: gfp flags when allocating the zbud pool structure
1962a03085cSMiaohe Lin *
1972a03085cSMiaohe Lin * Return: pointer to the new zbud pool or NULL if the metadata allocation
1982a03085cSMiaohe Lin * failed.
1992a03085cSMiaohe Lin */
static struct zbud_pool *zbud_create_pool(gfp_t gfp)
{
	struct zbud_pool *pool = kzalloc(sizeof(*pool), gfp);
	int i;

	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	/* every unbuddied freelist starts empty */
	for (i = 0; i < NCHUNKS; i++)
		INIT_LIST_HEAD(&pool->unbuddied[i]);
	INIT_LIST_HEAD(&pool->buddied);
	pool->pages_nr = 0;
	return pool;
}
2152a03085cSMiaohe Lin
2162a03085cSMiaohe Lin /**
2172a03085cSMiaohe Lin * zbud_destroy_pool() - destroys an existing zbud pool
2182a03085cSMiaohe Lin * @pool: the zbud pool to be destroyed
2192a03085cSMiaohe Lin *
2202a03085cSMiaohe Lin * The pool should be emptied before this function is called.
2212a03085cSMiaohe Lin */
static void zbud_destroy_pool(struct zbud_pool *pool)
{
	/* caller must have freed all allocations; remaining pages would leak */
	kfree(pool);
}
2262a03085cSMiaohe Lin
2272a03085cSMiaohe Lin /**
2282a03085cSMiaohe Lin * zbud_alloc() - allocates a region of a given size
2292a03085cSMiaohe Lin * @pool: zbud pool from which to allocate
2302a03085cSMiaohe Lin * @size: size in bytes of the desired allocation
2312a03085cSMiaohe Lin * @gfp: gfp flags used if the pool needs to grow
2322a03085cSMiaohe Lin * @handle: handle of the new allocation
2332a03085cSMiaohe Lin *
2342a03085cSMiaohe Lin * This function will attempt to find a free region in the pool large enough to
2352a03085cSMiaohe Lin * satisfy the allocation request. A search of the unbuddied lists is
2362a03085cSMiaohe Lin * performed first. If no suitable free region is found, then a new page is
2372a03085cSMiaohe Lin * allocated and added to the pool to satisfy the request.
2382a03085cSMiaohe Lin *
2392a03085cSMiaohe Lin * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
2402a03085cSMiaohe Lin * as zbud pool pages.
2412a03085cSMiaohe Lin *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the size is too large to fit in a
 * zbud page, or -ENOMEM if the pool was unable to allocate a new page.
2452a03085cSMiaohe Lin */
static int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks, i, freechunks;
	struct zbud_header *zhdr = NULL;
	enum buddy bud;
	struct page *page;

	/* highmem pages cannot back direct-pointer handles */
	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;
	/*
	 * Cap the size one chunk below the post-header maximum so a lone
	 * buddy always leaves at least one chunk free: unbuddied[0] is
	 * aliased to the buddied list and must stay out of use.
	 */
	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		return -ENOSPC;
	chunks = size_to_chunks(size);
	spin_lock(&pool->lock);

	/*
	 * First, try to find an unbuddied zbud page: scan the freelists with
	 * at least 'chunks' free chunks, smallest sufficient fit first.
	 */
	for_each_unbuddied_list(i, chunks) {
		if (!list_empty(&pool->unbuddied[i])) {
			zhdr = list_first_entry(&pool->unbuddied[i],
					struct zbud_header, buddy);
			list_del(&zhdr->buddy);
			/* place into whichever buddy slot is still free */
			if (zhdr->first_chunks == 0)
				bud = FIRST;
			else
				bud = LAST;
			goto found;
		}
	}

	/*
	 * Couldn't find unbuddied zbud page, create new one. Drop the pool
	 * lock first: alloc_page() may sleep depending on gfp.
	 */
	spin_unlock(&pool->lock);
	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;
	spin_lock(&pool->lock);
	pool->pages_nr++;
	zhdr = init_zbud_page(page);
	bud = FIRST;

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else
		zhdr->last_chunks = chunks;

	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) {
		/* Add to unbuddied list indexed by remaining free chunks */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	} else {
		/* Both buddies occupied: page is full, add to buddied list */
		list_add(&zhdr->buddy, &pool->buddied);
	}

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);

	return 0;
}
3052a03085cSMiaohe Lin
3062a03085cSMiaohe Lin /**
3072a03085cSMiaohe Lin * zbud_free() - frees the allocation associated with the given handle
3082a03085cSMiaohe Lin * @pool: pool in which the allocation resided
3092a03085cSMiaohe Lin * @handle: handle associated with the allocation returned by zbud_alloc()
3102a03085cSMiaohe Lin */
static void zbud_free(struct zbud_pool *pool, unsigned long handle)
{
	struct zbud_header *zhdr;
	int freechunks;

	spin_lock(&pool->lock);
	zhdr = handle_to_zbud_header(handle);

	/*
	 * If first buddy, handle will be page aligned once the header size
	 * is subtracted (see encode_handle()); a last-buddy handle will not.
	 */
	if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK)
		zhdr->last_chunks = 0;
	else
		zhdr->first_chunks = 0;

	/* Remove from existing buddy list before re-filing or freeing */
	list_del(&zhdr->buddy);

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* zbud page is empty, free it back to the page allocator */
		free_zbud_page(zhdr);
		pool->pages_nr--;
	} else {
		/* Re-file under the list matching the new free-chunk count */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	}

	spin_unlock(&pool->lock);
}
3402a03085cSMiaohe Lin
3412a03085cSMiaohe Lin /**
3422a03085cSMiaohe Lin * zbud_map() - maps the allocation associated with the given handle
3432a03085cSMiaohe Lin * @pool: pool in which the allocation resides
3442a03085cSMiaohe Lin * @handle: handle associated with the allocation to be mapped
3452a03085cSMiaohe Lin *
3462a03085cSMiaohe Lin * While trivial for zbud, the mapping functions for others allocators
3472a03085cSMiaohe Lin * implementing this allocation API could have more complex information encoded
3482a03085cSMiaohe Lin * in the handle and could create temporary mappings to make the data
3492a03085cSMiaohe Lin * accessible to the user.
3502a03085cSMiaohe Lin *
3512a03085cSMiaohe Lin * Returns: a pointer to the mapped allocation
3522a03085cSMiaohe Lin */
static void *zbud_map(struct zbud_pool *pool, unsigned long handle)
{
	/* A zbud handle is already a direct pointer to the allocation. */
	return (void *)handle;
}
3572a03085cSMiaohe Lin
3582a03085cSMiaohe Lin /**
 * zbud_unmap() - unmaps the allocation associated with the given handle
3602a03085cSMiaohe Lin * @pool: pool in which the allocation resides
3612a03085cSMiaohe Lin * @handle: handle associated with the allocation to be unmapped
3622a03085cSMiaohe Lin */
static void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
{
	/* Nothing to do: zbud_map() creates no temporary mapping. */
}
3662a03085cSMiaohe Lin
3672a03085cSMiaohe Lin /**
3682a03085cSMiaohe Lin * zbud_get_pool_size() - gets the zbud pool size in pages
3692a03085cSMiaohe Lin * @pool: pool whose size is being queried
3702a03085cSMiaohe Lin *
3712a03085cSMiaohe Lin * Returns: size in pages of the given pool. The pool lock need not be
3722a03085cSMiaohe Lin * taken to access pages_nr.
3732a03085cSMiaohe Lin */
static u64 zbud_get_pool_size(struct zbud_pool *pool)
{
	/* lockless read is acceptable here per the kerneldoc above */
	return pool->pages_nr;
}
3782a03085cSMiaohe Lin
3792a03085cSMiaohe Lin /*****************
380c795779dSDan Streetman * zpool
381c795779dSDan Streetman ****************/
382c795779dSDan Streetman
static void *zbud_zpool_create(const char *name, gfp_t gfp)
{
	/* zpool glue: the name is unused by zbud itself */
	struct zbud_pool *pool = zbud_create_pool(gfp);

	return pool;
}
387c795779dSDan Streetman
static void zbud_zpool_destroy(void *pool)
{
	/* zpool glue: tear down the underlying zbud pool */
	zbud_destroy_pool(pool);
}
392c795779dSDan Streetman
static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	/* zpool glue: forward straight to the zbud allocator */
	struct zbud_pool *zbud_pool = pool;

	return zbud_alloc(zbud_pool, size, gfp, handle);
}
static void zbud_zpool_free(void *pool, unsigned long handle)
{
	struct zbud_pool *zbud_pool = pool;

	zbud_free(zbud_pool, handle);
}
402c795779dSDan Streetman
static void *zbud_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	/* the map mode is irrelevant: zbud mappings are plain pointers */
	struct zbud_pool *zbud_pool = pool;

	return zbud_map(zbud_pool, handle);
}
static void zbud_zpool_unmap(void *pool, unsigned long handle)
{
	/* zpool glue: zbud_unmap() is itself a no-op */
	zbud_unmap(pool, handle);
}
412c795779dSDan Streetman
zbud_zpool_total_size(void * pool)413c795779dSDan Streetman static u64 zbud_zpool_total_size(void *pool)
414c795779dSDan Streetman {
415c795779dSDan Streetman return zbud_get_pool_size(pool) * PAGE_SIZE;
416c795779dSDan Streetman }
417c795779dSDan Streetman
/* Glue table hooking zbud into the generic zpool API. */
static struct zpool_driver zbud_zpool_driver = {
	.type =		"zbud",
	.sleep_mapped =	true,
	.owner =	THIS_MODULE,
	.create =	zbud_zpool_create,
	.destroy =	zbud_zpool_destroy,
	.malloc =	zbud_zpool_malloc,
	.free =		zbud_zpool_free,
	.map =		zbud_zpool_map,
	.unmap =	zbud_zpool_unmap,
	.total_size =	zbud_zpool_total_size,
};
430c795779dSDan Streetman
431137f8cffSKees Cook MODULE_ALIAS("zpool-zbud");
4324e2e2770SSeth Jennings
static int __init init_zbud(void)
{
	/* Make sure the zbud header will fit in one chunk */
	BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED);
	pr_info("loaded\n");

	/* make the "zbud" zpool type available to zpool users */
	zpool_register_driver(&zbud_zpool_driver);

	return 0;
}
4434e2e2770SSeth Jennings
static void __exit exit_zbud(void)
{
	/* withdraw the "zbud" zpool type before the module text goes away */
	zpool_unregister_driver(&zbud_zpool_driver);
	pr_info("unloaded\n");
}
4494e2e2770SSeth Jennings
/* standard module entry/exit and metadata */
module_init(init_zbud);
module_exit(exit_zbud);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");
456