xref: /openbmc/linux/mm/zpool.c (revision 3805e6a1)
1 /*
2  * zpool memory storage api
3  *
4  * Copyright (C) 2014 Dan Streetman
5  *
6  * This is a common frontend for memory storage pool implementations.
7  * Typically, this is used to store compressed memory.
8  */
9 
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 
12 #include <linux/list.h>
13 #include <linux/types.h>
14 #include <linux/mm.h>
15 #include <linux/slab.h>
16 #include <linux/spinlock.h>
17 #include <linux/module.h>
18 #include <linux/zpool.h>
19 
/*
 * struct zpool - generic handle for one pool instance, returned by
 * zpool_create_pool() and passed to every other zpool_* call.
 */
struct zpool {
	struct zpool_driver *driver;	/* backend implementation (zbud, zsmalloc, ...) */
	void *pool;			/* backend-private pool, from driver->create() */
	const struct zpool_ops *ops;	/* optional caller callbacks; NULL means not shrinkable */

	struct list_head list;		/* entry on pools_head, protected by pools_lock */
};
27 
/* All registered backend drivers; additions/removals under drivers_lock. */
static LIST_HEAD(drivers_head);
static DEFINE_SPINLOCK(drivers_lock);

/* All currently existing pools; additions/removals under pools_lock. */
static LIST_HEAD(pools_head);
static DEFINE_SPINLOCK(pools_lock);
33 
/**
 * zpool_register_driver() - register a zpool implementation.
 * @driver:	driver to register
 *
 * Resets the driver's refcount and adds it to drivers_head, making it
 * immediately visible to zpool_get_driver().
 */
void zpool_register_driver(struct zpool_driver *driver)
{
	spin_lock(&drivers_lock);
	/* Fresh registration: no pool holds a reference yet. */
	atomic_set(&driver->refcount, 0);
	list_add(&driver->list, &drivers_head);
	spin_unlock(&drivers_lock);
}
EXPORT_SYMBOL(zpool_register_driver);
46 
47 /**
48  * zpool_unregister_driver() - unregister a zpool implementation.
49  * @driver:	driver to unregister.
50  *
51  * Module usage counting is used to prevent using a driver
52  * while/after unloading, so if this is called from module
53  * exit function, this should never fail; if called from
54  * other than the module exit function, and this returns
55  * failure, the driver is in use and must remain available.
56  */
57 int zpool_unregister_driver(struct zpool_driver *driver)
58 {
59 	int ret = 0, refcount;
60 
61 	spin_lock(&drivers_lock);
62 	refcount = atomic_read(&driver->refcount);
63 	WARN_ON(refcount < 0);
64 	if (refcount > 0)
65 		ret = -EBUSY;
66 	else
67 		list_del(&driver->list);
68 	spin_unlock(&drivers_lock);
69 
70 	return ret;
71 }
72 EXPORT_SYMBOL(zpool_unregister_driver);
73 
74 /* this assumes @type is null-terminated. */
75 static struct zpool_driver *zpool_get_driver(const char *type)
76 {
77 	struct zpool_driver *driver;
78 
79 	spin_lock(&drivers_lock);
80 	list_for_each_entry(driver, &drivers_head, list) {
81 		if (!strcmp(driver->type, type)) {
82 			bool got = try_module_get(driver->owner);
83 
84 			if (got)
85 				atomic_inc(&driver->refcount);
86 			spin_unlock(&drivers_lock);
87 			return got ? driver : NULL;
88 		}
89 	}
90 
91 	spin_unlock(&drivers_lock);
92 	return NULL;
93 }
94 
/*
 * Release one usage of @driver: drops the refcount and the module
 * reference taken by zpool_get_driver().  Note the order - the driver
 * refcount is dropped before the module reference is released.
 */
static void zpool_put_driver(struct zpool_driver *driver)
{
	atomic_dec(&driver->refcount);
	module_put(driver->owner);
}
100 
101 /**
102  * zpool_has_pool() - Check if the pool driver is available
103  * @type	The type of the zpool to check (e.g. zbud, zsmalloc)
104  *
105  * This checks if the @type pool driver is available.  This will try to load
106  * the requested module, if needed, but there is no guarantee the module will
107  * still be loaded and available immediately after calling.  If this returns
108  * true, the caller should assume the pool is available, but must be prepared
109  * to handle the @zpool_create_pool() returning failure.  However if this
110  * returns false, the caller should assume the requested pool type is not
111  * available; either the requested pool type module does not exist, or could
112  * not be loaded, and calling @zpool_create_pool() with the pool type will
113  * fail.
114  *
115  * The @type string must be null-terminated.
116  *
117  * Returns: true if @type pool is available, false if not
118  */
119 bool zpool_has_pool(char *type)
120 {
121 	struct zpool_driver *driver = zpool_get_driver(type);
122 
123 	if (!driver) {
124 		request_module("zpool-%s", type);
125 		driver = zpool_get_driver(type);
126 	}
127 
128 	if (!driver)
129 		return false;
130 
131 	zpool_put_driver(driver);
132 	return true;
133 }
134 EXPORT_SYMBOL(zpool_has_pool);
135 
136 /**
137  * zpool_create_pool() - Create a new zpool
138  * @type	The type of the zpool to create (e.g. zbud, zsmalloc)
139  * @name	The name of the zpool (e.g. zram0, zswap)
140  * @gfp		The GFP flags to use when allocating the pool.
141  * @ops		The optional ops callback.
142  *
143  * This creates a new zpool of the specified type.  The gfp flags will be
144  * used when allocating memory, if the implementation supports it.  If the
145  * ops param is NULL, then the created zpool will not be shrinkable.
146  *
147  * Implementations must guarantee this to be thread-safe.
148  *
149  * The @type and @name strings must be null-terminated.
150  *
151  * Returns: New zpool on success, NULL on failure.
152  */
153 struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,
154 		const struct zpool_ops *ops)
155 {
156 	struct zpool_driver *driver;
157 	struct zpool *zpool;
158 
159 	pr_debug("creating pool type %s\n", type);
160 
161 	driver = zpool_get_driver(type);
162 
163 	if (!driver) {
164 		request_module("zpool-%s", type);
165 		driver = zpool_get_driver(type);
166 	}
167 
168 	if (!driver) {
169 		pr_err("no driver for type %s\n", type);
170 		return NULL;
171 	}
172 
173 	zpool = kmalloc(sizeof(*zpool), gfp);
174 	if (!zpool) {
175 		pr_err("couldn't create zpool - out of memory\n");
176 		zpool_put_driver(driver);
177 		return NULL;
178 	}
179 
180 	zpool->driver = driver;
181 	zpool->pool = driver->create(name, gfp, ops, zpool);
182 	zpool->ops = ops;
183 
184 	if (!zpool->pool) {
185 		pr_err("couldn't create %s pool\n", type);
186 		zpool_put_driver(driver);
187 		kfree(zpool);
188 		return NULL;
189 	}
190 
191 	pr_debug("created pool type %s\n", type);
192 
193 	spin_lock(&pools_lock);
194 	list_add(&zpool->list, &pools_head);
195 	spin_unlock(&pools_lock);
196 
197 	return zpool;
198 }
199 
200 /**
201  * zpool_destroy_pool() - Destroy a zpool
202  * @pool	The zpool to destroy.
203  *
204  * Implementations must guarantee this to be thread-safe,
205  * however only when destroying different pools.  The same
206  * pool should only be destroyed once, and should not be used
207  * after it is destroyed.
208  *
209  * This destroys an existing zpool.  The zpool should not be in use.
210  */
211 void zpool_destroy_pool(struct zpool *zpool)
212 {
213 	pr_debug("destroying pool type %s\n", zpool->driver->type);
214 
215 	spin_lock(&pools_lock);
216 	list_del(&zpool->list);
217 	spin_unlock(&pools_lock);
218 	zpool->driver->destroy(zpool->pool);
219 	zpool_put_driver(zpool->driver);
220 	kfree(zpool);
221 }
222 
223 /**
224  * zpool_get_type() - Get the type of the zpool
225  * @pool	The zpool to check
226  *
227  * This returns the type of the pool.
228  *
229  * Implementations must guarantee this to be thread-safe.
230  *
231  * Returns: The type of zpool.
232  */
233 const char *zpool_get_type(struct zpool *zpool)
234 {
235 	return zpool->driver->type;
236 }
237 
238 /**
239  * zpool_malloc() - Allocate memory
240  * @pool	The zpool to allocate from.
241  * @size	The amount of memory to allocate.
242  * @gfp		The GFP flags to use when allocating memory.
243  * @handle	Pointer to the handle to set
244  *
245  * This allocates the requested amount of memory from the pool.
246  * The gfp flags will be used when allocating memory, if the
247  * implementation supports it.  The provided @handle will be
248  * set to the allocated object handle.
249  *
250  * Implementations must guarantee this to be thread-safe.
251  *
252  * Returns: 0 on success, negative value on error.
253  */
254 int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,
255 			unsigned long *handle)
256 {
257 	return zpool->driver->malloc(zpool->pool, size, gfp, handle);
258 }
259 
260 /**
261  * zpool_free() - Free previously allocated memory
262  * @pool	The zpool that allocated the memory.
263  * @handle	The handle to the memory to free.
264  *
265  * This frees previously allocated memory.  This does not guarantee
266  * that the pool will actually free memory, only that the memory
267  * in the pool will become available for use by the pool.
268  *
269  * Implementations must guarantee this to be thread-safe,
270  * however only when freeing different handles.  The same
271  * handle should only be freed once, and should not be used
272  * after freeing.
273  */
274 void zpool_free(struct zpool *zpool, unsigned long handle)
275 {
276 	zpool->driver->free(zpool->pool, handle);
277 }
278 
279 /**
280  * zpool_shrink() - Shrink the pool size
281  * @pool	The zpool to shrink.
282  * @pages	The number of pages to shrink the pool.
283  * @reclaimed	The number of pages successfully evicted.
284  *
285  * This attempts to shrink the actual memory size of the pool
286  * by evicting currently used handle(s).  If the pool was
287  * created with no zpool_ops, or the evict call fails for any
288  * of the handles, this will fail.  If non-NULL, the @reclaimed
289  * parameter will be set to the number of pages reclaimed,
290  * which may be more than the number of pages requested.
291  *
292  * Implementations must guarantee this to be thread-safe.
293  *
294  * Returns: 0 on success, negative value on error/failure.
295  */
296 int zpool_shrink(struct zpool *zpool, unsigned int pages,
297 			unsigned int *reclaimed)
298 {
299 	return zpool->driver->shrink(zpool->pool, pages, reclaimed);
300 }
301 
302 /**
303  * zpool_map_handle() - Map a previously allocated handle into memory
304  * @pool	The zpool that the handle was allocated from
305  * @handle	The handle to map
306  * @mm		How the memory should be mapped
307  *
308  * This maps a previously allocated handle into memory.  The @mm
309  * param indicates to the implementation how the memory will be
310  * used, i.e. read-only, write-only, read-write.  If the
311  * implementation does not support it, the memory will be treated
312  * as read-write.
313  *
314  * This may hold locks, disable interrupts, and/or preemption,
315  * and the zpool_unmap_handle() must be called to undo those
316  * actions.  The code that uses the mapped handle should complete
317  * its operatons on the mapped handle memory quickly and unmap
318  * as soon as possible.  As the implementation may use per-cpu
319  * data, multiple handles should not be mapped concurrently on
320  * any cpu.
321  *
322  * Returns: A pointer to the handle's mapped memory area.
323  */
324 void *zpool_map_handle(struct zpool *zpool, unsigned long handle,
325 			enum zpool_mapmode mapmode)
326 {
327 	return zpool->driver->map(zpool->pool, handle, mapmode);
328 }
329 
330 /**
331  * zpool_unmap_handle() - Unmap a previously mapped handle
332  * @pool	The zpool that the handle was allocated from
333  * @handle	The handle to unmap
334  *
335  * This unmaps a previously mapped handle.  Any locks or other
336  * actions that the implementation took in zpool_map_handle()
337  * will be undone here.  The memory area returned from
338  * zpool_map_handle() should no longer be used after this.
339  */
340 void zpool_unmap_handle(struct zpool *zpool, unsigned long handle)
341 {
342 	zpool->driver->unmap(zpool->pool, handle);
343 }
344 
345 /**
346  * zpool_get_total_size() - The total size of the pool
347  * @pool	The zpool to check
348  *
349  * This returns the total size in bytes of the pool.
350  *
351  * Returns: Total size of the zpool in bytes.
352  */
353 u64 zpool_get_total_size(struct zpool *zpool)
354 {
355 	return zpool->driver->total_size(zpool->pool);
356 }
357 
/* Module metadata. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
MODULE_DESCRIPTION("Common API for compressed memory storage");
361