// SPDX-License-Identifier: GPL-2.0-only
/*
 * zpool memory storage api
 *
 * Copyright (C) 2014 Dan Streetman
 *
 * This is a common frontend for memory storage pool implementations.
 * Typically, this is used to store compressed memory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/zpool.h>

struct zpool {
	struct zpool_driver *driver;
	void *pool;
	const struct zpool_ops *ops;
	bool evictable;
	bool can_sleep_mapped;

	struct list_head list;
};

static LIST_HEAD(drivers_head);
static DEFINE_SPINLOCK(drivers_lock);

static LIST_HEAD(pools_head);
static DEFINE_SPINLOCK(pools_lock);

/**
 * zpool_register_driver() - register a zpool implementation.
 * @driver: driver to register
 */
void zpool_register_driver(struct zpool_driver *driver)
{
	spin_lock(&drivers_lock);
	atomic_set(&driver->refcount, 0);
	list_add(&driver->list, &drivers_head);
	spin_unlock(&drivers_lock);
}
EXPORT_SYMBOL(zpool_register_driver);

/**
 * zpool_unregister_driver() - unregister a zpool implementation.
 * @driver: driver to unregister.
 *
 * Module usage counting is used to prevent using a driver
 * while/after unloading, so if this is called from the module
 * exit function, it should never fail; if it is called from
 * anywhere other than the module exit function and returns
 * failure, the driver is in use and must remain available.
 */
int zpool_unregister_driver(struct zpool_driver *driver)
{
	int ret = 0, refcount;

	spin_lock(&drivers_lock);
	refcount = atomic_read(&driver->refcount);
	WARN_ON(refcount < 0);
	if (refcount > 0)
		ret = -EBUSY;
	else
		list_del(&driver->list);
	spin_unlock(&drivers_lock);

	return ret;
}
EXPORT_SYMBOL(zpool_unregister_driver);
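
/*
 * Illustrative sketch only: a backend typically fills in a static
 * struct zpool_driver and registers it from its module init code,
 * unregistering it again on module exit.  The "myzpool" name, the
 * callback names and the exact set of struct zpool_driver fields shown
 * here are assumptions; consult include/linux/zpool.h for the
 * authoritative layout.  The MODULE_ALIAS lets the
 * request_module("zpool-%s", type) calls below find the module by
 * type name.
 *
 *	static struct zpool_driver myzpool_driver = {
 *		.type		= "myzpool",
 *		.owner		= THIS_MODULE,
 *		.create		= myzpool_create,
 *		.destroy	= myzpool_destroy,
 *		.malloc		= myzpool_malloc,
 *		.free		= myzpool_free,
 *		.map		= myzpool_map,
 *		.unmap		= myzpool_unmap,
 *		.total_size	= myzpool_total_size,
 *	};
 *
 *	static int __init myzpool_module_init(void)
 *	{
 *		zpool_register_driver(&myzpool_driver);
 *		return 0;
 *	}
 *	module_init(myzpool_module_init);
 *
 *	static void __exit myzpool_module_exit(void)
 *	{
 *		zpool_unregister_driver(&myzpool_driver);
 *	}
 *	module_exit(myzpool_module_exit);
 *
 *	MODULE_ALIAS("zpool-myzpool");
 */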

/* this assumes @type is null-terminated. */
static struct zpool_driver *zpool_get_driver(const char *type)
{
	struct zpool_driver *driver;

	spin_lock(&drivers_lock);
	list_for_each_entry(driver, &drivers_head, list) {
		if (!strcmp(driver->type, type)) {
			bool got = try_module_get(driver->owner);

			if (got)
				atomic_inc(&driver->refcount);
			spin_unlock(&drivers_lock);
			return got ? driver : NULL;
		}
	}

	spin_unlock(&drivers_lock);
	return NULL;
}

static void zpool_put_driver(struct zpool_driver *driver)
{
	atomic_dec(&driver->refcount);
	module_put(driver->owner);
}

/**
 * zpool_has_pool() - Check if the pool driver is available
 * @type: The type of the zpool to check (e.g. zbud, zsmalloc)
 *
 * This checks if the @type pool driver is available. It will try to load
 * the requested module, if needed, but there is no guarantee the module will
 * still be loaded and available immediately after calling. If this returns
 * true, the caller should assume the pool is available, but must be prepared
 * to handle @zpool_create_pool() returning failure. However, if this
 * returns false, the caller should assume the requested pool type is not
 * available; either the requested pool type module does not exist, or could
 * not be loaded, and calling @zpool_create_pool() with the pool type will
 * fail.
 *
 * The @type string must be null-terminated.
 *
 * Returns: true if @type pool is available, false if not
 */
bool zpool_has_pool(char *type)
{
	struct zpool_driver *driver = zpool_get_driver(type);

	if (!driver) {
		request_module("zpool-%s", type);
		driver = zpool_get_driver(type);
	}

	if (!driver)
		return false;

	zpool_put_driver(driver);
	return true;
}
EXPORT_SYMBOL(zpool_has_pool);
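
/*
 * Illustrative sketch only: a caller such as zswap can probe for a backend
 * before creating a pool and fall back to another type if it is missing.
 * The type names and the fallback policy are just examples, not anything
 * this file prescribes.
 *
 *	if (!zpool_has_pool("zsmalloc")) {
 *		pr_info("zsmalloc unavailable, trying zbud\n");
 *		if (!zpool_has_pool("zbud"))
 *			return -ENODEV;
 *	}
 *
 * Even when this returns true the module could be unloaded again before
 * zpool_create_pool() runs, so pool creation must still be checked for
 * failure, as documented above.
 */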

/**
 * zpool_create_pool() - Create a new zpool
 * @type: The type of the zpool to create (e.g. zbud, zsmalloc)
 * @name: The name of the zpool (e.g. zram0, zswap)
 * @gfp: The GFP flags to use when allocating the pool.
 * @ops: The optional ops callback.
 *
 * This creates a new zpool of the specified type. The gfp flags will be
 * used when allocating memory, if the implementation supports it. If the
 * ops param is NULL, then the created zpool will not be evictable.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * The @type and @name strings must be null-terminated.
 *
 * Returns: New zpool on success, NULL on failure.
 */
struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,
		const struct zpool_ops *ops)
{
	struct zpool_driver *driver;
	struct zpool *zpool;

	pr_debug("creating pool type %s\n", type);

	driver = zpool_get_driver(type);

	if (!driver) {
		request_module("zpool-%s", type);
		driver = zpool_get_driver(type);
	}

	if (!driver) {
		pr_err("no driver for type %s\n", type);
		return NULL;
	}

	zpool = kmalloc(sizeof(*zpool), gfp);
	if (!zpool) {
		pr_err("couldn't create zpool - out of memory\n");
		zpool_put_driver(driver);
		return NULL;
	}

	zpool->driver = driver;
	zpool->pool = driver->create(name, gfp, ops, zpool);
	zpool->ops = ops;
	zpool->evictable = driver->shrink && ops && ops->evict;
	zpool->can_sleep_mapped = driver->sleep_mapped;

	if (!zpool->pool) {
		pr_err("couldn't create %s pool\n", type);
		zpool_put_driver(driver);
		kfree(zpool);
		return NULL;
	}

	pr_debug("created pool type %s\n", type);

	spin_lock(&pools_lock);
	list_add(&zpool->list, &pools_head);
	spin_unlock(&pools_lock);

	return zpool;
}
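
/*
 * Illustrative sketch only: creating an evictable pool and tearing it down
 * again.  The names are made up for the example; the evict callback
 * signature follows struct zpool_ops in include/linux/zpool.h, and a real
 * callback would write the object back and free the handle rather than
 * just failing.
 *
 *	static int my_evict(struct zpool *pool, unsigned long handle)
 *	{
 *		return -EINVAL;
 *	}
 *
 *	static const struct zpool_ops my_ops = {
 *		.evict = my_evict,
 *	};
 *
 *	struct zpool *pool;
 *
 *	pool = zpool_create_pool("zbud", "zswap", GFP_KERNEL, &my_ops);
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	zpool_destroy_pool(pool);
 */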

/**
 * zpool_destroy_pool() - Destroy a zpool
 * @zpool: The zpool to destroy.
 *
 * Implementations must guarantee this to be thread-safe,
 * however only when destroying different pools. The same
 * pool should only be destroyed once, and should not be used
 * after it is destroyed.
 *
 * This destroys an existing zpool. The zpool should not be in use.
 */
void zpool_destroy_pool(struct zpool *zpool)
{
	pr_debug("destroying pool type %s\n", zpool->driver->type);

	spin_lock(&pools_lock);
	list_del(&zpool->list);
	spin_unlock(&pools_lock);
	zpool->driver->destroy(zpool->pool);
	zpool_put_driver(zpool->driver);
	kfree(zpool);
}

/**
 * zpool_get_type() - Get the type of the zpool
 * @zpool: The zpool to check
 *
 * This returns the type of the pool.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: The type of zpool.
 */
const char *zpool_get_type(struct zpool *zpool)
{
	return zpool->driver->type;
}

/**
 * zpool_malloc_support_movable() - Check if the zpool supports
 *	allocating movable memory
 * @zpool: The zpool to check
 *
 * This returns whether the zpool supports allocating movable memory.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: true if the zpool supports allocating movable memory, false if not
 */
bool zpool_malloc_support_movable(struct zpool *zpool)
{
	return zpool->driver->malloc_support_movable;
}

/**
 * zpool_malloc() - Allocate memory
 * @zpool: The zpool to allocate from.
 * @size: The amount of memory to allocate.
 * @gfp: The GFP flags to use when allocating memory.
 * @handle: Pointer to the handle to set
 *
 * This allocates the requested amount of memory from the pool.
 * The gfp flags will be used when allocating memory, if the
 * implementation supports it. The provided @handle will be
 * set to the allocated object handle.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: 0 on success, negative value on error.
 */
int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,
		unsigned long *handle)
{
	return zpool->driver->malloc(zpool->pool, size, gfp, handle);
}

/**
 * zpool_free() - Free previously allocated memory
 * @zpool: The zpool that allocated the memory.
 * @handle: The handle to the memory to free.
 *
 * This frees previously allocated memory. This does not guarantee
 * that the pool will actually free memory, only that the memory
 * in the pool will become available for use by the pool.
 *
 * Implementations must guarantee this to be thread-safe,
 * however only when freeing different handles. The same
 * handle should only be freed once, and should not be used
 * after freeing.
 */
void zpool_free(struct zpool *zpool, unsigned long handle)
{
	zpool->driver->free(zpool->pool, handle);
}
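
/*
 * Illustrative sketch only: allocating space for a compressed object,
 * then releasing it.  "pool" and "dlen" are assumed to exist in the
 * caller, and the GFP flags are only one example of a low-disruption
 * allocation policy.
 *
 *	unsigned long handle;
 *	int ret;
 *
 *	ret = zpool_malloc(pool, dlen, __GFP_NORETRY | __GFP_NOWARN, &handle);
 *	if (ret)
 *		return ret;
 *	...
 *	zpool_free(pool, handle);
 */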

/**
 * zpool_shrink() - Shrink the pool size
 * @zpool: The zpool to shrink.
 * @pages: The number of pages to shrink the pool.
 * @reclaimed: The number of pages successfully evicted.
 *
 * This attempts to shrink the actual memory size of the pool
 * by evicting currently used handle(s). If the pool was
 * created with no zpool_ops, or the evict call fails for any
 * of the handles, this will fail. If non-NULL, the @reclaimed
 * parameter will be set to the number of pages reclaimed,
 * which may be more than the number of pages requested.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: 0 on success, negative value on error/failure.
 */
int zpool_shrink(struct zpool *zpool, unsigned int pages,
		unsigned int *reclaimed)
{
	return zpool->driver->shrink ?
	       zpool->driver->shrink(zpool->pool, pages, reclaimed) : -EINVAL;
}
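
/*
 * Illustrative sketch only: a zswap-style user asking the pool to evict
 * one page's worth of objects under memory pressure.  Whether eviction
 * can work at all is reported by zpool_evictable() further down.
 *
 *	unsigned int reclaimed;
 *
 *	if (zpool_evictable(pool) && !zpool_shrink(pool, 1, &reclaimed))
 *		pr_debug("reclaimed %u pages\n", reclaimed);
 */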

/**
 * zpool_map_handle() - Map a previously allocated handle into memory
 * @zpool: The zpool that the handle was allocated from
 * @handle: The handle to map
 * @mapmode: How the memory should be mapped
 *
 * This maps a previously allocated handle into memory. The @mapmode
 * param indicates to the implementation how the memory will be
 * used, i.e. read-only, write-only, read-write. If the
 * implementation does not support it, the memory will be treated
 * as read-write.
 *
 * This may hold locks, disable interrupts, and/or preemption,
 * and the zpool_unmap_handle() must be called to undo those
 * actions. The code that uses the mapped handle should complete
 * its operations on the mapped handle memory quickly and unmap
 * as soon as possible. As the implementation may use per-cpu
 * data, multiple handles should not be mapped concurrently on
 * any cpu.
 *
 * Returns: A pointer to the handle's mapped memory area.
 */
void *zpool_map_handle(struct zpool *zpool, unsigned long handle,
		enum zpool_mapmode mapmode)
{
	return zpool->driver->map(zpool->pool, handle, mapmode);
}

/**
 * zpool_unmap_handle() - Unmap a previously mapped handle
 * @zpool: The zpool that the handle was allocated from
 * @handle: The handle to unmap
 *
 * This unmaps a previously mapped handle. Any locks or other
 * actions that the implementation took in zpool_map_handle()
 * will be undone here. The memory area returned from
 * zpool_map_handle() should no longer be used after this.
 */
void zpool_unmap_handle(struct zpool *zpool, unsigned long handle)
{
	zpool->driver->unmap(zpool->pool, handle);
}
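
/*
 * Illustrative sketch only: the mapped window may be an atomic context,
 * so the usual pattern is map, copy or (de)compress, then unmap right
 * away.  "pool", "handle", "dst" and "dlen" are assumed to exist in the
 * caller.
 *
 *	void *src;
 *
 *	src = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
 *	memcpy(dst, src, dlen);
 *	zpool_unmap_handle(pool, handle);
 */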

/**
 * zpool_get_total_size() - The total size of the pool
 * @zpool: The zpool to check
 *
 * This returns the total size in bytes of the pool.
 *
 * Returns: Total size of the zpool in bytes.
 */
u64 zpool_get_total_size(struct zpool *zpool)
{
	return zpool->driver->total_size(zpool->pool);
}

/**
 * zpool_evictable() - Test if zpool is potentially evictable
 * @zpool: The zpool to test
 *
 * Zpool is only potentially evictable when it's created with struct
 * zpool_ops.evict and its driver implements struct zpool_driver.shrink.
 *
 * However, it doesn't necessarily mean the driver will use zpool_ops.evict
 * in its implementation of zpool_driver.shrink. It could do internal
 * defragmentation instead.
 *
 * Returns: true if potentially evictable; false otherwise.
 */
bool zpool_evictable(struct zpool *zpool)
{
	return zpool->evictable;
}

/**
 * zpool_can_sleep_mapped - Test if zpool can sleep while a handle is mapped.
 * @zpool: The zpool to test
 *
 * Some allocators enter a non-preemptible context in their ->map() callback
 * (e.g. they disable pagefaults) and only leave it in ->unmap(), which limits
 * what can be done with the mapped object; for instance, the caller cannot
 * wait for the asynchronous crypto API or take mutexes while the object is
 * mapped, since those call into the scheduler. This function tells the
 * caller whether the backend in use is such an allocator.
 *
 * Returns: true if zpool can sleep; false otherwise.
 */
bool zpool_can_sleep_mapped(struct zpool *zpool)
{
	return zpool->can_sleep_mapped;
}
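
/*
 * Illustrative sketch only, mirroring the zswap pattern: when the backend
 * cannot sleep while a handle is mapped, a caller that may block (e.g. one
 * waiting on the asynchronous crypto API) copies the object into a private
 * bounce buffer and unmaps before doing the blocking work.  "pool",
 * "handle", "tmp" and "dlen" are assumed to exist in the caller.
 *
 *	u8 *src;
 *
 *	src = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
 *	if (!zpool_can_sleep_mapped(pool)) {
 *		memcpy(tmp, src, dlen);
 *		src = tmp;
 *		zpool_unmap_handle(pool, handle);
 *	}
 *
 *	... decompress from src; this may now sleep safely ...
 *
 *	if (zpool_can_sleep_mapped(pool))
 *		zpool_unmap_handle(pool, handle);
 */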

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
MODULE_DESCRIPTION("Common API for compressed memory storage");