1/*
2 * Copyright (C) 2014 Sergey Senozhatsky.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/string.h>
12#include <linux/err.h>
13#include <linux/slab.h>
14#include <linux/wait.h>
15#include <linux/sched.h>
16
17#include "zcomp.h"
18#include "zcomp_lzo.h"
19#ifdef CONFIG_ZRAM_LZ4_COMPRESS
20#include "zcomp_lz4.h"
21#endif
22
/*
 * single zcomp_strm backend: the whole device shares one compression
 * stream, serialized by a mutex (used when max_comp_streams == 1)
 */
struct zcomp_strm_single {
	/* held from strm_find() until strm_release() */
	struct mutex strm_lock;
	struct zcomp_strm *zstrm;
};
30
/*
 * multi zcomp_strm backend: a pool of up to max_strm streams, grown
 * on demand and handed out from an idle list
 */
struct zcomp_strm_multi {
	/* protect strm list */
	spinlock_t strm_lock;
	/* max possible number of zstrm streams */
	int max_strm;
	/* number of available zstrm streams */
	int avail_strm;
	/* list of available strms */
	struct list_head idle_strm;
	/* waiters blocked until a stream shows up on idle_strm */
	wait_queue_head_t strm_wait;
};
45
/* NULL-terminated table of compiled-in compression backends */
static struct zcomp_backend *backends[] = {
	&zcomp_lzo,
#ifdef CONFIG_ZRAM_LZ4_COMPRESS
	&zcomp_lz4,
#endif
	NULL
};
53
54static struct zcomp_backend *find_backend(const char *compress)
55{
56 int i = 0;
57 while (backends[i]) {
58 if (sysfs_streq(compress, backends[i]->name))
59 break;
60 i++;
61 }
62 return backends[i];
63}
64
65static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
66{
67 if (zstrm->private)
68 comp->backend->destroy(zstrm->private);
69 free_pages((unsigned long)zstrm->buffer, 1);
70 kfree(zstrm);
71}
72
73/*
74 * allocate new zcomp_strm structure with ->private initialized by
75 * backend, return NULL on error
76 */
77static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
78{
79 struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
80 if (!zstrm)
81 return NULL;
82
83 zstrm->private = comp->backend->create();
84 /*
85 * allocate 2 pages. 1 for compressed data, plus 1 extra for the
86 * case when compressed size is larger than the original one
87 */
88 zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
89 if (!zstrm->private || !zstrm->buffer) {
90 zcomp_strm_free(comp, zstrm);
91 zstrm = NULL;
92 }
93 return zstrm;
94}
95
/*
 * get idle zcomp_strm or wait until other process release
 * (zcomp_strm_release()) one for us
 */
static struct zcomp_strm *zcomp_strm_multi_find(struct zcomp *comp)
{
	struct zcomp_strm_multi *zs = comp->stream;
	struct zcomp_strm *zstrm;

	while (1) {
		spin_lock(&zs->strm_lock);
		if (!list_empty(&zs->idle_strm)) {
			zstrm = list_entry(zs->idle_strm.next,
					struct zcomp_strm, list);
			list_del(&zstrm->list);
			spin_unlock(&zs->strm_lock);
			return zstrm;
		}
		/* zstrm streams limit reached, wait for idle stream */
		if (zs->avail_strm >= zs->max_strm) {
			spin_unlock(&zs->strm_lock);
			wait_event(zs->strm_wait, !list_empty(&zs->idle_strm));
			/*
			 * re-check under the lock: another task may have
			 * grabbed the stream that woke us
			 */
			continue;
		}
		/*
		 * reserve a pool slot (avail_strm++) before dropping the
		 * lock; the GFP_KERNEL allocation below may sleep and must
		 * not run under the spinlock
		 */
		zs->avail_strm++;
		spin_unlock(&zs->strm_lock);

		zstrm = zcomp_strm_alloc(comp);
		if (!zstrm) {
			/* give the reserved slot back and wait instead */
			spin_lock(&zs->strm_lock);
			zs->avail_strm--;
			spin_unlock(&zs->strm_lock);
			wait_event(zs->strm_wait, !list_empty(&zs->idle_strm));
			continue;
		}
		break;
	}
	return zstrm;
}
136
/* add stream back to idle list and wake up waiter or free the stream */
static void zcomp_strm_multi_release(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	struct zcomp_strm_multi *zs = comp->stream;

	spin_lock(&zs->strm_lock);
	if (zs->avail_strm <= zs->max_strm) {
		list_add(&zstrm->list, &zs->idle_strm);
		spin_unlock(&zs->strm_lock);
		/* wake_up() outside the lock; waiters re-take it anyway */
		wake_up(&zs->strm_wait);
		return;
	}

	/*
	 * avail_strm can exceed max_strm after the limit was lowered via
	 * zcomp_strm_multi_set_max_streams(); shrink the pool by freeing
	 * this excess stream instead of recycling it
	 */
	zs->avail_strm--;
	spin_unlock(&zs->strm_lock);
	zcomp_strm_free(comp, zstrm);
}
154
/* change max_strm limit; always succeeds for the multi backend */
static bool zcomp_strm_multi_set_max_streams(struct zcomp *comp, int num_strm)
{
	struct zcomp_strm_multi *zs = comp->stream;
	struct zcomp_strm *zstrm;

	spin_lock(&zs->strm_lock);
	zs->max_strm = num_strm;
	/*
	 * if user has lowered the limit and there are idle streams,
	 * immediately free as much streams (and memory) as we can.
	 * Busy streams are trimmed later, by zcomp_strm_multi_release().
	 */
	while (zs->avail_strm > num_strm && !list_empty(&zs->idle_strm)) {
		zstrm = list_entry(zs->idle_strm.next,
				struct zcomp_strm, list);
		list_del(&zstrm->list);
		zcomp_strm_free(comp, zstrm);
		zs->avail_strm--;
	}
	spin_unlock(&zs->strm_lock);
	return true;
}
177
178static void zcomp_strm_multi_destroy(struct zcomp *comp)
179{
180 struct zcomp_strm_multi *zs = comp->stream;
181 struct zcomp_strm *zstrm;
182
183 while (!list_empty(&zs->idle_strm)) {
184 zstrm = list_entry(zs->idle_strm.next,
185 struct zcomp_strm, list);
186 list_del(&zstrm->list);
187 zcomp_strm_free(comp, zstrm);
188 }
189 kfree(zs);
190}
191
192static int zcomp_strm_multi_create(struct zcomp *comp, int max_strm)
193{
194 struct zcomp_strm *zstrm;
195 struct zcomp_strm_multi *zs;
196
197 comp->destroy = zcomp_strm_multi_destroy;
198 comp->strm_find = zcomp_strm_multi_find;
199 comp->strm_release = zcomp_strm_multi_release;
200 comp->set_max_streams = zcomp_strm_multi_set_max_streams;
201 zs = kmalloc(sizeof(struct zcomp_strm_multi), GFP_KERNEL);
202 if (!zs)
203 return -ENOMEM;
204
205 comp->stream = zs;
206 spin_lock_init(&zs->strm_lock);
207 INIT_LIST_HEAD(&zs->idle_strm);
208 init_waitqueue_head(&zs->strm_wait);
209 zs->max_strm = max_strm;
210 zs->avail_strm = 1;
211
212 zstrm = zcomp_strm_alloc(comp);
213 if (!zstrm) {
214 kfree(zs);
215 return -ENOMEM;
216 }
217 list_add(&zstrm->list, &zs->idle_strm);
218 return 0;
219}
220
/*
 * return the one and only stream; NOTE: the mutex is taken here and
 * stays held until zcomp_strm_single_release(), so this may sleep
 */
static struct zcomp_strm *zcomp_strm_single_find(struct zcomp *comp)
{
	struct zcomp_strm_single *zs = comp->stream;
	mutex_lock(&zs->strm_lock);
	return zs->zstrm;
}
227
/* drop the mutex taken in zcomp_strm_single_find(); @zstrm is unused */
static void zcomp_strm_single_release(struct zcomp *comp,
		struct zcomp_strm *zstrm)
{
	struct zcomp_strm_single *zs = comp->stream;
	mutex_unlock(&zs->strm_lock);
}
234
/* refuse any limit change: zcomp_strm_single supports only max_comp_streams == 1 */
static bool zcomp_strm_single_set_max_streams(struct zcomp *comp, int num_strm)
{
	/* zcomp_strm_single support only max_comp_streams == 1 */
	return false;
}
240
241static void zcomp_strm_single_destroy(struct zcomp *comp)
242{
243 struct zcomp_strm_single *zs = comp->stream;
244 zcomp_strm_free(comp, zs->zstrm);
245 kfree(zs);
246}
247
248static int zcomp_strm_single_create(struct zcomp *comp)
249{
250 struct zcomp_strm_single *zs;
251
252 comp->destroy = zcomp_strm_single_destroy;
253 comp->strm_find = zcomp_strm_single_find;
254 comp->strm_release = zcomp_strm_single_release;
255 comp->set_max_streams = zcomp_strm_single_set_max_streams;
256 zs = kmalloc(sizeof(struct zcomp_strm_single), GFP_KERNEL);
257 if (!zs)
258 return -ENOMEM;
259
260 comp->stream = zs;
261 mutex_init(&zs->strm_lock);
262 zs->zstrm = zcomp_strm_alloc(comp);
263 if (!zs->zstrm) {
264 kfree(zs);
265 return -ENOMEM;
266 }
267 return 0;
268}
269
270/* show available compressors */
271ssize_t zcomp_available_show(const char *comp, char *buf)
272{
273 ssize_t sz = 0;
274 int i = 0;
275
276 while (backends[i]) {
277 if (sysfs_streq(comp, backends[i]->name))
278 sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
279 "[%s] ", backends[i]->name);
280 else
281 sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
282 "%s ", backends[i]->name);
283 i++;
284 }
285 sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
286 return sz;
287}
288
/* try to change the stream limit; false if the backend can't (single) */
bool zcomp_set_max_streams(struct zcomp *comp, int num_strm)
{
	return comp->set_max_streams(comp, num_strm);
}
293
/* acquire a compression stream; may sleep waiting for one (backend-dependent) */
struct zcomp_strm *zcomp_strm_find(struct zcomp *comp)
{
	return comp->strm_find(comp);
}
298
/* give back a stream obtained via zcomp_strm_find() */
void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	comp->strm_release(comp, zstrm);
}
303
/*
 * compress @src into zstrm->buffer, storing the compressed size in
 * *dst_len.  The buffer is 2 pages (see zcomp_strm_alloc()), so output
 * larger than the input cannot overflow.  NOTE(review): the backend API
 * takes no source length — presumably @src is always PAGE_SIZE; confirm
 * against callers.
 */
int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
		const unsigned char *src, size_t *dst_len)
{
	return comp->backend->compress(src, zstrm->buffer, dst_len,
			zstrm->private);
}
310
/* decompress @src_len bytes from @src into @dst; needs no stream state */
int zcomp_decompress(struct zcomp *comp, const unsigned char *src,
		size_t src_len, unsigned char *dst)
{
	return comp->backend->decompress(src, src_len, dst);
}
316
/* tear down the backend-specific stream state, then free @comp itself */
void zcomp_destroy(struct zcomp *comp)
{
	comp->destroy(comp);
	kfree(comp);
}
322
323/*
324 * search available compressors for requested algorithm.
325 * allocate new zcomp and initialize it. return compressing
326 * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
327 * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
328 * case of allocation error.
329 */
330struct zcomp *zcomp_create(const char *compress, int max_strm)
331{
332 struct zcomp *comp;
333 struct zcomp_backend *backend;
334
335 backend = find_backend(compress);
336 if (!backend)
337 return ERR_PTR(-EINVAL);
338
339 comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
340 if (!comp)
341 return ERR_PTR(-ENOMEM);
342
343 comp->backend = backend;
344 if (max_strm > 1)
345 zcomp_strm_multi_create(comp, max_strm);
346 else
347 zcomp_strm_single_create(comp);
348 if (!comp->stream) {
349 kfree(comp);
350 return ERR_PTR(-ENOMEM);
351 }
352 return comp;
353}
1// SPDX-License-Identifier: GPL-2.0-or-later
2
3#include <linux/kernel.h>
4#include <linux/string.h>
5#include <linux/err.h>
6#include <linux/slab.h>
7#include <linux/wait.h>
8#include <linux/sched.h>
9#include <linux/cpu.h>
10#include <linux/crypto.h>
11#include <linux/vmalloc.h>
12
13#include "zcomp.h"
14
15#include "backend_lzo.h"
16#include "backend_lzorle.h"
17#include "backend_lz4.h"
18#include "backend_lz4hc.h"
19#include "backend_zstd.h"
20#include "backend_deflate.h"
21#include "backend_842.h"
22
/*
 * NULL-terminated table of compiled-in compression backends; the order
 * here is the order zcomp_available_show() lists them in
 */
static const struct zcomp_ops *backends[] = {
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZO)
	&backend_lzorle,
	&backend_lzo,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4)
	&backend_lz4,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4HC)
	&backend_lz4hc,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_ZSTD)
	&backend_zstd,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_DEFLATE)
	&backend_deflate,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_842)
	&backend_842,
#endif
	NULL
};
45
/*
 * release a stream's backend context and buffer.  The zcomp_strm object
 * itself is per-CPU storage and is not freed here; ->buffer is NULLed
 * so the slot is left in a clean state for a later re-init.
 */
static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	comp->ops->destroy_ctx(&zstrm->ctx);
	vfree(zstrm->buffer);
	zstrm->buffer = NULL;
}
52
53static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm)
54{
55 int ret;
56
57 ret = comp->ops->create_ctx(comp->params, &zstrm->ctx);
58 if (ret)
59 return ret;
60
61 /*
62 * allocate 2 pages. 1 for compressed data, plus 1 extra for the
63 * case when compressed size is larger than the original one
64 */
65 zstrm->buffer = vzalloc(2 * PAGE_SIZE);
66 if (!zstrm->buffer) {
67 zcomp_strm_free(comp, zstrm);
68 return -ENOMEM;
69 }
70 return 0;
71}
72
73static const struct zcomp_ops *lookup_backend_ops(const char *comp)
74{
75 int i = 0;
76
77 while (backends[i]) {
78 if (sysfs_streq(comp, backends[i]->name))
79 break;
80 i++;
81 }
82 return backends[i];
83}
84
/* true if @comp names a backend that is compiled into this kernel */
bool zcomp_available_algorithm(const char *comp)
{
	return lookup_backend_ops(comp) != NULL;
}
89
90/* show available compressors */
91ssize_t zcomp_available_show(const char *comp, char *buf)
92{
93 ssize_t sz = 0;
94 int i;
95
96 for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) {
97 if (!strcmp(comp, backends[i]->name)) {
98 sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
99 "[%s] ", backends[i]->name);
100 } else {
101 sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
102 "%s ", backends[i]->name);
103 }
104 }
105
106 sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
107 return sz;
108}
109
/*
 * lock and return this CPU's compression stream; the lock is held until
 * zcomp_stream_put().  NOTE(review): relies on local_lock() keeping the
 * task on this CPU so the this_cpu_ptr() result stays valid — confirm
 * against Documentation/locking/locktypes.rst.
 */
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
	local_lock(&comp->stream->lock);
	return this_cpu_ptr(comp->stream);
}
115
/* release the per-CPU stream lock taken in zcomp_stream_get() */
void zcomp_stream_put(struct zcomp *comp)
{
	local_unlock(&comp->stream->lock);
}
120
121int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
122 const void *src, unsigned int *dst_len)
123{
124 struct zcomp_req req = {
125 .src = src,
126 .dst = zstrm->buffer,
127 .src_len = PAGE_SIZE,
128 .dst_len = 2 * PAGE_SIZE,
129 };
130 int ret;
131
132 ret = comp->ops->compress(comp->params, &zstrm->ctx, &req);
133 if (!ret)
134 *dst_len = req.dst_len;
135 return ret;
136}
137
138int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
139 const void *src, unsigned int src_len, void *dst)
140{
141 struct zcomp_req req = {
142 .src = src,
143 .dst = dst,
144 .src_len = src_len,
145 .dst_len = PAGE_SIZE,
146 };
147
148 return comp->ops->decompress(comp->params, &zstrm->ctx, &req);
149}
150
151int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
152{
153 struct zcomp *comp = hlist_entry(node, struct zcomp, node);
154 struct zcomp_strm *zstrm;
155 int ret;
156
157 zstrm = per_cpu_ptr(comp->stream, cpu);
158 local_lock_init(&zstrm->lock);
159
160 ret = zcomp_strm_init(comp, zstrm);
161 if (ret)
162 pr_err("Can't allocate a compression stream\n");
163 return ret;
164}
165
166int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
167{
168 struct zcomp *comp = hlist_entry(node, struct zcomp, node);
169 struct zcomp_strm *zstrm;
170
171 zstrm = per_cpu_ptr(comp->stream, cpu);
172 zcomp_strm_free(comp, zstrm);
173 return 0;
174}
175
/*
 * allocate per-CPU streams, set up backend params and register with the
 * CPU hotplug machinery (which calls zcomp_cpu_up_prepare() for every
 * online CPU).  Returns 0 or a negative errno with everything undone.
 */
static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
{
	int ret;

	comp->stream = alloc_percpu(struct zcomp_strm);
	if (!comp->stream)
		return -ENOMEM;

	comp->params = params;
	ret = comp->ops->setup_params(comp->params);
	if (ret)
		goto cleanup;

	ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	if (ret < 0)
		goto cleanup;

	return 0;

cleanup:
	/*
	 * NOTE(review): release_params() also runs when setup_params()
	 * itself failed — assumes backends tolerate releasing partially
	 * set-up params; confirm against the backend implementations.
	 */
	comp->ops->release_params(comp->params);
	free_percpu(comp->stream);
	return ret;
}
200
/*
 * undo zcomp_init() in reverse order: the hotplug instance removal
 * triggers zcomp_cpu_dead() per CPU, freeing each stream, before the
 * params and per-CPU storage are released
 */
void zcomp_destroy(struct zcomp *comp)
{
	cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	comp->ops->release_params(comp->params);
	free_percpu(comp->stream);
	kfree(comp);
}
208
209struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params)
210{
211 struct zcomp *comp;
212 int error;
213
214 /*
215 * The backends array has a sentinel NULL value, so the minimum
216 * size is 1. In order to be valid the array, apart from the
217 * sentinel NULL element, should have at least one compression
218 * backend selected.
219 */
220 BUILD_BUG_ON(ARRAY_SIZE(backends) <= 1);
221
222 comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
223 if (!comp)
224 return ERR_PTR(-ENOMEM);
225
226 comp->ops = lookup_backend_ops(alg);
227 if (!comp->ops) {
228 kfree(comp);
229 return ERR_PTR(-EINVAL);
230 }
231
232 error = zcomp_init(comp, params);
233 if (error) {
234 kfree(comp);
235 return ERR_PTR(error);
236 }
237 return comp;
238}