Linux kernel source: crypto/scompress.c

v6.8
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>

#include "compress.h"

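/*
 * Per-CPU scratch buffers: scomp algorithms operate on linear memory,
 * so the acomp wrapper below copies scatterlist input into scratch->src
 * and the result out of scratch->dst.  The lock serialises users of a
 * CPU's scratch pair, since raw_cpu_ptr() in scomp_acomp_comp_decomp()
 * does not disable preemption.
 */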
struct scomp_scratch {
	spinlock_t	lock;
	void		*src;
	void		*dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

static int __maybe_unused crypto_scomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		vfree(scratch->src);
		vfree(scratch->dst);
		scratch->src = NULL;
		scratch->dst = NULL;
	}
}

static int crypto_scomp_alloc_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		void *mem;

		scratch = per_cpu_ptr(&scomp_scratch, i);

		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->src = mem;
		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->dst = mem;
	}
	return 0;
error:
	crypto_scomp_free_scratches();
	return -ENOMEM;
}

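/*
 * Scratch buffers are allocated lazily: the first scomp tfm to be
 * created allocates them, and the last one to go away frees them again
 * (see crypto_exit_scomp_ops_async() below).  scomp_scratch_users is
 * the refcount, protected by scomp_lock.
 */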
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	int ret = 0;

	mutex_lock(&scomp_lock);
	if (!scomp_scratch_users++)
		ret = crypto_scomp_alloc_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}

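/*
 * Common implementation behind both acomp entry points: copy the source
 * scatterlist into the linear src scratch, run the synchronous
 * (de)compression, then copy the result back out.  If the caller did
 * not supply a destination scatterlist, one is allocated to fit the
 * output; if it did, the output must fit in it or -ENOSPC is returned.
 */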
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	struct scomp_scratch *scratch;
	unsigned int dlen;
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
		return -EINVAL;

	if (req->dst && !req->dlen)
		return -EINVAL;

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	dlen = req->dlen;

	scratch = raw_cpu_ptr(&scomp_scratch);
	spin_lock(&scratch->lock);

	scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
	if (dir)
		ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
					    scratch->dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
					      scratch->dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
			if (!req->dst) {
				ret = -ENOMEM;
				goto out;
			}
		} else if (req->dlen > dlen) {
			ret = -ENOSPC;
			goto out;
		}
		scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
					 1);
	}
out:
	spin_unlock(&scratch->lock);
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

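/*
 * Glue used by the acomp core when an algorithm of type scomp is
 * requested through the acomp API: allocate the underlying scomp
 * transform and route the acomp entry points to the wrappers above.
 * crt->reqsize reserves room in each request for the per-request
 * scomp context pointer.
 */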
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = sgl_free;
	crt->reqsize = sizeof(void *);

	return 0;
}

struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_scomp_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
	.report_stat = crypto_acomp_report_stat,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_scomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");
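
Usage note: an scomp algorithm is never called directly by crypto API users;
it is reached through the asynchronous acomp API, which
crypto_init_scomp_ops_async() above wires up. The following is a minimal
caller-side sketch, assuming a compressor registered under the name "lzo"
(mainline provides one as an scomp_alg in crypto/lzo.c); scomp_demo and its
parameters are invented for illustration, and error handling is abbreviated.

#include <crypto/acompress.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/*
 * src_buf and dst_buf must be linear, page-backed kernel memory
 * (e.g. kmalloc'd), since sg_init_one() maps them into scatterlists.
 */
static int scomp_demo(void *src_buf, unsigned int slen,
		      void *dst_buf, unsigned int dlen)
{
	struct scatterlist src, dst;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* "lzo" is backed by an scomp_alg, so this returns the wrapper. */
	tfm = crypto_alloc_acomp("lzo", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	sg_init_one(&src, src_buf, slen);
	sg_init_one(&dst, dst_buf, dlen);
	acomp_request_set_params(req, &src, &dst, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* Ends up in scomp_acomp_compress() above and completes
	 * synchronously; crypto_wait_req() is used for API symmetry. */
	ret = crypto_wait_req(crypto_acomp_compress(req), &wait);

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return ret;
}

On success the compressed length comes back in req->dlen (read it before
freeing the request); per the checks in scomp_acomp_comp_decomp(), both the
input and output must fit within SCOMP_SCRATCH_SIZE.
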
v4.10.11
 
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/vmalloc.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include "internal.h"

static const struct crypto_type crypto_scomp_type;
static void * __percpu *scomp_src_scratches;
static void * __percpu *scomp_dst_scratches;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

#ifdef CONFIG_NET
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	strncpy(rscomp.type, "scomp", sizeof(rscomp.type));

	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		    sizeof(struct crypto_report_comp), &rscomp))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	return 0;
}

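/*
 * Unlike the v6.8 code above, this version does not allocate scratch
 * buffers when a tfm is created: crypto_register_scomp() below does it
 * once per registered algorithm, refcounted via scomp_scratch_users.
 */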
static void crypto_scomp_free_scratches(void * __percpu *scratches)
{
	int i;

	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
}

static void * __percpu *crypto_scomp_alloc_scratches(void)
{
	void * __percpu *scratches;
	int i;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	for_each_possible_cpu(i) {
		void *scratch;

		scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!scratch)
			goto error;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;

error:
	crypto_scomp_free_scratches(scratches);
	return NULL;
}

static void crypto_scomp_free_all_scratches(void)
{
	if (!--scomp_scratch_users) {
		crypto_scomp_free_scratches(scomp_src_scratches);
		crypto_scomp_free_scratches(scomp_dst_scratches);
		scomp_src_scratches = NULL;
		scomp_dst_scratches = NULL;
	}
}

static int crypto_scomp_alloc_all_scratches(void)
{
	if (!scomp_scratch_users++) {
		scomp_src_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_src_scratches)
			return -ENOMEM;
		scomp_dst_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_dst_scratches)
			return -ENOMEM;
	}
	return 0;
}

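/*
 * Page-backed scatterlist helpers, used when the caller supplies no
 * destination: one page per PAGE_SIZE chunk of output.  Later kernels
 * replaced these with the generic sgl_alloc()/sgl_free(), as seen in
 * the v6.8 listing above.
 */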
static void crypto_scomp_sg_free(struct scatterlist *sgl)
{
	int i, n;
	struct page *page;

	if (!sgl)
		return;

	n = sg_nents(sgl);
	for_each_sg(sgl, sgl, n, i) {
		page = sg_page(sgl);
		if (page)
			__free_page(page);
	}

	kfree(sgl);
}

static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
{
	struct scatterlist *sgl;
	struct page *page;
	int i, n;

	n = ((size - 1) >> PAGE_SHIFT) + 1;

	sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, n);

	for (i = 0; i < n; i++) {
		page = alloc_page(gfp);
		if (!page)
			goto err;
		sg_set_page(sgl + i, page, PAGE_SIZE, 0);
	}

	return sgl;

err:
	sg_mark_end(sgl + i);
	crypto_scomp_sg_free(sgl);
	return NULL;
}

static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	const int cpu = get_cpu();
	u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
	u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	if (req->dst && !req->dlen) {
		ret = -EINVAL;
		goto out;
	}

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
	if (dir)
		ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
					    scratch_dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
					      scratch_dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = crypto_scomp_sg_alloc(req->dlen,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				   GFP_KERNEL : GFP_ATOMIC);
			/*
			 * Note: ret is still 0 here on allocation failure,
			 * so the caller sees success with a NULL req->dst;
			 * the v6.8 version of this function returns -ENOMEM
			 * instead.
			 */
			if (!req->dst)
				goto out;
		}
		scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
					 1);
	}
out:
	put_cpu();
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);
}

int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = crypto_scomp_sg_free;
	crt->reqsize = sizeof(void *);

	return 0;
}

struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
	.report = crypto_scomp_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int ret = -ENOMEM;

	mutex_lock(&scomp_lock);
	if (crypto_scomp_alloc_all_scratches())
		goto error;

	base->cra_type = &crypto_scomp_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	ret = crypto_register_alg(base);
	if (ret)
		goto error;

	mutex_unlock(&scomp_lock);
	return ret;

error:
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

int crypto_unregister_scomp(struct scomp_alg *alg)
{
	int ret;

	mutex_lock(&scomp_lock);
	ret = crypto_unregister_alg(&alg->base);
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");
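
For the driver side, here is a minimal sketch of defining and registering a
struct scomp_alg against the v6.8 API shown above. The pass-through "copy"
algorithm, its names, and all copy_* identifiers are invented for
illustration only; a real driver with the same shape is crypto/lzo.c.

#include <crypto/internal/scompress.h>
#include <linux/module.h>
#include <linux/string.h>

/* No per-request working memory is needed; NULL is acceptable because
 * the scomp core only rejects ERR_PTR() values from alloc_ctx. */
static void *copy_alloc_ctx(struct crypto_scomp *tfm)
{
	return NULL;
}

static void copy_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
}

/* A "compressor" that just copies: the input must fit in the output
 * buffer, otherwise report -ENOSPC. */
static int copy_compress(struct crypto_scomp *tfm, const u8 *src,
			 unsigned int slen, u8 *dst, unsigned int *dlen,
			 void *ctx)
{
	if (slen > *dlen)
		return -ENOSPC;
	memcpy(dst, src, slen);
	*dlen = slen;
	return 0;
}

static struct scomp_alg copy_scomp = {
	.alloc_ctx	= copy_alloc_ctx,
	.free_ctx	= copy_free_ctx,
	.compress	= copy_compress,
	.decompress	= copy_compress,	/* copying is its own inverse */
	.base		= {
		.cra_name		= "copy",
		.cra_driver_name	= "copy-scomp",
		.cra_module		= THIS_MODULE,
	},
};

static int __init copy_mod_init(void)
{
	return crypto_register_scomp(&copy_scomp);
}

static void __exit copy_mod_exit(void)
{
	crypto_unregister_scomp(&copy_scomp);
}

module_init(copy_mod_init);
module_exit(copy_mod_exit);
MODULE_LICENSE("GPL");

Once registered, the algorithm is reachable as crypto_alloc_acomp("copy", 0, 0),
at which point the scompress.c glue above adapts it to the acomp interface.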