crypto/scompress.c (v4.17)
 
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include "internal.h"

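/*
 * All scomp tfms share one pair of per-CPU scratch buffers, each
 * SCOMP_SCRATCH_SIZE bytes; scomp_scratch_users counts the live users
 * and is serialized by scomp_lock.
 */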
static const struct crypto_type crypto_scomp_type;
static void * __percpu *scomp_src_scratches;
static void * __percpu *scomp_dst_scratches;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

#ifdef CONFIG_NET
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	strncpy(rscomp.type, "scomp", sizeof(rscomp.type));

	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		    sizeof(struct crypto_report_comp), &rscomp))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static void crypto_scomp_free_scratches(void * __percpu *scratches)
{
	int i;

	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
}

static void * __percpu *crypto_scomp_alloc_scratches(void)
{
	void * __percpu *scratches;
	int i;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	for_each_possible_cpu(i) {
		void *scratch;

		scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!scratch)
			goto error;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;

error:
	crypto_scomp_free_scratches(scratches);
	return NULL;
}

static void crypto_scomp_free_all_scratches(void)
{
	if (!--scomp_scratch_users) {
		crypto_scomp_free_scratches(scomp_src_scratches);
		crypto_scomp_free_scratches(scomp_dst_scratches);
		scomp_src_scratches = NULL;
		scomp_dst_scratches = NULL;
	}
}

static int crypto_scomp_alloc_all_scratches(void)
{
	if (!scomp_scratch_users++) {
		scomp_src_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_src_scratches)
			return -ENOMEM;
		scomp_dst_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_dst_scratches) {
			crypto_scomp_free_scratches(scomp_src_scratches);
			scomp_src_scratches = NULL;
			return -ENOMEM;
		}
	}
	return 0;
}

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	int ret;

	mutex_lock(&scomp_lock);
	ret = crypto_scomp_alloc_all_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}

static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	const int cpu = get_cpu();
	u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
	u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	if (req->dst && !req->dlen) {
		ret = -EINVAL;
		goto out;
	}

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);

	if (dir)
		ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
					    scratch_dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
					      scratch_dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
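			/*
			 * Note: ret is still 0 at this point, so a failed
			 * sgl_alloc() falls through to "out" and is reported
			 * as success; later kernels set ret = -ENOMEM before
			 * the goto (see the v6.13.7 version below).
			 */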
			if (!req->dst)
				goto out;
		}
		scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
					 1);
	}
out:
	put_cpu();
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);
}

int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = sgl_free;
	crt->reqsize = sizeof(void *);

	return 0;
}

struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
	.report = crypto_scomp_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_type = &crypto_scomp_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

int crypto_unregister_scomp(struct scomp_alg *alg)
{
	return crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");
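
As a usage sketch (not part of the file above): a driver registers a
synchronous compressor by filling in a struct scomp_alg and calling
crypto_register_scomp(). The demo_copy_* names below are hypothetical, and
the "compressor" is just a bounds-checked memcpy(), so only the
registration plumbing is real. A real driver would also set
base.cra_priority and wire compress/decompress to an actual algorithm.

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/string.h>
#include <crypto/internal/scompress.h>

/* No per-request state is needed for a plain copy. */
static void *demo_copy_alloc_ctx(struct crypto_scomp *tfm)
{
	return NULL;
}

static void demo_copy_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
}

/* "Compression" and "decompression" are both a bounds-checked memcpy(). */
static int demo_copy_transform(struct crypto_scomp *tfm, const u8 *src,
			       unsigned int slen, u8 *dst,
			       unsigned int *dlen, void *ctx)
{
	if (slen > *dlen)
		return -ENOSPC;
	memcpy(dst, src, slen);
	*dlen = slen;
	return 0;
}

static struct scomp_alg demo_copy_alg = {
	.alloc_ctx	= demo_copy_alloc_ctx,
	.free_ctx	= demo_copy_free_ctx,
	.compress	= demo_copy_transform,
	.decompress	= demo_copy_transform,
	.base		= {
		.cra_name	 = "demo-copy",
		.cra_driver_name = "demo-copy-generic",
		.cra_module	 = THIS_MODULE,
	},
};

static int __init demo_copy_init(void)
{
	/* cra_type and the type flag are filled in by the core. */
	return crypto_register_scomp(&demo_copy_alg);
}

static void __exit demo_copy_exit(void)
{
	crypto_unregister_scomp(&demo_copy_alg);
}

module_init(demo_copy_init);
module_exit(demo_copy_exit);
MODULE_LICENSE("GPL");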
crypto/scompress.c (v6.13.7)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>

#include "compress.h"

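/*
 * Scratch space is now a statically defined per-CPU src/dst pair guarded
 * by a spinlock; allocation is still refcounted through
 * scomp_scratch_users, but the dynamically allocated per-CPU pointer
 * arrays of older kernels are gone.
 */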
struct scomp_scratch {
	spinlock_t	lock;
	void		*src;
	void		*dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

static int __maybe_unused crypto_scomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		vfree(scratch->src);
		vfree(scratch->dst);
		scratch->src = NULL;
		scratch->dst = NULL;
	}
}

static int crypto_scomp_alloc_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		void *mem;

		scratch = per_cpu_ptr(&scomp_scratch, i);

		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->src = mem;
		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->dst = mem;
	}
	return 0;
error:
	crypto_scomp_free_scratches();
	return -ENOMEM;
}

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	int ret = 0;

	mutex_lock(&scomp_lock);
	if (!scomp_scratch_users++)
		ret = crypto_scomp_alloc_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}

static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	struct scomp_scratch *scratch;
	void *src, *dst;
	unsigned int dlen;
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
		return -EINVAL;

	if (req->dst && !req->dlen)
		return -EINVAL;

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	dlen = req->dlen;

	scratch = raw_cpu_ptr(&scomp_scratch);
	spin_lock(&scratch->lock);

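	/*
	 * Fast path: a single-entry scatterlist that is not in highmem can
	 * be used in place; anything else is bounced through the per-CPU
	 * scratch buffers.
	 */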
	if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) {
		src = page_to_virt(sg_page(req->src)) + req->src->offset;
	} else {
		scatterwalk_map_and_copy(scratch->src, req->src, 0,
					 req->slen, 0);
		src = scratch->src;
	}

	if (req->dst && sg_nents(req->dst) == 1 && !PageHighMem(sg_page(req->dst)))
		dst = page_to_virt(sg_page(req->dst)) + req->dst->offset;
	else
		dst = scratch->dst;

	if (dir)
		ret = crypto_scomp_compress(scomp, src, req->slen,
					    dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, src, req->slen,
					      dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
			if (!req->dst) {
				ret = -ENOMEM;
				goto out;
			}
		} else if (req->dlen > dlen) {
			ret = -ENOSPC;
			goto out;
		}
		if (dst == scratch->dst) {
			scatterwalk_map_and_copy(scratch->dst, req->dst, 0,
						 req->dlen, 1);
		} else {
			int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE);
			int i;
			struct page *dst_page = sg_page(req->dst);

			for (i = 0; i < nr_pages; i++)
				flush_dcache_page(dst_page + i);
		}
	}
out:
	spin_unlock(&scratch->lock);
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = sgl_free;
	crt->reqsize = sizeof(void *);

	return 0;
}

struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_scomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_scomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");
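
On the consumer side, scomp algorithms are reached through the acomp
interface that crypto_init_scomp_ops_async() wires up above. Below is a
minimal sketch of a synchronous in-kernel caller; it assumes an available
"lz4" implementation, kmalloc()-backed (linearly mapped) buffers, and a
hypothetical demo_compress() helper, with error handling kept short:

#include <crypto/acompress.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int demo_compress(const void *src_buf, unsigned int slen,
			 void *dst_buf, unsigned int *dlen)
{
	struct scatterlist src, dst;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	struct crypto_wait wait;
	int ret;

	tfm = crypto_alloc_acomp("lz4", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	sg_init_one(&src, src_buf, slen);
	sg_init_one(&dst, dst_buf, *dlen);

	crypto_init_wait(&wait);
	acomp_request_set_params(req, &src, &dst, slen, *dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* For an scomp-backed tfm this completes synchronously. */
	ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
	if (!ret)
		*dlen = req->dlen;	/* bytes actually produced */

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return ret;
}

Passing a NULL req->dst instead makes scomp_acomp_comp_decomp() allocate
the destination scatterlist itself via sgl_alloc(); it is freed later
through the tfm's dst_free hook.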