// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include <linux/crypto.h>
#include <crypto/acompress.h>
#include <crypto/internal/acompress.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "qat_bl.h"
#include "qat_comp_req.h"
#include "qat_compression.h"
#include "qat_algs_send.h"

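/* RFC 1950 (zlib) stream framing constants */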
#define QAT_RFC_1950_HDR_SIZE 2
#define QAT_RFC_1950_FOOTER_SIZE 4
#define QAT_RFC_1950_CM_DEFLATE 8
#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7
#define QAT_RFC_1950_CM_MASK 0x0f
#define QAT_RFC_1950_CM_OFFSET 4
#define QAT_RFC_1950_DICT_MASK 0x20
#define QAT_RFC_1950_COMP_HDR 0x785e

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

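/* Direction of a compression service request */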
enum direction {
	DECOMPRESSION = 0,
	COMPRESSION = 1,
};

struct qat_compression_req;

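/*
 * Per-transform context: @comp_ctx holds the firmware request templates
 * built by build_deflate_ctx(), @inst is the compression instance the
 * transform is bound to and @qat_comp_callback is an optional
 * algorithm-specific completion hook.
 */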
struct qat_compression_ctx {
	u8 comp_ctx[QAT_COMP_CTX_SIZE];
	struct qat_compression_instance *inst;
	int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
};

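/*
 * Tracking state for requests submitted without a destination scatterlist:
 * @is_null is set when the driver allocated the destination on behalf of
 * the caller, @resubmitted once the request has been retried with a larger
 * buffer.
 */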
struct qat_dst {
	bool is_null;
	int resubmitted;
};

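/*
 * Per-request context: the firmware descriptor (@req), back-pointers to the
 * transform context and originating acomp request, the DMA-mapped buffer
 * lists, the request direction, the destination length excluding the
 * overflow buffer (@actual_dlen), the transport descriptor, the resubmit
 * work item and the NULL-destination state.
 */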
struct qat_compression_req {
	u8 req[QAT_COMP_REQ_SIZE];
	struct qat_compression_ctx *qat_compression_ctx;
	struct acomp_req *acompress_req;
	struct qat_request_buffs buf;
	enum direction dir;
	int actual_dlen;
	struct qat_alg_req alg_req;
	struct work_struct resubmit;
	struct qat_dst dst;
};

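/* Fill in the transport descriptor and submit the request on the DC TX ring */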
static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
				   struct qat_compression_instance *inst,
				   struct crypto_async_request *base)
{
	struct qat_alg_req *alg_req = &qat_req->alg_req;

	alg_req->fw_req = (u32 *)&qat_req->req;
	alg_req->tx_ring = inst->dc_tx;
	alg_req->base = base;
	alg_req->backlog = &inst->backlog;

	return qat_alg_send_message(alg_req);
}

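/*
 * Worker that retries a request originally submitted without a destination
 * buffer after the driver-allocated buffer overflowed: the destination is
 * reallocated to CRYPTO_ACOMP_DST_MAX bytes, remapped and the firmware
 * request is resubmitted.
 */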
static void qat_comp_resubmit(struct work_struct *work)
{
	struct qat_compression_req *qat_req =
		container_of(work, struct qat_compression_req, resubmit);
	struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
	struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
	struct qat_request_buffs *qat_bufs = &qat_req->buf;
	struct qat_compression_instance *inst = ctx->inst;
	struct acomp_req *areq = qat_req->acompress_req;
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
	unsigned int dlen = CRYPTO_ACOMP_DST_MAX;
	u8 *req = qat_req->req;
	dma_addr_t dfbuf;
	int ret;

	areq->dlen = dlen;

	dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n",
		crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
		qat_req->dir == COMPRESSION ? "comp" : "decomp", dlen);

	ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs,
					 qat_algs_alloc_flags(&areq->base));
	if (ret)
		goto err;

	qat_req->dst.resubmitted = true;

	dfbuf = qat_req->buf.bloutp;
	qat_comp_override_dst(req, dfbuf, dlen);

	ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
	if (ret != -ENOSPC)
		return;

err:
	qat_bl_free_bufl(accel_dev, qat_bufs);
	acomp_request_complete(areq, ret);
}

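/*
 * Common completion handler: translate the firmware response into an acomp
 * result, handle overflows of driver-allocated destination buffers, then
 * unmap the buffers and complete the request.
 */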
static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
				      void *resp)
{
	struct acomp_req *areq = qat_req->acompress_req;
	struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
	struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
	struct qat_compression_instance *inst = ctx->inst;
	int consumed, produced;
	s8 cmp_err, xlt_err;
	int res = -EBADMSG;
	int status;
	u8 cnv;

	status = qat_comp_get_cmp_status(resp);
	status |= qat_comp_get_xlt_status(resp);
	cmp_err = qat_comp_get_cmp_err(resp);
	xlt_err = qat_comp_get_xlt_err(resp);

	consumed = qat_comp_get_consumed_ctr(resp);
	produced = qat_comp_get_produced_ctr(resp);

	dev_dbg(&GET_DEV(accel_dev),
		"[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d",
		crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
		qat_req->dir == COMPRESSION ? "comp  " : "decomp",
		status ? "ERR" : "OK ",
		areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err);

	areq->dlen = 0;

	if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) {
		if (cmp_err == ERR_CODE_OVERFLOW_ERROR) {
			if (qat_req->dst.resubmitted) {
				dev_dbg(&GET_DEV(accel_dev),
					"Output does not fit destination buffer\n");
				res = -EOVERFLOW;
				goto end;
			}

			INIT_WORK(&qat_req->resubmit, qat_comp_resubmit);
			adf_misc_wq_queue_work(&qat_req->resubmit);
			return;
		}
	}

	if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		goto end;

	if (qat_req->dir == COMPRESSION) {
		cnv = qat_comp_get_cmp_cnv_flag(resp);
		if (unlikely(!cnv)) {
			dev_err(&GET_DEV(accel_dev),
				"Verified compression not supported\n");
			goto end;
		}

		if (unlikely(produced > qat_req->actual_dlen)) {
			memset(inst->dc_data->ovf_buff, 0,
			       inst->dc_data->ovf_buff_sz);
			dev_dbg(&GET_DEV(accel_dev),
				"Actual buffer overflow: produced=%d, dlen=%d\n",
				produced, qat_req->actual_dlen);
			goto end;
		}
	}

	res = 0;
	areq->dlen = produced;

	if (ctx->qat_comp_callback)
		res = ctx->qat_comp_callback(qat_req, resp);

end:
	qat_bl_free_bufl(accel_dev, &qat_req->buf);
	acomp_request_complete(areq, res);
}

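/* Response-ring callback: complete the request and flush any backlog */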
void qat_comp_alg_callback(void *resp)
{
	struct qat_compression_req *qat_req =
			(void *)(__force long)qat_comp_get_opaque(resp);
	struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;

	qat_comp_generic_callback(qat_req, resp);

	qat_alg_send_backlog(backlog);
}

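/*
 * Bind the transform to a compression instance close to the requested NUMA
 * node and build the deflate firmware context templates.
 */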
static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
{
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_compression_instance *inst;
	int node;

	if (tfm->node == NUMA_NO_NODE)
		node = numa_node_id();
	else
		node = tfm->node;

	memset(ctx, 0, sizeof(*ctx));
	inst = qat_compression_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	ctx->inst = inst;

	ctx->inst->build_deflate_ctx(ctx->comp_ctx);

	return 0;
}

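/* Release the compression instance held by the transform */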
static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
{
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);

	qat_compression_put_instance(ctx->inst);
	memset(ctx, 0, sizeof(*ctx));
}

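/*
 * Common implementation for compress and decompress: map the source and
 * destination scatterlists (allocating a destination if the caller passed
 * none), build the firmware request and submit it to the device. The shdr,
 * sftr, dhdr and dftr parameters give the number of header and footer bytes
 * to skip in the source and destination buffers.
 */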
static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir,
					    unsigned int shdr, unsigned int sftr,
					    unsigned int dhdr, unsigned int dftr)
{
	struct qat_compression_req *qat_req = acomp_request_ctx(areq);
	struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_compression_instance *inst = ctx->inst;
	gfp_t f = qat_algs_alloc_flags(&areq->base);
	struct qat_sgl_to_bufl_params params = {0};
	int slen = areq->slen - shdr - sftr;
	int dlen = areq->dlen - dhdr - dftr;
	dma_addr_t sfbuf, dfbuf;
	u8 *req = qat_req->req;
	size_t ovf_buff_sz;
	int ret;

	params.sskip = shdr;
	params.dskip = dhdr;

	if (!areq->src || !slen)
		return -EINVAL;

	if (areq->dst && !dlen)
		return -EINVAL;

	qat_req->dst.is_null = false;

	/* Handle acomp requests that require the allocation of a destination
	 * buffer. The size of the destination buffer is double the source
	 * buffer (rounded up to the size of a page) to fit the decompressed
	 * output or an expansion on the data for compression.
	 */
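	/*
	 * For example, with 4 KiB pages a 3 KiB source results in
	 * dlen = round_up(2 * 3 KiB, PAGE_SIZE) = 8 KiB, before the
	 * destination header/footer skips are subtracted below.
	 */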
	if (!areq->dst) {
		qat_req->dst.is_null = true;

		dlen = round_up(2 * slen, PAGE_SIZE);
		areq->dst = sgl_alloc(dlen, f, NULL);
		if (!areq->dst)
			return -ENOMEM;

		dlen -= dhdr + dftr;
		areq->dlen = dlen;
		qat_req->dst.resubmitted = false;
	}

	if (dir == COMPRESSION) {
		params.extra_dst_buff = inst->dc_data->ovf_buff_p;
		ovf_buff_sz = inst->dc_data->ovf_buff_sz;
		params.sz_extra_dst_buff = ovf_buff_sz;
	}

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
				 &qat_req->buf, &params, f);
	if (unlikely(ret))
		return ret;

	sfbuf = qat_req->buf.blp;
	dfbuf = qat_req->buf.bloutp;
	qat_req->qat_compression_ctx = ctx;
	qat_req->acompress_req = areq;
	qat_req->dir = dir;

	if (dir == COMPRESSION) {
		qat_req->actual_dlen = dlen;
		dlen += ovf_buff_sz;
		qat_comp_create_compression_req(ctx->comp_ctx, req,
						(u64)(__force long)sfbuf, slen,
						(u64)(__force long)dfbuf, dlen,
						(u64)(__force long)qat_req);
	} else {
		qat_comp_create_decompression_req(ctx->comp_ctx, req,
						  (u64)(__force long)sfbuf, slen,
						  (u64)(__force long)dfbuf, dlen,
						  (u64)(__force long)qat_req);
	}

	ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);

	return ret;
}

static int qat_comp_alg_compress(struct acomp_req *req)
{
	return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, 0, 0);
}

static int qat_comp_alg_decompress(struct acomp_req *req)
{
	return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
}

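/* acomp algorithms exposed by the QAT driver */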
static struct acomp_alg qat_acomp[] = { {
	.base = {
		.cra_name = "deflate",
		.cra_driver_name = "qat_deflate",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct qat_compression_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_comp_alg_init_tfm,
	.exit = qat_comp_alg_exit_tfm,
	.compress = qat_comp_alg_compress,
	.decompress = qat_comp_alg_decompress,
	.dst_free = sgl_free,
	.reqsize = sizeof(struct qat_compression_req),
}};

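/*
 * Register the acomp algorithms when the first device comes up; subsequent
 * devices only increment the reference count.
 */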
int qat_comp_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1)
		ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
	mutex_unlock(&algs_lock);
	return ret;
}

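/* Unregister the acomp algorithms when the last device goes away */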
void qat_comp_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0)
		crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
	mutex_unlock(&algs_lock);
}