v6.8
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2014 - 2022 Intel Corporation */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "qat_bl.h"
#include "qat_crypto.h"

void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
		      struct qat_request_buffs *buf)
{
	struct device *dev = &GET_DEV(accel_dev);
	struct qat_alg_buf_list *bl = buf->bl;
	struct qat_alg_buf_list *blout = buf->blout;
	dma_addr_t blp = buf->blp;
	dma_addr_t blpout = buf->bloutp;
	size_t sz = buf->sz;
	size_t sz_out = buf->sz_out;
	int bl_dma_dir;
	int i;

	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->buffers[i].addr,
				 bl->buffers[i].len, bl_dma_dir);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	if (!buf->sgl_src_valid)
		kfree(bl);

	if (blp != blpout) {
		for (i = 0; i < blout->num_mapped_bufs; i++) {
			dma_unmap_single(dev, blout->buffers[i].addr,
					 blout->buffers[i].len,
					 DMA_FROM_DEVICE);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);

		if (!buf->sgl_dst_valid)
			kfree(blout);
	}
}
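
qat_bl_free_bufl() is the teardown half of the mapping code: it unmaps every source buffer with the direction chosen at map time (DMA_TO_DEVICE for out-of-place requests, DMA_BIDIRECTIONAL when source and destination alias), unmaps the firmware descriptor itself, and only kfree()s the buffer lists that were heap-allocated rather than taken from the preallocated sgl_src/sgl_dst storage. A minimal, hypothetical completion-path caller is sketched below; the function name and the status handling are assumptions, only qat_bl_free_bufl() and the types above come from this file.

/*
 * Hypothetical completion handler (illustration only, not driver code):
 * once the firmware response arrives, the request's DMA state is torn
 * down before the result is handed back to the caller.
 */
static void example_req_done(struct adf_accel_dev *accel_dev,
			     struct qat_request_buffs *buf, int status)
{
	/* Unmaps src (and dst, if out of place) and frees heap descriptors */
	qat_bl_free_bufl(accel_dev, buf);

	/* ... propagate 'status' to the crypto/compression layer here ... */
}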

static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
				struct scatterlist *sgl,
				struct scatterlist *sglout,
				struct qat_request_buffs *buf,
				dma_addr_t extra_dst_buff,
				size_t sz_extra_dst_buff,
				unsigned int sskip,
				unsigned int dskip,
				gfp_t flags)
{
	struct device *dev = &GET_DEV(accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp = DMA_MAPPING_ERROR;
	dma_addr_t bloutp = DMA_MAPPING_ERROR;
	struct scatterlist *sg;
	size_t sz_out, sz = struct_size(bufl, buffers, n);
	int node = dev_to_node(&GET_DEV(accel_dev));
	unsigned int left;
	int bufl_dma_dir;

	if (unlikely(!n))
		return -EINVAL;

	buf->sgl_src_valid = false;
	buf->sgl_dst_valid = false;

	if (n > QAT_MAX_BUFF_DESC) {
		bufl = kzalloc_node(sz, flags, node);
		if (unlikely(!bufl))
			return -ENOMEM;
	} else {
		bufl = &buf->sgl_src.sgl_hdr;
		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
		buf->sgl_src_valid = true;
	}

	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	for (i = 0; i < n; i++)
		bufl->buffers[i].addr = DMA_MAPPING_ERROR;

	left = sskip;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		if (left >= sg->length) {
			left -= sg->length;
			continue;
		}
		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
						       sg->length - left,
						       bufl_dma_dir);
		bufl->buffers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
			goto err_in;
		sg_nctr++;
		if (left) {
			bufl->buffers[y].len -= left;
			left = 0;
		}
	}
	bufl->num_bufs = sg_nctr;
	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;
	buf->bl = bufl;
	buf->blp = blp;
	buf->sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *buffers;
		int extra_buff = extra_dst_buff ? 1 : 0;
		int n_sglout = sg_nents(sglout);

		n = n_sglout + extra_buff;
		sz_out = struct_size(buflout, buffers, n);
		left = dskip;

		sg_nctr = 0;

		if (n > QAT_MAX_BUFF_DESC) {
			buflout = kzalloc_node(sz_out, flags, node);
			if (unlikely(!buflout))
				goto err_in;
		} else {
			buflout = &buf->sgl_dst.sgl_hdr;
			memset(buflout, 0, sizeof(struct qat_alg_buf_list));
			buf->sgl_dst_valid = true;
		}

		buffers = buflout->buffers;
		for (i = 0; i < n; i++)
			buffers[i].addr = DMA_MAPPING_ERROR;

		for_each_sg(sglout, sg, n_sglout, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			if (left >= sg->length) {
				left -= sg->length;
				continue;
			}
			buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
							 sg->length - left,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
				goto err_out;
			buffers[y].len = sg->length;
			sg_nctr++;
			if (left) {
				buffers[y].len -= left;
				left = 0;
			}
		}
		if (extra_buff) {
			buffers[sg_nctr].addr = extra_dst_buff;
			buffers[sg_nctr].len = sz_extra_dst_buff;
		}

		buflout->num_bufs = sg_nctr;
		buflout->num_bufs += extra_buff;
		buflout->num_mapped_bufs = sg_nctr;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		buf->blout = buflout;
		buf->bloutp = bloutp;
		buf->sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		buf->bloutp = buf->blp;
		buf->sz_out = 0;
	}
	return 0;

err_out:
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);

	n = sg_nents(sglout);
	for (i = 0; i < n; i++) {
		if (buflout->buffers[i].addr == extra_dst_buff)
			break;
		if (!dma_mapping_error(dev, buflout->buffers[i].addr))
			dma_unmap_single(dev, buflout->buffers[i].addr,
					 buflout->buffers[i].len,
					 DMA_FROM_DEVICE);
	}

	if (!buf->sgl_dst_valid)
		kfree(buflout);

err_in:
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
			dma_unmap_single(dev, bufl->buffers[i].addr,
					 bufl->buffers[i].len,
					 bufl_dma_dir);

	if (!buf->sgl_src_valid)
		kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}
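
The sskip and dskip parameters let __qat_bl_sgl_to_bufl() ignore a number of leading bytes of the source and destination scatterlists: segments that fall entirely inside the skip window are dropped, and the first partially skipped segment is mapped from an offset with a correspondingly shortened length. A standalone, user-space analogue of that loop is sketched below; the segment lengths and skip value are made up for illustration.

/*
 * User-space analogue of the skip handling above (illustration only):
 * consume 'left' bytes, then report the offset and length each remaining
 * segment would be mapped with.
 */
#include <stdio.h>

int main(void)
{
	unsigned int segs[] = { 16, 64, 64, 32 };	/* made-up segment lengths */
	unsigned int left = 70;				/* bytes to skip, e.g. sskip */
	unsigned int i;

	for (i = 0; i < sizeof(segs) / sizeof(segs[0]); i++) {
		if (left >= segs[i]) {		/* segment skipped entirely */
			left -= segs[i];
			continue;
		}
		/* map from offset 'left'; only the first hit has left != 0 */
		printf("seg %u: map offset %u, len %u\n",
		       i, left, segs[i] - left);
		left = 0;
	}
	return 0;
}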

int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
		       struct scatterlist *sgl,
		       struct scatterlist *sglout,
		       struct qat_request_buffs *buf,
		       struct qat_sgl_to_bufl_params *params,
		       gfp_t flags)
{
	dma_addr_t extra_dst_buff = 0;
	size_t sz_extra_dst_buff = 0;
	unsigned int sskip = 0;
	unsigned int dskip = 0;

	if (params) {
		extra_dst_buff = params->extra_dst_buff;
		sz_extra_dst_buff = params->sz_extra_dst_buff;
		sskip = params->sskip;
		dskip = params->dskip;
	}

	return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
				    extra_dst_buff, sz_extra_dst_buff,
				    sskip, dskip, flags);
}
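
qat_bl_sgl_to_bufl() is the exported entry point; passing a NULL params pointer selects the defaults (no skip offsets, no extra destination buffer). The sketch below shows both forms of the call from a hypothetical caller; the surrounding function, its arguments and the overflow buffer are assumptions, only the call signature and the qat_sgl_to_bufl_params fields come from this file.

/*
 * Hypothetical caller (illustration only): map a request either with the
 * defaults or with a source skip and a spare, already-mapped destination.
 */
static int example_map_request(struct adf_accel_dev *accel_dev,
			       struct scatterlist *src,
			       struct scatterlist *dst,
			       struct qat_request_buffs *buf,
			       dma_addr_t overflow, size_t overflow_len,
			       unsigned int src_skip)
{
	struct qat_sgl_to_bufl_params params = {
		.extra_dst_buff = overflow,	/* DMA address mapped by the caller */
		.sz_extra_dst_buff = overflow_len,
		.sskip = src_skip,
		.dskip = 0,
	};

	if (!overflow && !src_skip)
		/* all defaults: a NULL params pointer is sufficient */
		return qat_bl_sgl_to_bufl(accel_dev, src, dst, buf,
					  NULL, GFP_KERNEL);

	return qat_bl_sgl_to_bufl(accel_dev, src, dst, buf, &params,
				  GFP_KERNEL);
}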

static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
			     struct qat_alg_buf_list *bl)
{
	struct device *dev = &GET_DEV(accel_dev);
	int n = bl->num_bufs;
	int i;

	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bl->buffers[i].addr))
			dma_unmap_single(dev, bl->buffers[i].addr,
					 bl->buffers[i].len, DMA_FROM_DEVICE);
}

static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
			  struct scatterlist *sgl,
			  struct qat_alg_buf_list **bl)
{
	struct device *dev = &GET_DEV(accel_dev);
	struct qat_alg_buf_list *bufl;
	int node = dev_to_node(dev);
	struct scatterlist *sg;
	int n, i, sg_nctr;
	size_t sz;

	n = sg_nents(sgl);
	sz = struct_size(bufl, buffers, n);
	bufl = kzalloc_node(sz, GFP_KERNEL, node);
	if (unlikely(!bufl))
		return -ENOMEM;

	for (i = 0; i < n; i++)
		bufl->buffers[i].addr = DMA_MAPPING_ERROR;

	sg_nctr = 0;
	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
						       sg->length,
						       DMA_FROM_DEVICE);
		bufl->buffers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
			goto err_map;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	bufl->num_mapped_bufs = sg_nctr;

	*bl = bufl;

	return 0;

err_map:
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
			dma_unmap_single(dev, bufl->buffers[i].addr,
					 bufl->buffers[i].len,
					 DMA_FROM_DEVICE);
	kfree(bufl);
	*bl = NULL;

	return -ENOMEM;
}
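
qat_bl_sgl_map() relies on the same partial-unwind idiom as the err_in/err_out paths above: every address slot is seeded with DMA_MAPPING_ERROR, so the error path can walk the full array and unmap only the entries that were actually mapped. The idiom is condensed into the hypothetical helper below; the function and its parameters are illustrative, while struct qat_alg_buf and the DMA calls are the ones used in this file.

/*
 * Condensed illustration of the DMA_MAPPING_ERROR sentinel idiom
 * (not driver code): map an array of buffers and unwind cleanly on error.
 */
static int example_map_array(struct device *dev, void **vaddr, size_t *len,
			     struct qat_alg_buf *bufs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		bufs[i].addr = DMA_MAPPING_ERROR;	/* "not mapped yet" */

	for (i = 0; i < n; i++) {
		bufs[i].addr = dma_map_single(dev, vaddr[i], len[i],
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, bufs[i].addr))
			goto unwind;
		bufs[i].len = len[i];
	}
	return 0;

unwind:
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufs[i].addr))
			dma_unmap_single(dev, bufs[i].addr, bufs[i].len,
					 DMA_FROM_DEVICE);
	return -ENOMEM;
}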

static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev,
				  struct scatterlist *sgl,
				  struct qat_alg_buf_list *bl,
				  bool free_bl)
{
	if (bl) {
		qat_bl_sgl_unmap(accel_dev, bl);

		if (free_bl)
			kfree(bl);
	}
	if (sgl)
		sgl_free(sgl);
}

static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev,
				struct scatterlist **sgl,
				struct qat_alg_buf_list **bl,
				unsigned int dlen,
				gfp_t gfp)
{
	struct scatterlist *dst;
	int ret;

	dst = sgl_alloc(dlen, gfp, NULL);
	if (!dst) {
		dev_err(&GET_DEV(accel_dev), "sg_alloc failed\n");
		return -ENOMEM;
	}

	ret = qat_bl_sgl_map(accel_dev, dst, bl);
	if (ret)
		goto err;

	*sgl = dst;

	return 0;

err:
	sgl_free(dst);
	*sgl = NULL;
	return ret;
}

int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
			       struct scatterlist **sg,
			       unsigned int dlen,
			       struct qat_request_buffs *qat_bufs,
			       gfp_t gfp)
{
	struct device *dev = &GET_DEV(accel_dev);
	dma_addr_t new_blp = DMA_MAPPING_ERROR;
	struct qat_alg_buf_list *new_bl;
	struct scatterlist *new_sg;
	size_t new_bl_size;
	int ret;

	ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp);
	if (ret)
		return ret;

	new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);

	/* Map new firmware SGL descriptor */
	new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, new_blp)))
		goto err;

	/* Unmap old firmware SGL descriptor */
	dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE);

	/* Free and unmap old scatterlist */
	qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout,
			      !qat_bufs->sgl_dst_valid);

	qat_bufs->sgl_dst_valid = false;
	qat_bufs->blout = new_bl;
	qat_bufs->bloutp = new_blp;
	qat_bufs->sz_out = new_bl_size;

	*sg = new_sg;

	return 0;
err:
	qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true);

	if (!dma_mapping_error(dev, new_blp))
		dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE);

	return -ENOMEM;
}
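
qat_bl_realloc_map_new_dst() supports the case where the device's output no longer fits in the destination supplied with the original request: it allocates and maps a larger destination scatterlist, swaps it into the existing qat_request_buffs, and releases the old one. A hypothetical retry path is sketched below; the doubling policy and the surrounding function are assumptions, only the realloc call itself comes from this file.

/*
 * Hypothetical resubmission path (illustration only): on destination
 * overflow, swap in a larger, freshly mapped destination and retry.
 */
static int example_retry_with_bigger_dst(struct adf_accel_dev *accel_dev,
					 struct qat_request_buffs *buf,
					 struct scatterlist **dst,
					 unsigned int *dlen)
{
	int ret;

	*dlen *= 2;	/* assumed growth policy */

	ret = qat_bl_realloc_map_new_dst(accel_dev, dst, *dlen, buf,
					 GFP_KERNEL);
	if (ret)
		return ret;

	/* ... rebuild the firmware request around buf->bloutp and resubmit ... */
	return 0;
}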
v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2014 - 2022 Intel Corporation */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "qat_bl.h"
#include "qat_crypto.h"

void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
		      struct qat_request_buffs *buf)
{
	struct device *dev = &GET_DEV(accel_dev);
	struct qat_alg_buf_list *bl = buf->bl;
	struct qat_alg_buf_list *blout = buf->blout;
	dma_addr_t blp = buf->blp;
	dma_addr_t blpout = buf->bloutp;
	size_t sz = buf->sz;
	size_t sz_out = buf->sz_out;
	int bl_dma_dir;
	int i;

	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->buffers[i].addr,
				 bl->buffers[i].len, bl_dma_dir);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	if (!buf->sgl_src_valid)
		kfree(bl);

	if (blp != blpout) {
		for (i = 0; i < blout->num_mapped_bufs; i++) {
			dma_unmap_single(dev, blout->buffers[i].addr,
					 blout->buffers[i].len,
					 DMA_FROM_DEVICE);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);

		if (!buf->sgl_dst_valid)
			kfree(blout);
	}
}

static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
				struct scatterlist *sgl,
				struct scatterlist *sglout,
				struct qat_request_buffs *buf,
				dma_addr_t extra_dst_buff,
				size_t sz_extra_dst_buff,
				unsigned int sskip,
				unsigned int dskip,
				gfp_t flags)
{
	struct device *dev = &GET_DEV(accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp = DMA_MAPPING_ERROR;
	dma_addr_t bloutp = DMA_MAPPING_ERROR;
	struct scatterlist *sg;
	size_t sz_out, sz = struct_size(bufl, buffers, n);
	int node = dev_to_node(&GET_DEV(accel_dev));
	unsigned int left;
	int bufl_dma_dir;

	if (unlikely(!n))
		return -EINVAL;

	buf->sgl_src_valid = false;
	buf->sgl_dst_valid = false;

	if (n > QAT_MAX_BUFF_DESC) {
		bufl = kzalloc_node(sz, flags, node);
		if (unlikely(!bufl))
			return -ENOMEM;
	} else {
		bufl = container_of(&buf->sgl_src.sgl_hdr,
				    struct qat_alg_buf_list, hdr);
		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
		buf->sgl_src_valid = true;
	}

	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	for (i = 0; i < n; i++)
		bufl->buffers[i].addr = DMA_MAPPING_ERROR;

	left = sskip;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		if (left >= sg->length) {
			left -= sg->length;
			continue;
		}
		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
						       sg->length - left,
						       bufl_dma_dir);
		bufl->buffers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
			goto err_in;
		sg_nctr++;
		if (left) {
			bufl->buffers[y].len -= left;
			left = 0;
		}
	}
	bufl->num_bufs = sg_nctr;
	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;
	buf->bl = bufl;
	buf->blp = blp;
	buf->sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *buffers;
		int extra_buff = extra_dst_buff ? 1 : 0;
		int n_sglout = sg_nents(sglout);

		n = n_sglout + extra_buff;
		sz_out = struct_size(buflout, buffers, n);
		left = dskip;

		sg_nctr = 0;

		if (n > QAT_MAX_BUFF_DESC) {
			buflout = kzalloc_node(sz_out, flags, node);
			if (unlikely(!buflout))
				goto err_in;
		} else {
			buflout = container_of(&buf->sgl_dst.sgl_hdr,
					       struct qat_alg_buf_list, hdr);
			memset(buflout, 0, sizeof(struct qat_alg_buf_list));
			buf->sgl_dst_valid = true;
		}

		buffers = buflout->buffers;
		for (i = 0; i < n; i++)
			buffers[i].addr = DMA_MAPPING_ERROR;

		for_each_sg(sglout, sg, n_sglout, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			if (left >= sg->length) {
				left -= sg->length;
				continue;
			}
			buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
							 sg->length - left,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
				goto err_out;
			buffers[y].len = sg->length;
			sg_nctr++;
			if (left) {
				buffers[y].len -= left;
				left = 0;
			}
		}
		if (extra_buff) {
			buffers[sg_nctr].addr = extra_dst_buff;
			buffers[sg_nctr].len = sz_extra_dst_buff;
		}

		buflout->num_bufs = sg_nctr;
		buflout->num_bufs += extra_buff;
		buflout->num_mapped_bufs = sg_nctr;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		buf->blout = buflout;
		buf->bloutp = bloutp;
		buf->sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		buf->bloutp = buf->blp;
		buf->sz_out = 0;
	}
	return 0;

err_out:
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);

	n = sg_nents(sglout);
	for (i = 0; i < n; i++) {
		if (buflout->buffers[i].addr == extra_dst_buff)
			break;
		if (!dma_mapping_error(dev, buflout->buffers[i].addr))
			dma_unmap_single(dev, buflout->buffers[i].addr,
					 buflout->buffers[i].len,
					 DMA_FROM_DEVICE);
	}

	if (!buf->sgl_dst_valid)
		kfree(buflout);

err_in:
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
			dma_unmap_single(dev, bufl->buffers[i].addr,
					 bufl->buffers[i].len,
					 bufl_dma_dir);

	if (!buf->sgl_src_valid)
		kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}
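
The only functional difference from the v6.8 listing above is how the small, preallocated descriptors are obtained: v6.8 takes &buf->sgl_src.sgl_hdr (and &buf->sgl_dst.sgl_hdr) directly as a struct qat_alg_buf_list pointer, while v6.13.7 keeps only the list header in the fixed-size storage and recovers the enclosing qat_alg_buf_list view with container_of(). The user-space sketch below illustrates the container_of() pattern with stand-in types and a simplified macro (the kernel version adds type checking); it is not the driver's own structure layout.

/*
 * User-space illustration of container_of(): recover a pointer to the
 * enclosing object from a pointer to one of its members.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_hdr { int num_bufs; };			/* stand-in header */
struct buf_list { struct list_hdr hdr; int buffers[4]; };	/* stand-in list */

int main(void)
{
	struct buf_list bl = { .hdr = { .num_bufs = 2 } };
	struct list_hdr *hdr = &bl.hdr;
	struct buf_list *back = container_of(hdr, struct buf_list, hdr);

	printf("%d\n", back->hdr.num_bufs);	/* prints 2 */
	return back != &bl;			/* 0: round trip succeeded */
}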

int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
		       struct scatterlist *sgl,
		       struct scatterlist *sglout,
		       struct qat_request_buffs *buf,
		       struct qat_sgl_to_bufl_params *params,
		       gfp_t flags)
{
	dma_addr_t extra_dst_buff = 0;
	size_t sz_extra_dst_buff = 0;
	unsigned int sskip = 0;
	unsigned int dskip = 0;

	if (params) {
		extra_dst_buff = params->extra_dst_buff;
		sz_extra_dst_buff = params->sz_extra_dst_buff;
		sskip = params->sskip;
		dskip = params->dskip;
	}

	return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
				    extra_dst_buff, sz_extra_dst_buff,
				    sskip, dskip, flags);
}

static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
			     struct qat_alg_buf_list *bl)
{
	struct device *dev = &GET_DEV(accel_dev);
	int n = bl->num_bufs;
	int i;

	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bl->buffers[i].addr))
			dma_unmap_single(dev, bl->buffers[i].addr,
					 bl->buffers[i].len, DMA_FROM_DEVICE);
}

static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
			  struct scatterlist *sgl,
			  struct qat_alg_buf_list **bl)
{
	struct device *dev = &GET_DEV(accel_dev);
	struct qat_alg_buf_list *bufl;
	int node = dev_to_node(dev);
	struct scatterlist *sg;
	int n, i, sg_nctr;
	size_t sz;

	n = sg_nents(sgl);
	sz = struct_size(bufl, buffers, n);
	bufl = kzalloc_node(sz, GFP_KERNEL, node);
	if (unlikely(!bufl))
		return -ENOMEM;

	for (i = 0; i < n; i++)
		bufl->buffers[i].addr = DMA_MAPPING_ERROR;

	sg_nctr = 0;
	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
						       sg->length,
						       DMA_FROM_DEVICE);
		bufl->buffers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
			goto err_map;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	bufl->num_mapped_bufs = sg_nctr;

	*bl = bufl;

	return 0;

err_map:
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
			dma_unmap_single(dev, bufl->buffers[i].addr,
					 bufl->buffers[i].len,
					 DMA_FROM_DEVICE);
	kfree(bufl);
	*bl = NULL;

	return -ENOMEM;
}

static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev,
				  struct scatterlist *sgl,
				  struct qat_alg_buf_list *bl,
				  bool free_bl)
{
	if (bl) {
		qat_bl_sgl_unmap(accel_dev, bl);

		if (free_bl)
			kfree(bl);
	}
	if (sgl)
		sgl_free(sgl);
}

static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev,
				struct scatterlist **sgl,
				struct qat_alg_buf_list **bl,
				unsigned int dlen,
				gfp_t gfp)
{
	struct scatterlist *dst;
	int ret;

	dst = sgl_alloc(dlen, gfp, NULL);
	if (!dst) {
		dev_err(&GET_DEV(accel_dev), "sg_alloc failed\n");
		return -ENOMEM;
	}

	ret = qat_bl_sgl_map(accel_dev, dst, bl);
	if (ret)
		goto err;

	*sgl = dst;

	return 0;

err:
	sgl_free(dst);
	*sgl = NULL;
	return ret;
}

int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
			       struct scatterlist **sg,
			       unsigned int dlen,
			       struct qat_request_buffs *qat_bufs,
			       gfp_t gfp)
{
	struct device *dev = &GET_DEV(accel_dev);
	dma_addr_t new_blp = DMA_MAPPING_ERROR;
	struct qat_alg_buf_list *new_bl;
	struct scatterlist *new_sg;
	size_t new_bl_size;
	int ret;

	ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp);
	if (ret)
		return ret;

	new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);

	/* Map new firmware SGL descriptor */
	new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, new_blp)))
		goto err;

	/* Unmap old firmware SGL descriptor */
	dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE);

	/* Free and unmap old scatterlist */
	qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout,
			      !qat_bufs->sgl_dst_valid);

	qat_bufs->sgl_dst_valid = false;
	qat_bufs->blout = new_bl;
	qat_bufs->bloutp = new_blp;
	qat_bufs->sz_out = new_bl_size;

	*sg = new_sg;

	return 0;
err:
	qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true);

	if (!dma_mapping_error(dev, new_blp))
		dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE);

	return -ENOMEM;
}