v5.9 - drivers/crypto/ccree/cc_buffer_mgr.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
   3
   4#include <crypto/internal/aead.h>
   5#include <crypto/authenc.h>
   6#include <crypto/scatterwalk.h>
   7#include <linux/dmapool.h>
   8#include <linux/dma-mapping.h>
   9
  10#include "cc_buffer_mgr.h"
  11#include "cc_lli_defs.h"
  12#include "cc_cipher.h"
  13#include "cc_hash.h"
  14#include "cc_aead.h"
  15
  16union buffer_array_entry {
  17	struct scatterlist *sgl;
  18	dma_addr_t buffer_dma;
  19};
  20
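/* A buffer_array collects up to MAX_NUM_OF_BUFFERS_IN_MLLI buffer/SGL
 * descriptions that cc_generate_mlli() later renders into a single MLLI
 * table; each mlli_nents[] slot may point at a per-request counter that
 * receives the number of LLI entries produced for that buffer.
 */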
  21struct buffer_array {
  22	unsigned int num_of_buffers;
  23	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
  24	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
  25	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  26	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
  27	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
  28	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  29};
  30
  31static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
  32{
  33	switch (type) {
  34	case CC_DMA_BUF_NULL:
  35		return "BUF_NULL";
  36	case CC_DMA_BUF_DLLI:
  37		return "BUF_DLLI";
  38	case CC_DMA_BUF_MLLI:
  39		return "BUF_MLLI";
  40	default:
  41		return "BUF_INVALID";
  42	}
  43}
  44
  45/**
  46 * cc_copy_mac() - Copy MAC to temporary location
  47 *
  48 * @dev: device object
  49 * @req: aead request object
  50 * @dir: [IN] copy from/to sgl
  51 */
  52static void cc_copy_mac(struct device *dev, struct aead_request *req,
  53			enum cc_sg_cpy_direct dir)
  54{
  55	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  56	u32 skip = req->assoclen + req->cryptlen;
  57
  58	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
  59			   (skip - areq_ctx->req_authsize), skip, dir);
  60}
  61
  62/**
  63 * cc_get_sgl_nents() - Get scatterlist number of entries.
  64 *
  65 * @dev: Device object
  66 * @sg_list: SG list
  67 * @nbytes: [IN] Total SGL data bytes.
  68 * @lbytes: [OUT] Returns the amount of bytes at the last entry
  69 *
  70 * Return:
  71 * Number of entries in the scatterlist
  72 */
  73static unsigned int cc_get_sgl_nents(struct device *dev,
  74				     struct scatterlist *sg_list,
  75				     unsigned int nbytes, u32 *lbytes)
  76{
  77	unsigned int nents = 0;
  78
  79	*lbytes = 0;
  80
  81	while (nbytes && sg_list) {
  82		nents++;
  83		/* get the number of bytes in the last entry */
  84		*lbytes = nbytes;
  85		nbytes -= (sg_list->length > nbytes) ?
  86				nbytes : sg_list->length;
  87		sg_list = sg_next(sg_list);
  88	}
  89
  90	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
  91	return nents;
  92}
  93
  94/**
  95 * cc_copy_sg_portion() - Copy scatter list data,
  96 * from to_skip to end, to dest and vice versa
  97 *
  98 * @dev: Device object
  99 * @dest: Buffer to copy to/from
 100 * @sg: SG list
 101 * @to_skip: Number of bytes to skip before copying
 102 * @end: Offset of last byte to copy
 103 * @direct: Transfer direction (true == from SG list to buffer, false == from
 104 *          buffer to SG list)
 105 */
 106void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 107			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
 108{
 109	u32 nents;
 110
 111	nents = sg_nents_for_len(sg, end);
 112	sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
 113		       (direct == CC_SG_TO_BUF));
 114}
 115
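/* Render one contiguous DMA buffer into LLI entries. Each entry is a pair
 * of 32-bit words (address and size); buffers larger than
 * CC_MAX_MLLI_ENTRY_SIZE are split across several entries.
 */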
 116static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
 117				  u32 buff_size, u32 *curr_nents,
 118				  u32 **mlli_entry_pp)
 119{
 120	u32 *mlli_entry_p = *mlli_entry_pp;
 121	u32 new_nents;
 122
 123	/* Verify there is no memory overflow*/
 124	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
 125	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
 126		dev_err(dev, "Too many mlli entries. current %d max %d\n",
 127			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
 128		return -ENOMEM;
 129	}
 130
 131	/*handle buffer longer than 64 kbytes */
 132	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
 133		cc_lli_set_addr(mlli_entry_p, buff_dma);
 134		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
 135		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 136			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 137			mlli_entry_p[LLI_WORD1_OFFSET]);
 138		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
 139		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
 140		mlli_entry_p = mlli_entry_p + 2;
 141		(*curr_nents)++;
 142	}
 143	/*Last entry */
 144	cc_lli_set_addr(mlli_entry_p, buff_dma);
 145	cc_lli_set_size(mlli_entry_p, buff_size);
 146	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 147		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 148		mlli_entry_p[LLI_WORD1_OFFSET]);
 149	mlli_entry_p = mlli_entry_p + 2;
 150	*mlli_entry_pp = mlli_entry_p;
 151	(*curr_nents)++;
 152	return 0;
 153}
 154
 155static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
 156				u32 sgl_data_len, u32 sgl_offset,
 157				u32 *curr_nents, u32 **mlli_entry_pp)
 158{
 159	struct scatterlist *curr_sgl = sgl;
 160	u32 *mlli_entry_p = *mlli_entry_pp;
 161	s32 rc = 0;
 162
 163	for ( ; (curr_sgl && sgl_data_len);
 164	      curr_sgl = sg_next(curr_sgl)) {
 165		u32 entry_data_len =
 166			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
 167				sg_dma_len(curr_sgl) - sgl_offset :
 168				sgl_data_len;
 169		sgl_data_len -= entry_data_len;
 170		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
 171					    sgl_offset, entry_data_len,
 172					    curr_nents, &mlli_entry_p);
 173		if (rc)
 174			return rc;
 175
 176		sgl_offset = 0;
 177	}
 178	*mlli_entry_pp = mlli_entry_p;
 179	return 0;
 180}
 181
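/* Allocate a single MLLI table from the DMA pool and render every buffer
 * collected in sg_data into it. For buffers that supplied an mlli_nents
 * pointer, the number of LLI entries they contributed is accumulated so
 * the HW descriptors can be programmed with the correct table lengths.
 */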
 182static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
 183			    struct mlli_params *mlli_params, gfp_t flags)
 184{
 185	u32 *mlli_p;
 186	u32 total_nents = 0, prev_total_nents = 0;
 187	int rc = 0, i;
 188
 189	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
 190
 191	/* Allocate memory from the pointed pool */
 192	mlli_params->mlli_virt_addr =
 193		dma_pool_alloc(mlli_params->curr_pool, flags,
 194			       &mlli_params->mlli_dma_addr);
 195	if (!mlli_params->mlli_virt_addr) {
 196		dev_err(dev, "dma_pool_alloc() failed\n");
 197		rc = -ENOMEM;
 198		goto build_mlli_exit;
 199	}
 200	/* Point to start of MLLI */
 201	mlli_p = mlli_params->mlli_virt_addr;
  202	/* go over all SGs and link them into one MLLI table */
 203	for (i = 0; i < sg_data->num_of_buffers; i++) {
 204		union buffer_array_entry *entry = &sg_data->entry[i];
 205		u32 tot_len = sg_data->total_data_len[i];
 206		u32 offset = sg_data->offset[i];
 207
 208		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
 209					  &total_nents, &mlli_p);
 210		if (rc)
 211			return rc;
 212
 213		/* set last bit in the current table */
 214		if (sg_data->mlli_nents[i]) {
 215			/*Calculate the current MLLI table length for the
 216			 *length field in the descriptor
 217			 */
 218			*sg_data->mlli_nents[i] +=
 219				(total_nents - prev_total_nents);
 220			prev_total_nents = total_nents;
 221		}
 222	}
 223
 224	/* Set MLLI size for the bypass operation */
 225	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
 226
 227	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
 228		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
 229		mlli_params->mlli_len);
 230
 231build_mlli_exit:
 232	return rc;
 233}
 234
 235static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 236			    unsigned int nents, struct scatterlist *sgl,
 237			    unsigned int data_len, unsigned int data_offset,
 238			    bool is_last_table, u32 *mlli_nents)
 239{
 240	unsigned int index = sgl_data->num_of_buffers;
 241
 242	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
 243		index, nents, sgl, data_len, is_last_table);
 244	sgl_data->nents[index] = nents;
 245	sgl_data->entry[index].sgl = sgl;
 246	sgl_data->offset[index] = data_offset;
 247	sgl_data->total_data_len[index] = data_len;
 248	sgl_data->is_last[index] = is_last_table;
 249	sgl_data->mlli_nents[index] = mlli_nents;
 250	if (sgl_data->mlli_nents[index])
 251		*sgl_data->mlli_nents[index] = 0;
 252	sgl_data->num_of_buffers++;
 253}
 254
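/* Count the SG entries covering nbytes, reject the request if they exceed
 * max_sg_nents, and DMA-map the list. *lbytes returns the number of bytes
 * carried by the last entry and *mapped_nents the dma_map_sg() result.
 */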
 255static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 256		     unsigned int nbytes, int direction, u32 *nents,
 257		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 258{
 259	int ret = 0;
 260
 261	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
 262	if (*nents > max_sg_nents) {
 263		*nents = 0;
 264		dev_err(dev, "Too many fragments. current %d max %d\n",
 265			*nents, max_sg_nents);
 266		return -ENOMEM;
 267	}
 268
 269	ret = dma_map_sg(dev, sg, *nents, direction);
 270	if (dma_mapping_error(dev, ret)) {
 271		*nents = 0;
 272		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
 273		return -ENOMEM;
 274	}
 275
 276	*mapped_nents = ret;
 277
 278	return 0;
 279}
 280
 281static int
 282cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
 283		     u8 *config_data, struct buffer_array *sg_data,
 284		     unsigned int assoclen)
 285{
 286	dev_dbg(dev, " handle additional data config set to DLLI\n");
 287	/* create sg for the current buffer */
 288	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
 289		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
 290	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
 291		dev_err(dev, "dma_map_sg() config buffer failed\n");
 292		return -ENOMEM;
 293	}
 294	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 295		&sg_dma_address(&areq_ctx->ccm_adata_sg),
 296		sg_page(&areq_ctx->ccm_adata_sg),
 297		sg_virt(&areq_ctx->ccm_adata_sg),
 298		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
 299	/* prepare for case of MLLI */
 300	if (assoclen > 0) {
 301		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
 302				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
 303				0, false, NULL);
 304	}
 305	return 0;
 306}
 307
 308static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
 309			   u8 *curr_buff, u32 curr_buff_cnt,
 310			   struct buffer_array *sg_data)
 311{
 312	dev_dbg(dev, " handle curr buff %x set to   DLLI\n", curr_buff_cnt);
 313	/* create sg for the current buffer */
 314	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
 315	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
 316		dev_err(dev, "dma_map_sg() src buffer failed\n");
 317		return -ENOMEM;
 318	}
 319	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 320		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
 321		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
 322		areq_ctx->buff_sg->length);
 323	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
 324	areq_ctx->curr_sg = areq_ctx->buff_sg;
 325	areq_ctx->in_nents = 0;
 326	/* prepare for case of MLLI */
 327	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
 328			false, NULL);
 329	return 0;
 330}
 331
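/* Reverse cc_map_cipher_request(): unmap the IV, return the MLLI table to
 * the DMA pool (if one was allocated) and unmap the src/dst scatterlists.
 */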
 332void cc_unmap_cipher_request(struct device *dev, void *ctx,
 333				unsigned int ivsize, struct scatterlist *src,
 334				struct scatterlist *dst)
 335{
 336	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 337
 338	if (req_ctx->gen_ctx.iv_dma_addr) {
 339		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 340			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
 341		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
 342				 ivsize, DMA_BIDIRECTIONAL);
 343	}
 344	/* Release pool */
 345	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
 346	    req_ctx->mlli_params.mlli_virt_addr) {
 347		dma_pool_free(req_ctx->mlli_params.curr_pool,
 348			      req_ctx->mlli_params.mlli_virt_addr,
 349			      req_ctx->mlli_params.mlli_dma_addr);
 350	}
 351
 352	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
 353	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 354
 355	if (src != dst) {
 356		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
 357		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
 358	}
 359}
 360
 361int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 362			  unsigned int ivsize, unsigned int nbytes,
 363			  void *info, struct scatterlist *src,
 364			  struct scatterlist *dst, gfp_t flags)
 365{
 366	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 367	struct mlli_params *mlli_params = &req_ctx->mlli_params;
 368	struct device *dev = drvdata_to_dev(drvdata);
 369	struct buffer_array sg_data;
 370	u32 dummy = 0;
 371	int rc = 0;
 372	u32 mapped_nents = 0;
 373
 374	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
 375	mlli_params->curr_pool = NULL;
 376	sg_data.num_of_buffers = 0;
 377
 378	/* Map IV buffer */
 379	if (ivsize) {
 380		dump_byte_array("iv", info, ivsize);
 381		req_ctx->gen_ctx.iv_dma_addr =
 382			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
 383		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
 384			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 385				ivsize, info);
 386			return -ENOMEM;
 387		}
 388		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 389			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
 390	} else {
 391		req_ctx->gen_ctx.iv_dma_addr = 0;
 392	}
 393
 394	/* Map the src SGL */
 395	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
 396		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
 397	if (rc)
 398		goto cipher_exit;
 399	if (mapped_nents > 1)
 400		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 401
 402	if (src == dst) {
 403		/* Handle inplace operation */
 404		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 405			req_ctx->out_nents = 0;
 406			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 407					nbytes, 0, true,
 408					&req_ctx->in_mlli_nents);
 409		}
 410	} else {
 411		/* Map the dst sg */
 412		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
 413			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
 414			       &dummy, &mapped_nents);
 415		if (rc)
 416			goto cipher_exit;
 417		if (mapped_nents > 1)
 418			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 419
 420		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 421			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 422					nbytes, 0, true,
 423					&req_ctx->in_mlli_nents);
 424			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
 425					nbytes, 0, true,
 426					&req_ctx->out_mlli_nents);
 427		}
 428	}
 429
 430	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 431		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 432		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 433		if (rc)
 434			goto cipher_exit;
 435	}
 436
 437	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
 438		cc_dma_buf_type(req_ctx->dma_buf_type));
 439
 440	return 0;
 441
 442cipher_exit:
 443	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
 444	return rc;
 445}
 446
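/* Undo all mappings performed by cc_map_aead_request(): the MAC, CCM and
 * GCM helper buffers, the IV copy, the MLLI table and the src/dst
 * scatterlists. On coherent platforms the backed-up MAC is copied back
 * for in-place decrypt requests.
 */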
 447void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 448{
 449	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 450	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 451	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 452
 453	if (areq_ctx->mac_buf_dma_addr) {
 454		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
 455				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
 456	}
 457
 458	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
 459		if (areq_ctx->hkey_dma_addr) {
 460			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
 461					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
 462		}
 463
 464		if (areq_ctx->gcm_block_len_dma_addr) {
 465			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
 466					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 467		}
 468
 469		if (areq_ctx->gcm_iv_inc1_dma_addr) {
 470			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
 471					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 472		}
 473
 474		if (areq_ctx->gcm_iv_inc2_dma_addr) {
 475			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
 476					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 477		}
 478	}
 479
 480	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 481		if (areq_ctx->ccm_iv0_dma_addr) {
 482			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
 483					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 484		}
 485
 486		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
 487	}
 488	if (areq_ctx->gen_ctx.iv_dma_addr) {
 489		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 490				 hw_iv_size, DMA_BIDIRECTIONAL);
 491		kfree_sensitive(areq_ctx->gen_ctx.iv);
 492	}
 493
 494	/* Release pool */
 495	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
 496	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
 497	    (areq_ctx->mlli_params.mlli_virt_addr)) {
 498		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
 499			&areq_ctx->mlli_params.mlli_dma_addr,
 500			areq_ctx->mlli_params.mlli_virt_addr);
 501		dma_pool_free(areq_ctx->mlli_params.curr_pool,
 502			      areq_ctx->mlli_params.mlli_virt_addr,
 503			      areq_ctx->mlli_params.mlli_dma_addr);
 504	}
 505
 506	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
 507		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 508		areq_ctx->assoclen, req->cryptlen);
 509
 510	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
 511		     DMA_BIDIRECTIONAL);
 512	if (req->src != req->dst) {
 513		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 514			sg_virt(req->dst));
 515		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
 516			     DMA_BIDIRECTIONAL);
 517	}
 518	if (drvdata->coherent &&
 519	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
 520	    req->src == req->dst) {
  521		/* copy the mac back from its temporary location to deal with
  522		 * possible data memory overriding caused by a cache coherence
  523		 * problem.
  524		 */
 525		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
 526	}
 527}
 528
 529static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
 530			   u32 last_entry_data_size)
 531{
 532	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
 533}
 534
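/* The request IV is duplicated with kmemdup() so it can be DMA-mapped
 * independently of the caller's buffer; the copy is released in
 * cc_unmap_aead_request().
 */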
 535static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
 536			    struct aead_request *req,
 537			    struct buffer_array *sg_data,
 538			    bool is_last, bool do_chain)
 539{
 540	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 541	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 542	struct device *dev = drvdata_to_dev(drvdata);
 543	gfp_t flags = cc_gfp_flags(&req->base);
 544	int rc = 0;
 545
 546	if (!req->iv) {
 547		areq_ctx->gen_ctx.iv_dma_addr = 0;
 548		areq_ctx->gen_ctx.iv = NULL;
 549		goto chain_iv_exit;
 550	}
 551
 552	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
 553	if (!areq_ctx->gen_ctx.iv)
 554		return -ENOMEM;
 555
 556	areq_ctx->gen_ctx.iv_dma_addr =
 557		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
 558			       DMA_BIDIRECTIONAL);
 559	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 560		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 561			hw_iv_size, req->iv);
 562		kfree_sensitive(areq_ctx->gen_ctx.iv);
 563		areq_ctx->gen_ctx.iv = NULL;
 564		rc = -ENOMEM;
 565		goto chain_iv_exit;
 566	}
 567
 568	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 569		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
 570
 571chain_iv_exit:
 572	return rc;
 573}
 574
 575static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 576			       struct aead_request *req,
 577			       struct buffer_array *sg_data,
 578			       bool is_last, bool do_chain)
 579{
 580	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 581	int rc = 0;
 582	int mapped_nents = 0;
 583	struct device *dev = drvdata_to_dev(drvdata);
 584
 585	if (!sg_data) {
 586		rc = -EINVAL;
 587		goto chain_assoc_exit;
 588	}
 589
 590	if (areq_ctx->assoclen == 0) {
 591		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
 592		areq_ctx->assoc.nents = 0;
 593		areq_ctx->assoc.mlli_nents = 0;
 594		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
 595			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 596			areq_ctx->assoc.nents);
 597		goto chain_assoc_exit;
 598	}
 599
 600	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
 601	if (mapped_nents < 0)
 602		return mapped_nents;
 603
 604	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 605		dev_err(dev, "Too many fragments. current %d max %d\n",
 606			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 607		return -ENOMEM;
 608	}
 609	areq_ctx->assoc.nents = mapped_nents;
 610
 611	/* in CCM case we have additional entry for
 612	 * ccm header configurations
 613	 */
 614	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 615		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 616			dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
 617				(areq_ctx->assoc.nents + 1),
 618				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 619			rc = -ENOMEM;
 620			goto chain_assoc_exit;
 621		}
 622	}
 623
 624	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
 625		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
 626	else
 627		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 628
 629	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 630		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
 631			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 632			areq_ctx->assoc.nents);
 633		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
 634				areq_ctx->assoclen, 0, is_last,
 635				&areq_ctx->assoc.mlli_nents);
 636		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 637	}
 638
 639chain_assoc_exit:
 640	return rc;
 641}
 642
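/* DLLI case: the ICV sits directly at the tail of the last data entry.
 * The src SGL is used for in-place and for decrypt operations, the dst
 * SGL for non-in-place encrypt.
 */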
 643static void cc_prepare_aead_data_dlli(struct aead_request *req,
 644				      u32 *src_last_bytes, u32 *dst_last_bytes)
 645{
 646	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 647	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 648	unsigned int authsize = areq_ctx->req_authsize;
 649	struct scatterlist *sg;
 650	ssize_t offset;
 651
 652	areq_ctx->is_icv_fragmented = false;
 653
 654	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 655		sg = areq_ctx->src_sgl;
 656		offset = *src_last_bytes - authsize;
 657	} else {
 658		sg = areq_ctx->dst_sgl;
 659		offset = *dst_last_bytes - authsize;
 660	}
 661
 662	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
 663	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
 664}
 665
 666static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
 667				      struct aead_request *req,
 668				      struct buffer_array *sg_data,
 669				      u32 *src_last_bytes, u32 *dst_last_bytes,
 670				      bool is_last_table)
 671{
 672	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 673	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 674	unsigned int authsize = areq_ctx->req_authsize;
 675	struct device *dev = drvdata_to_dev(drvdata);
 676	struct scatterlist *sg;
 677
 678	if (req->src == req->dst) {
 679		/*INPLACE*/
 680		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 681				areq_ctx->src_sgl, areq_ctx->cryptlen,
 682				areq_ctx->src_offset, is_last_table,
 683				&areq_ctx->src.mlli_nents);
 684
 685		areq_ctx->is_icv_fragmented =
 686			cc_is_icv_frag(areq_ctx->src.nents, authsize,
 687				       *src_last_bytes);
 688
 689		if (areq_ctx->is_icv_fragmented) {
 690			/* Backup happens only when ICV is fragmented, ICV
 691			 * verification is made by CPU compare in order to
 692			 * simplify MAC verification upon request completion
 693			 */
 694			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
  695				/* On coherent platforms (e.g. ACP) the ICV
  696				 * has already been copied for any
  697				 * INPLACE-DECRYPT operation, so this copy
  698				 * is skipped here.
  699				 */
 700				if (!drvdata->coherent)
 701					cc_copy_mac(dev, req, CC_SG_TO_BUF);
 702
 703				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 704			} else {
 705				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 706				areq_ctx->icv_dma_addr =
 707					areq_ctx->mac_buf_dma_addr;
 708			}
 709		} else { /* Contig. ICV */
 710			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  711			/* Should handle the case where the SG is not contiguous. */
 712			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 713				(*src_last_bytes - authsize);
 714			areq_ctx->icv_virt_addr = sg_virt(sg) +
 715				(*src_last_bytes - authsize);
 716		}
 717
 718	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 719		/*NON-INPLACE and DECRYPT*/
 720		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 721				areq_ctx->src_sgl, areq_ctx->cryptlen,
 722				areq_ctx->src_offset, is_last_table,
 723				&areq_ctx->src.mlli_nents);
 724		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 725				areq_ctx->dst_sgl, areq_ctx->cryptlen,
 726				areq_ctx->dst_offset, is_last_table,
 727				&areq_ctx->dst.mlli_nents);
 728
 729		areq_ctx->is_icv_fragmented =
 730			cc_is_icv_frag(areq_ctx->src.nents, authsize,
 731				       *src_last_bytes);
  732		/* Backup happens only when ICV is fragmented, ICV
  733		 * verification is made by CPU compare in order to simplify
  734		 * MAC verification upon request completion
  735		 */
  736
 737		if (areq_ctx->is_icv_fragmented) {
 738			cc_copy_mac(dev, req, CC_SG_TO_BUF);
 739			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 740
 741		} else { /* Contig. ICV */
 742			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  743			/* Should handle the case where the SG is not contiguous. */
 744			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 745				(*src_last_bytes - authsize);
 746			areq_ctx->icv_virt_addr = sg_virt(sg) +
 747				(*src_last_bytes - authsize);
 748		}
 749
 750	} else {
 751		/*NON-INPLACE and ENCRYPT*/
 752		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 753				areq_ctx->dst_sgl, areq_ctx->cryptlen,
 754				areq_ctx->dst_offset, is_last_table,
 755				&areq_ctx->dst.mlli_nents);
 756		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 757				areq_ctx->src_sgl, areq_ctx->cryptlen,
 758				areq_ctx->src_offset, is_last_table,
 759				&areq_ctx->src.mlli_nents);
 760
 761		areq_ctx->is_icv_fragmented =
 762			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
 763				       *dst_last_bytes);
 764
 765		if (!areq_ctx->is_icv_fragmented) {
 766			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
 767			/* Contig. ICV */
 768			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 769				(*dst_last_bytes - authsize);
 770			areq_ctx->icv_virt_addr = sg_virt(sg) +
 771				(*dst_last_bytes - authsize);
 772		} else {
 773			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
 774			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 775		}
 776	}
 777}
 778
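/* Walk src (and dst when different) past the assoclen bytes to find where
 * the payload starts, record the resulting offsets and entry counts, and
 * select DLLI when both sides map to a single entry, MLLI otherwise (or
 * when chaining was requested).
 */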
 779static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 780			      struct aead_request *req,
 781			      struct buffer_array *sg_data,
 782			      bool is_last_table, bool do_chain)
 783{
 784	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 785	struct device *dev = drvdata_to_dev(drvdata);
 786	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 787	unsigned int authsize = areq_ctx->req_authsize;
 788	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
 789	int rc = 0;
 790	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
 791	u32 offset = 0;
 792	/* non-inplace mode */
 793	unsigned int size_for_map = req->assoclen + req->cryptlen;
 794	u32 sg_index = 0;
 795	u32 size_to_skip = req->assoclen;
 796	struct scatterlist *sgl;
 797
 798	offset = size_to_skip;
 799
 800	if (!sg_data)
 801		return -EINVAL;
 802
 803	areq_ctx->src_sgl = req->src;
 804	areq_ctx->dst_sgl = req->dst;
 805
 806	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 807			authsize : 0;
 808	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
 809					    &src_last_bytes);
 810	sg_index = areq_ctx->src_sgl->length;
 811	//check where the data starts
 812	while (src_mapped_nents && (sg_index <= size_to_skip)) {
 813		src_mapped_nents--;
 814		offset -= areq_ctx->src_sgl->length;
 815		sgl = sg_next(areq_ctx->src_sgl);
 816		if (!sgl)
 817			break;
 818		areq_ctx->src_sgl = sgl;
 819		sg_index += areq_ctx->src_sgl->length;
 820	}
 821	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 822		dev_err(dev, "Too many fragments. current %d max %d\n",
 823			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 824		return -ENOMEM;
 825	}
 826
 827	areq_ctx->src.nents = src_mapped_nents;
 828
 829	areq_ctx->src_offset = offset;
 830
 831	if (req->src != req->dst) {
 832		size_for_map = req->assoclen + req->cryptlen;
 833
 834		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
 835			size_for_map += authsize;
 836		else
 837			size_for_map -= authsize;
 838
 839		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
 840			       &areq_ctx->dst.mapped_nents,
 841			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 842			       &dst_mapped_nents);
 843		if (rc)
 844			goto chain_data_exit;
 845	}
 846
 847	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
 848					    &dst_last_bytes);
 849	sg_index = areq_ctx->dst_sgl->length;
 850	offset = size_to_skip;
 851
 852	//check where the data starts
 853	while (dst_mapped_nents && sg_index <= size_to_skip) {
 854		dst_mapped_nents--;
 855		offset -= areq_ctx->dst_sgl->length;
 856		sgl = sg_next(areq_ctx->dst_sgl);
 857		if (!sgl)
 858			break;
 859		areq_ctx->dst_sgl = sgl;
 860		sg_index += areq_ctx->dst_sgl->length;
 861	}
 862	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 863		dev_err(dev, "Too many fragments. current %d max %d\n",
 864			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 865		return -ENOMEM;
 866	}
 867	areq_ctx->dst.nents = dst_mapped_nents;
 868	areq_ctx->dst_offset = offset;
 869	if (src_mapped_nents > 1 ||
 870	    dst_mapped_nents  > 1 ||
 871	    do_chain) {
 872		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
 873		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
 874					  &src_last_bytes, &dst_last_bytes,
 875					  is_last_table);
 876	} else {
 877		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
 878		cc_prepare_aead_data_dlli(req, &src_last_bytes,
 879					  &dst_last_bytes);
 880	}
 881
 882chain_data_exit:
 883	return rc;
 884}
 885
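/* Assign SRAM addresses for the generated MLLI sub-tables: the assoc table
 * starts at drvdata->mlli_sram_addr and the src/dst tables follow it. In
 * the non-single-pass flow the data entries are accounted into the assoc
 * table's length as well.
 */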
 886static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
 887				      struct aead_request *req)
 888{
 889	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 890	u32 curr_mlli_size = 0;
 891
 892	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 893		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
 894		curr_mlli_size = areq_ctx->assoc.mlli_nents *
 895						LLI_ENTRY_BYTE_SIZE;
 896	}
 897
 898	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
 899		/*Inplace case dst nents equal to src nents*/
 900		if (req->src == req->dst) {
 901			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
 902			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
 903								curr_mlli_size;
 904			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
 905			if (!areq_ctx->is_single_pass)
 906				areq_ctx->assoc.mlli_nents +=
 907					areq_ctx->src.mlli_nents;
 908		} else {
 909			if (areq_ctx->gen_ctx.op_type ==
 910					DRV_CRYPTO_DIRECTION_DECRYPT) {
 911				areq_ctx->src.sram_addr =
 912						drvdata->mlli_sram_addr +
 913								curr_mlli_size;
 914				areq_ctx->dst.sram_addr =
 915						areq_ctx->src.sram_addr +
 916						areq_ctx->src.mlli_nents *
 917						LLI_ENTRY_BYTE_SIZE;
 918				if (!areq_ctx->is_single_pass)
 919					areq_ctx->assoc.mlli_nents +=
 920						areq_ctx->src.mlli_nents;
 921			} else {
 922				areq_ctx->dst.sram_addr =
 923						drvdata->mlli_sram_addr +
 924								curr_mlli_size;
 925				areq_ctx->src.sram_addr =
 926						areq_ctx->dst.sram_addr +
 927						areq_ctx->dst.mlli_nents *
 928						LLI_ENTRY_BYTE_SIZE;
 929				if (!areq_ctx->is_single_pass)
 930					areq_ctx->assoc.mlli_nents +=
 931						areq_ctx->dst.mlli_nents;
 932			}
 933		}
 934	}
 935}
 936
 937int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 938{
 939	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 940	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 941	struct device *dev = drvdata_to_dev(drvdata);
 942	struct buffer_array sg_data;
 943	unsigned int authsize = areq_ctx->req_authsize;
 944	int rc = 0;
 945	dma_addr_t dma_addr;
 946	u32 mapped_nents = 0;
 947	u32 dummy = 0; /*used for the assoc data fragments */
 948	u32 size_to_map;
 949	gfp_t flags = cc_gfp_flags(&req->base);
 950
 951	mlli_params->curr_pool = NULL;
 952	sg_data.num_of_buffers = 0;
 953
  954	/* copy the mac to a temporary location to deal with possible
  955	 * data memory overriding caused by a cache coherence problem.
  956	 */
 957	if (drvdata->coherent &&
 958	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
 959	    req->src == req->dst)
 960		cc_copy_mac(dev, req, CC_SG_TO_BUF);
 961
  962	/* calculate the size for the cipher; the ICV is removed in decrypt */
 963	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
 964				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 965				req->cryptlen :
 966				(req->cryptlen - authsize);
 967
 968	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
 969				  DMA_BIDIRECTIONAL);
 970	if (dma_mapping_error(dev, dma_addr)) {
 971		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
 972			MAX_MAC_SIZE, areq_ctx->mac_buf);
 973		rc = -ENOMEM;
 974		goto aead_map_failure;
 975	}
 976	areq_ctx->mac_buf_dma_addr = dma_addr;
 977
 978	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 979		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
 980
 981		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
 982					  DMA_TO_DEVICE);
 983
 984		if (dma_mapping_error(dev, dma_addr)) {
 985			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
 986				AES_BLOCK_SIZE, addr);
 987			areq_ctx->ccm_iv0_dma_addr = 0;
 988			rc = -ENOMEM;
 989			goto aead_map_failure;
 990		}
 991		areq_ctx->ccm_iv0_dma_addr = dma_addr;
 992
 993		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
 994					  &sg_data, areq_ctx->assoclen);
 995		if (rc)
 996			goto aead_map_failure;
 997	}
 998
 999	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1000		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
1001					  DMA_BIDIRECTIONAL);
1002		if (dma_mapping_error(dev, dma_addr)) {
1003			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
1004				AES_BLOCK_SIZE, areq_ctx->hkey);
1005			rc = -ENOMEM;
1006			goto aead_map_failure;
1007		}
1008		areq_ctx->hkey_dma_addr = dma_addr;
1009
1010		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
1011					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1012		if (dma_mapping_error(dev, dma_addr)) {
1013			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1014				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1015			rc = -ENOMEM;
1016			goto aead_map_failure;
1017		}
1018		areq_ctx->gcm_block_len_dma_addr = dma_addr;
1019
1020		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
1021					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1022
1023		if (dma_mapping_error(dev, dma_addr)) {
1024			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1025				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
1026			areq_ctx->gcm_iv_inc1_dma_addr = 0;
1027			rc = -ENOMEM;
1028			goto aead_map_failure;
1029		}
1030		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
1031
1032		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
1033					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1034
1035		if (dma_mapping_error(dev, dma_addr)) {
1036			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1037				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
1038			areq_ctx->gcm_iv_inc2_dma_addr = 0;
1039			rc = -ENOMEM;
1040			goto aead_map_failure;
1041		}
1042		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
1043	}
1044
1045	size_to_map = req->cryptlen + req->assoclen;
1046	/* If we do in-place encryption, we also need the auth tag */
1047	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
1048	   (req->src == req->dst)) {
1049		size_to_map += authsize;
1050	}
1051
1052	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
1053		       &areq_ctx->src.mapped_nents,
1054		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
1055			LLI_MAX_NUM_OF_DATA_ENTRIES),
1056		       &dummy, &mapped_nents);
1057	if (rc)
1058		goto aead_map_failure;
1059
1060	if (areq_ctx->is_single_pass) {
1061		/*
1062		 * Create MLLI table for:
1063		 *   (1) Assoc. data
1064		 *   (2) Src/Dst SGLs
1065		 *   Note: IV is contg. buffer (not an SGL)
1066		 */
1067		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1068		if (rc)
1069			goto aead_map_failure;
1070		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
1071		if (rc)
1072			goto aead_map_failure;
1073		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
1074		if (rc)
1075			goto aead_map_failure;
1076	} else { /* DOUBLE-PASS flow */
1077		/*
1078		 * Prepare MLLI table(s) in this order:
1079		 *
1080		 * If ENCRYPT/DECRYPT (inplace):
1081		 *   (1) MLLI table for assoc
1082		 *   (2) IV entry (chained right after end of assoc)
1083		 *   (3) MLLI for src/dst (inplace operation)
1084		 *
1085		 * If ENCRYPT (non-inplace)
1086		 *   (1) MLLI table for assoc
1087		 *   (2) IV entry (chained right after end of assoc)
1088		 *   (3) MLLI for dst
1089		 *   (4) MLLI for src
1090		 *
1091		 * If DECRYPT (non-inplace)
1092		 *   (1) MLLI table for assoc
1093		 *   (2) IV entry (chained right after end of assoc)
1094		 *   (3) MLLI for src
1095		 *   (4) MLLI for dst
1096		 */
1097		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1098		if (rc)
1099			goto aead_map_failure;
1100		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
1101		if (rc)
1102			goto aead_map_failure;
1103		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
1104		if (rc)
1105			goto aead_map_failure;
1106	}
1107
 1108	/* MLLI support - start building the MLLI according to the above
 1109	 * results
 1110	 */
1111	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1112	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1113		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1114		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1115		if (rc)
1116			goto aead_map_failure;
1117
1118		cc_update_aead_mlli_nents(drvdata, req);
1119		dev_dbg(dev, "assoc params mn %d\n",
1120			areq_ctx->assoc.mlli_nents);
1121		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
1122		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
1123	}
1124	return 0;
1125
1126aead_map_failure:
1127	cc_unmap_aead_request(dev, req);
1128	return rc;
1129}
1130
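/* Map the data for the final hash operation: any bytes left in the hash
 * context buffer plus the new src data. A single mapped entry with no
 * buffered data stays DLLI; anything else is rendered into an MLLI table.
 */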
1131int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
1132			      struct scatterlist *src, unsigned int nbytes,
1133			      bool do_update, gfp_t flags)
1134{
1135	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1136	struct device *dev = drvdata_to_dev(drvdata);
1137	u8 *curr_buff = cc_hash_buf(areq_ctx);
1138	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1139	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1140	struct buffer_array sg_data;
1141	int rc = 0;
1142	u32 dummy = 0;
1143	u32 mapped_nents = 0;
1144
1145	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
1146		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1147	/* Init the type of the dma buffer */
1148	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1149	mlli_params->curr_pool = NULL;
1150	sg_data.num_of_buffers = 0;
1151	areq_ctx->in_nents = 0;
1152
1153	if (nbytes == 0 && *curr_buff_cnt == 0) {
1154		/* nothing to do */
1155		return 0;
1156	}
1157
1158	/* map the previous buffer */
1159	if (*curr_buff_cnt) {
1160		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1161				     &sg_data);
1162		if (rc)
1163			return rc;
1164	}
1165
1166	if (src && nbytes > 0 && do_update) {
1167		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
1168			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
1169			       &dummy, &mapped_nents);
1170		if (rc)
1171			goto unmap_curr_buff;
1172		if (src && mapped_nents == 1 &&
1173		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1174			memcpy(areq_ctx->buff_sg, src,
1175			       sizeof(struct scatterlist));
1176			areq_ctx->buff_sg->length = nbytes;
1177			areq_ctx->curr_sg = areq_ctx->buff_sg;
1178			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1179		} else {
1180			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1181		}
1182	}
1183
1184	/*build mlli */
1185	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1186		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1187		/* add the src data to the sg_data */
1188		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
1189				0, true, &areq_ctx->mlli_nents);
1190		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1191		if (rc)
1192			goto fail_unmap_din;
1193	}
1194	/* change the buffer index for the unmap function */
1195	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1196	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
1197		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
1198	return 0;
1199
1200fail_unmap_din:
1201	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1202
1203unmap_curr_buff:
1204	if (*curr_buff_cnt)
1205		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1206
1207	return rc;
1208}
1209
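/* Split the total input into a block-aligned part that is mapped for
 * hashing now and a residue smaller than block_size that is copied into
 * the next buffer for a later call. Returns 1 when everything fits in the
 * context buffer and no hardware operation is needed.
 */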
1210int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
1211			       struct scatterlist *src, unsigned int nbytes,
1212			       unsigned int block_size, gfp_t flags)
1213{
1214	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1215	struct device *dev = drvdata_to_dev(drvdata);
1216	u8 *curr_buff = cc_hash_buf(areq_ctx);
1217	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1218	u8 *next_buff = cc_next_buf(areq_ctx);
1219	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
1220	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1221	unsigned int update_data_len;
1222	u32 total_in_len = nbytes + *curr_buff_cnt;
1223	struct buffer_array sg_data;
1224	unsigned int swap_index = 0;
1225	int rc = 0;
1226	u32 dummy = 0;
1227	u32 mapped_nents = 0;
1228
1229	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
1230		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1231	/* Init the type of the dma buffer */
1232	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1233	mlli_params->curr_pool = NULL;
1234	areq_ctx->curr_sg = NULL;
1235	sg_data.num_of_buffers = 0;
1236	areq_ctx->in_nents = 0;
1237
1238	if (total_in_len < block_size) {
1239		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
1240			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
1241		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
1242		sg_copy_to_buffer(src, areq_ctx->in_nents,
1243				  &curr_buff[*curr_buff_cnt], nbytes);
1244		*curr_buff_cnt += nbytes;
1245		return 1;
1246	}
1247
1248	/* Calculate the residue size*/
1249	*next_buff_cnt = total_in_len & (block_size - 1);
1250	/* update data len */
1251	update_data_len = total_in_len - *next_buff_cnt;
1252
1253	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
1254		*next_buff_cnt, update_data_len);
1255
1256	/* Copy the new residue to next buffer */
1257	if (*next_buff_cnt) {
1258		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
1259			next_buff, (update_data_len - *curr_buff_cnt),
1260			*next_buff_cnt);
1261		cc_copy_sg_portion(dev, next_buff, src,
1262				   (update_data_len - *curr_buff_cnt),
1263				   nbytes, CC_SG_TO_BUF);
1264		/* change the buffer index for next operation */
1265		swap_index = 1;
1266	}
1267
1268	if (*curr_buff_cnt) {
1269		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1270				     &sg_data);
1271		if (rc)
1272			return rc;
1273		/* change the buffer index for next operation */
1274		swap_index = 1;
1275	}
1276
1277	if (update_data_len > *curr_buff_cnt) {
1278		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
1279			       DMA_TO_DEVICE, &areq_ctx->in_nents,
1280			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
1281			       &mapped_nents);
1282		if (rc)
1283			goto unmap_curr_buff;
1284		if (mapped_nents == 1 &&
1285		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1286			/* only one entry in the SG and no previous data */
1287			memcpy(areq_ctx->buff_sg, src,
1288			       sizeof(struct scatterlist));
1289			areq_ctx->buff_sg->length = update_data_len;
1290			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1291			areq_ctx->curr_sg = areq_ctx->buff_sg;
1292		} else {
1293			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1294		}
1295	}
1296
1297	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1298		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1299		/* add the src data to the sg_data */
1300		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
1301				(update_data_len - *curr_buff_cnt), 0, true,
1302				&areq_ctx->mlli_nents);
1303		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1304		if (rc)
1305			goto fail_unmap_din;
1306	}
1307	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1308
1309	return 0;
1310
1311fail_unmap_din:
1312	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1313
1314unmap_curr_buff:
1315	if (*curr_buff_cnt)
1316		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1317
1318	return rc;
1319}
1320
1321void cc_unmap_hash_request(struct device *dev, void *ctx,
1322			   struct scatterlist *src, bool do_revert)
1323{
1324	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1325	u32 *prev_len = cc_next_buf_cnt(areq_ctx);
1326
1327	/*In case a pool was set, a table was
1328	 *allocated and should be released
1329	 */
1330	if (areq_ctx->mlli_params.curr_pool) {
1331		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
1332			&areq_ctx->mlli_params.mlli_dma_addr,
1333			areq_ctx->mlli_params.mlli_virt_addr);
1334		dma_pool_free(areq_ctx->mlli_params.curr_pool,
1335			      areq_ctx->mlli_params.mlli_virt_addr,
1336			      areq_ctx->mlli_params.mlli_dma_addr);
1337	}
1338
1339	if (src && areq_ctx->in_nents) {
1340		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
1341			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
1342		dma_unmap_sg(dev, src,
1343			     areq_ctx->in_nents, DMA_TO_DEVICE);
1344	}
1345
1346	if (*prev_len) {
1347		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
1348			sg_virt(areq_ctx->buff_sg),
1349			&sg_dma_address(areq_ctx->buff_sg),
1350			sg_dma_len(areq_ctx->buff_sg));
1351		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1352		if (!do_revert) {
1353			/* clean the previous data length for update
1354			 * operation
1355			 */
1356			*prev_len = 0;
1357		} else {
1358			areq_ctx->buff_index ^= 1;
1359		}
1360	}
1361}
1362
1363int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
1364{
1365	struct device *dev = drvdata_to_dev(drvdata);
1366
1367	drvdata->mlli_buffs_pool =
1368		dma_pool_create("dx_single_mlli_tables", dev,
1369				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1370				LLI_ENTRY_BYTE_SIZE,
1371				MLLI_TABLE_MIN_ALIGNMENT, 0);
1372
1373	if (!drvdata->mlli_buffs_pool)
1374		return -ENOMEM;
1375
1376	return 0;
1377}
1378
1379int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
1380{
1381	dma_pool_destroy(drvdata->mlli_buffs_pool);
1382	return 0;
1383}
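
A minimal usage sketch, not taken from this file: a caller such as the cipher front-end is expected to map a request before posting HW descriptors and to unmap it when the request completes. The function name and the surrounding request handling below are illustrative assumptions; only cc_map_cipher_request() / cc_unmap_cipher_request() and their signatures come from the listing above.

/* Illustrative sketch only - not part of the ccree sources. */
static int example_cipher_process(struct cc_drvdata *drvdata,
				  struct cipher_req_ctx *req_ctx,
				  struct skcipher_request *req,
				  unsigned int ivsize)
{
	struct device *dev = drvdata_to_dev(drvdata);
	gfp_t flags = cc_gfp_flags(&req->base);
	int rc;

	/* Build the DLLI/MLLI DMA mappings for the IV, src and dst */
	rc = cc_map_cipher_request(drvdata, req_ctx, ivsize, req->cryptlen,
				   req->iv, req->src, req->dst, flags);
	if (rc)
		return rc;

	/* ... program and post the HW descriptors here ... */

	/* When the request completes (or on error), release the mappings */
	cc_unmap_cipher_request(dev, req_ctx, ivsize, req->src, req->dst);
	return 0;
}
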
v6.8 - drivers/crypto/ccree/cc_buffer_mgr.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
   3
   4#include <crypto/internal/aead.h>
   5#include <crypto/authenc.h>
   6#include <crypto/scatterwalk.h>
   7#include <linux/dmapool.h>
   8#include <linux/dma-mapping.h>
   9
  10#include "cc_buffer_mgr.h"
  11#include "cc_lli_defs.h"
  12#include "cc_cipher.h"
  13#include "cc_hash.h"
  14#include "cc_aead.h"
  15
  16union buffer_array_entry {
  17	struct scatterlist *sgl;
  18	dma_addr_t buffer_dma;
  19};
  20
  21struct buffer_array {
  22	unsigned int num_of_buffers;
  23	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
  24	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
  25	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  26	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
  27	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
  28	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  29};
  30
  31static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
  32{
  33	switch (type) {
  34	case CC_DMA_BUF_NULL:
  35		return "BUF_NULL";
  36	case CC_DMA_BUF_DLLI:
  37		return "BUF_DLLI";
  38	case CC_DMA_BUF_MLLI:
  39		return "BUF_MLLI";
  40	default:
  41		return "BUF_INVALID";
  42	}
  43}
  44
  45/**
  46 * cc_copy_mac() - Copy MAC to temporary location
  47 *
  48 * @dev: device object
  49 * @req: aead request object
  50 * @dir: [IN] copy from/to sgl
  51 */
  52static void cc_copy_mac(struct device *dev, struct aead_request *req,
  53			enum cc_sg_cpy_direct dir)
  54{
  55	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
  56	u32 skip = req->assoclen + req->cryptlen;
  57
  58	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
  59			   (skip - areq_ctx->req_authsize), skip, dir);
  60}
  61
  62/**
  63 * cc_get_sgl_nents() - Get scatterlist number of entries.
  64 *
  65 * @dev: Device object
  66 * @sg_list: SG list
  67 * @nbytes: [IN] Total SGL data bytes.
  68 * @lbytes: [OUT] Returns the amount of bytes at the last entry
  69 *
  70 * Return:
  71 * Number of entries in the scatterlist
  72 */
  73static unsigned int cc_get_sgl_nents(struct device *dev,
  74				     struct scatterlist *sg_list,
  75				     unsigned int nbytes, u32 *lbytes)
  76{
  77	unsigned int nents = 0;
  78
  79	*lbytes = 0;
  80
  81	while (nbytes && sg_list) {
  82		nents++;
  83		/* get the number of bytes in the last entry */
  84		*lbytes = nbytes;
  85		nbytes -= (sg_list->length > nbytes) ?
  86				nbytes : sg_list->length;
  87		sg_list = sg_next(sg_list);
  88	}
  89
  90	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
  91	return nents;
  92}
  93
  94/**
  95 * cc_copy_sg_portion() - Copy scatter list data,
  96 * from to_skip to end, to dest and vice versa
  97 *
  98 * @dev: Device object
  99 * @dest: Buffer to copy to/from
 100 * @sg: SG list
 101 * @to_skip: Number of bytes to skip before copying
 102 * @end: Offset of last byte to copy
 103 * @direct: Transfer direction (true == from SG list to buffer, false == from
 104 *          buffer to SG list)
 105 */
 106void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 107			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
 108{
 109	u32 nents;
 110
 111	nents = sg_nents_for_len(sg, end);
 112	sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
 113		       (direct == CC_SG_TO_BUF));
 114}
 115
 116static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
 117				  u32 buff_size, u32 *curr_nents,
 118				  u32 **mlli_entry_pp)
 119{
 120	u32 *mlli_entry_p = *mlli_entry_pp;
 121	u32 new_nents;
 122
 123	/* Verify there is no memory overflow*/
 124	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
 125	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
 126		dev_err(dev, "Too many mlli entries. current %d max %d\n",
 127			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
 128		return -ENOMEM;
 129	}
 130
 131	/*handle buffer longer than 64 kbytes */
 132	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
 133		cc_lli_set_addr(mlli_entry_p, buff_dma);
 134		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
 135		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 136			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 137			mlli_entry_p[LLI_WORD1_OFFSET]);
 138		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
 139		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
 140		mlli_entry_p = mlli_entry_p + 2;
 141		(*curr_nents)++;
 142	}
 143	/*Last entry */
 144	cc_lli_set_addr(mlli_entry_p, buff_dma);
 145	cc_lli_set_size(mlli_entry_p, buff_size);
 146	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 147		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 148		mlli_entry_p[LLI_WORD1_OFFSET]);
 149	mlli_entry_p = mlli_entry_p + 2;
 150	*mlli_entry_pp = mlli_entry_p;
 151	(*curr_nents)++;
 152	return 0;
 153}
 154
 155static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
 156				u32 sgl_data_len, u32 sgl_offset,
 157				u32 *curr_nents, u32 **mlli_entry_pp)
 158{
 159	struct scatterlist *curr_sgl = sgl;
 160	u32 *mlli_entry_p = *mlli_entry_pp;
 161	s32 rc = 0;
 162
 163	for ( ; (curr_sgl && sgl_data_len);
 164	      curr_sgl = sg_next(curr_sgl)) {
 165		u32 entry_data_len =
 166			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
 167				sg_dma_len(curr_sgl) - sgl_offset :
 168				sgl_data_len;
 169		sgl_data_len -= entry_data_len;
 170		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
 171					    sgl_offset, entry_data_len,
 172					    curr_nents, &mlli_entry_p);
 173		if (rc)
 174			return rc;
 175
 176		sgl_offset = 0;
 177	}
 178	*mlli_entry_pp = mlli_entry_p;
 179	return 0;
 180}
 181
 182static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
 183			    struct mlli_params *mlli_params, gfp_t flags)
 184{
 185	u32 *mlli_p;
 186	u32 total_nents = 0, prev_total_nents = 0;
 187	int rc = 0, i;
 188
 189	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
 190
 191	/* Allocate memory from the pointed pool */
 192	mlli_params->mlli_virt_addr =
 193		dma_pool_alloc(mlli_params->curr_pool, flags,
 194			       &mlli_params->mlli_dma_addr);
 195	if (!mlli_params->mlli_virt_addr) {
 196		dev_err(dev, "dma_pool_alloc() failed\n");
 197		rc = -ENOMEM;
 198		goto build_mlli_exit;
 199	}
 200	/* Point to start of MLLI */
 201	mlli_p = mlli_params->mlli_virt_addr;
  202	/* go over all SGs and link them into one MLLI table */
 203	for (i = 0; i < sg_data->num_of_buffers; i++) {
 204		union buffer_array_entry *entry = &sg_data->entry[i];
 205		u32 tot_len = sg_data->total_data_len[i];
 206		u32 offset = sg_data->offset[i];
 207
 208		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
 209					  &total_nents, &mlli_p);
 210		if (rc)
 211			return rc;
 212
 213		/* set last bit in the current table */
 214		if (sg_data->mlli_nents[i]) {
 215			/*Calculate the current MLLI table length for the
 216			 *length field in the descriptor
 217			 */
 218			*sg_data->mlli_nents[i] +=
 219				(total_nents - prev_total_nents);
 220			prev_total_nents = total_nents;
 221		}
 222	}
 223
 224	/* Set MLLI size for the bypass operation */
 225	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
 226
 227	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
 228		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
 229		mlli_params->mlli_len);
 230
 231build_mlli_exit:
 232	return rc;
 233}
 234
 235static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 236			    unsigned int nents, struct scatterlist *sgl,
 237			    unsigned int data_len, unsigned int data_offset,
 238			    bool is_last_table, u32 *mlli_nents)
 239{
 240	unsigned int index = sgl_data->num_of_buffers;
 241
 242	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
 243		index, nents, sgl, data_len, is_last_table);
 244	sgl_data->nents[index] = nents;
 245	sgl_data->entry[index].sgl = sgl;
 246	sgl_data->offset[index] = data_offset;
 247	sgl_data->total_data_len[index] = data_len;
 248	sgl_data->is_last[index] = is_last_table;
 249	sgl_data->mlli_nents[index] = mlli_nents;
 250	if (sgl_data->mlli_nents[index])
 251		*sgl_data->mlli_nents[index] = 0;
 252	sgl_data->num_of_buffers++;
 253}
 254
 255static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 256		     unsigned int nbytes, int direction, u32 *nents,
 257		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 258{
 259	int ret = 0;
 260
 261	if (!nbytes) {
 262		*mapped_nents = 0;
 263		*lbytes = 0;
 264		*nents = 0;
 265		return 0;
 266	}
 267
 268	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
 269	if (*nents > max_sg_nents) {
 270		*nents = 0;
 271		dev_err(dev, "Too many fragments. current %d max %d\n",
 272			*nents, max_sg_nents);
 273		return -ENOMEM;
 274	}
 275
 276	ret = dma_map_sg(dev, sg, *nents, direction);
 277	if (!ret) {
 278		*nents = 0;
 279		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
 280		return -ENOMEM;
 281	}
 282
 283	*mapped_nents = ret;
 284
 285	return 0;
 286}
 287
 288static int
 289cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
 290		     u8 *config_data, struct buffer_array *sg_data,
 291		     unsigned int assoclen)
 292{
 293	dev_dbg(dev, " handle additional data config set to DLLI\n");
 294	/* create sg for the current buffer */
 295	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
 296		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
 297	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
 298		dev_err(dev, "dma_map_sg() config buffer failed\n");
 299		return -ENOMEM;
 300	}
 301	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 302		&sg_dma_address(&areq_ctx->ccm_adata_sg),
 303		sg_page(&areq_ctx->ccm_adata_sg),
 304		sg_virt(&areq_ctx->ccm_adata_sg),
 305		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
 306	/* prepare for case of MLLI */
 307	if (assoclen > 0) {
 308		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
 309				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
 310				0, false, NULL);
 311	}
 312	return 0;
 313}
 314
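/**
 * cc_set_hash_buf() - Map the hash staging buffer as a single-entry SG
 * list and record it in the buffer array in case an MLLI table is needed.
 *
 * @dev: Device object
 * @areq_ctx: Hash request context
 * @curr_buff: Staging buffer holding previously accumulated data
 * @curr_buff_cnt: Number of valid bytes in @curr_buff
 * @sg_data: Buffer array to add the entry to
 *
 * Return: 0 on success, -ENOMEM on DMA mapping failure.
 */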
 315static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
 316			   u8 *curr_buff, u32 curr_buff_cnt,
 317			   struct buffer_array *sg_data)
 318{
  319	dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
 320	/* create sg for the current buffer */
 321	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
 322	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
 323		dev_err(dev, "dma_map_sg() src buffer failed\n");
 324		return -ENOMEM;
 325	}
 326	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 327		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
 328		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
 329		areq_ctx->buff_sg->length);
 330	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
 331	areq_ctx->curr_sg = areq_ctx->buff_sg;
 332	areq_ctx->in_nents = 0;
 333	/* prepare for case of MLLI */
 334	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
 335			false, NULL);
 336	return 0;
 337}
 338
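/**
 * cc_unmap_cipher_request() - Unmap all DMA resources of a cipher request:
 * the IV, the MLLI table (if one was allocated) and the src/dst SG lists.
 *
 * @dev: Device object
 * @ctx: Cipher request context (struct cipher_req_ctx)
 * @ivsize: IV size in bytes
 * @src: Source SG list
 * @dst: Destination SG list (may equal @src for an in-place operation)
 */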
 339void cc_unmap_cipher_request(struct device *dev, void *ctx,
 340				unsigned int ivsize, struct scatterlist *src,
 341				struct scatterlist *dst)
 342{
 343	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 344
 345	if (req_ctx->gen_ctx.iv_dma_addr) {
 346		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 347			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
 348		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
 349				 ivsize, DMA_BIDIRECTIONAL);
 350	}
 351	/* Release pool */
 352	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
 353	    req_ctx->mlli_params.mlli_virt_addr) {
 354		dma_pool_free(req_ctx->mlli_params.curr_pool,
 355			      req_ctx->mlli_params.mlli_virt_addr,
 356			      req_ctx->mlli_params.mlli_dma_addr);
 357	}
 358
 359	if (src != dst) {
 360		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
 361		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
 362		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
 363		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 364	} else {
 365		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
 366		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 367	}
 368}
 369
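/**
 * cc_map_cipher_request() - DMA-map a cipher request: map the IV and the
 * src/dst SG lists and, when more than one entry is mapped, build an MLLI
 * table describing them.
 *
 * @drvdata: Driver private context
 * @ctx: Cipher request context (struct cipher_req_ctx)
 * @ivsize: IV size in bytes
 * @nbytes: Data size in bytes
 * @info: IV buffer
 * @src: Source SG list
 * @dst: Destination SG list (may equal @src for an in-place operation)
 * @flags: Allocation flags for the MLLI table
 *
 * Return: 0 on success, negative error code otherwise. On failure all
 * mappings done so far are released.
 */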
 370int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 371			  unsigned int ivsize, unsigned int nbytes,
 372			  void *info, struct scatterlist *src,
 373			  struct scatterlist *dst, gfp_t flags)
 374{
 375	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 376	struct mlli_params *mlli_params = &req_ctx->mlli_params;
 377	struct device *dev = drvdata_to_dev(drvdata);
 378	struct buffer_array sg_data;
 379	u32 dummy = 0;
 380	int rc = 0;
 381	u32 mapped_nents = 0;
 382	int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 383
 384	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
 385	mlli_params->curr_pool = NULL;
 386	sg_data.num_of_buffers = 0;
 387
 388	/* Map IV buffer */
 389	if (ivsize) {
 390		dump_byte_array("iv", info, ivsize);
 391		req_ctx->gen_ctx.iv_dma_addr =
 392			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
 393		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
 394			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 395				ivsize, info);
 396			return -ENOMEM;
 397		}
 398		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 399			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
 400	} else {
 401		req_ctx->gen_ctx.iv_dma_addr = 0;
 402	}
 403
 404	/* Map the src SGL */
 405	rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
 406		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
 407	if (rc)
 408		goto cipher_exit;
 409	if (mapped_nents > 1)
 410		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 411
 412	if (src == dst) {
 413		/* Handle inplace operation */
 414		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 415			req_ctx->out_nents = 0;
 416			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 417					nbytes, 0, true,
 418					&req_ctx->in_mlli_nents);
 419		}
 420	} else {
 421		/* Map the dst sg */
 422		rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
 423			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
 424			       &dummy, &mapped_nents);
 425		if (rc)
 426			goto cipher_exit;
 427		if (mapped_nents > 1)
 428			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 429
 430		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 431			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 432					nbytes, 0, true,
 433					&req_ctx->in_mlli_nents);
 434			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
 435					nbytes, 0, true,
 436					&req_ctx->out_mlli_nents);
 437		}
 438	}
 439
 440	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 441		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 442		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 443		if (rc)
 444			goto cipher_exit;
 445	}
 446
 447	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
 448		cc_dma_buf_type(req_ctx->dma_buf_type));
 449
 450	return 0;
 451
 452cipher_exit:
 453	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
 454	return rc;
 455}
 456
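/**
 * cc_unmap_aead_request() - Release all DMA resources taken by
 * cc_map_aead_request(): MAC buffer, CCM/GCM helper blocks, IV, MLLI
 * table and the src/dst SG lists.
 *
 * @dev: Device object
 * @req: AEAD request object
 */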
 457void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 458{
 459	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
 460	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 461	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 462	int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 463
 464	if (areq_ctx->mac_buf_dma_addr) {
 465		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
 466				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
 467	}
 468
 469	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
 470		if (areq_ctx->hkey_dma_addr) {
 471			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
 472					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
 473		}
 474
 475		if (areq_ctx->gcm_block_len_dma_addr) {
 476			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
 477					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 478		}
 479
 480		if (areq_ctx->gcm_iv_inc1_dma_addr) {
 481			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
 482					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 483		}
 484
 485		if (areq_ctx->gcm_iv_inc2_dma_addr) {
 486			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
 487					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 488		}
 489	}
 490
 491	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 492		if (areq_ctx->ccm_iv0_dma_addr) {
 493			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
 494					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 495		}
 496
 497		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
 498	}
 499	if (areq_ctx->gen_ctx.iv_dma_addr) {
 500		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 501				 hw_iv_size, DMA_BIDIRECTIONAL);
 502		kfree_sensitive(areq_ctx->gen_ctx.iv);
 503	}
 504
 505	/* Release pool */
 506	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
 507	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
 508	    (areq_ctx->mlli_params.mlli_virt_addr)) {
 509		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
 510			&areq_ctx->mlli_params.mlli_dma_addr,
 511			areq_ctx->mlli_params.mlli_virt_addr);
 512		dma_pool_free(areq_ctx->mlli_params.curr_pool,
 513			      areq_ctx->mlli_params.mlli_virt_addr,
 514			      areq_ctx->mlli_params.mlli_dma_addr);
 515	}
 516
 517	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
 518		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 519		areq_ctx->assoclen, req->cryptlen);
 520
 521	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
 522	if (req->src != req->dst) {
 523		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 524			sg_virt(req->dst));
 525		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
 526	}
 527	if (drvdata->coherent &&
 528	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
 529	    req->src == req->dst) {
  530		/* Copy back the MAC from the temporary location to deal with
  531		 * possible data memory overwriting caused by a cache
  532		 * coherence problem.
  533		 */
 534		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
 535	}
 536}
 537
 538static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
 539			   u32 last_entry_data_size)
 540{
 541	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
 542}
 543
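/**
 * cc_aead_chain_iv() - Copy the request IV to a DMA-able buffer and map it.
 *
 * @drvdata: Driver private context
 * @req: AEAD request object
 * @sg_data: Buffer array (not used by this function)
 * @is_last: Not used
 * @do_chain: Not used
 *
 * Return: 0 on success, -ENOMEM on allocation or mapping failure.
 */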
 544static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
 545			    struct aead_request *req,
 546			    struct buffer_array *sg_data,
 547			    bool is_last, bool do_chain)
 548{
 549	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
 550	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 551	struct device *dev = drvdata_to_dev(drvdata);
 552	gfp_t flags = cc_gfp_flags(&req->base);
 553	int rc = 0;
 554
 555	if (!req->iv) {
 556		areq_ctx->gen_ctx.iv_dma_addr = 0;
 557		areq_ctx->gen_ctx.iv = NULL;
 558		goto chain_iv_exit;
 559	}
 560
 561	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
 562	if (!areq_ctx->gen_ctx.iv)
 563		return -ENOMEM;
 564
 565	areq_ctx->gen_ctx.iv_dma_addr =
 566		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
 567			       DMA_BIDIRECTIONAL);
 568	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 569		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 570			hw_iv_size, req->iv);
 571		kfree_sensitive(areq_ctx->gen_ctx.iv);
 572		areq_ctx->gen_ctx.iv = NULL;
 573		rc = -ENOMEM;
 574		goto chain_iv_exit;
 575	}
 576
 577	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 578		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
 579
 580chain_iv_exit:
 581	return rc;
 582}
 583
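/**
 * cc_aead_chain_assoc() - Classify the associated data as DLLI or MLLI
 * and, for the MLLI case, add it to the buffer array.
 *
 * @drvdata: Driver private context
 * @req: AEAD request object
 * @sg_data: Buffer array to add the assoc data to
 * @is_last: True if this entry closes the current MLLI table
 * @do_chain: Force chaining into the MLLI table even for a single entry
 *
 * Return: 0 on success, negative error code otherwise.
 */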
 584static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 585			       struct aead_request *req,
 586			       struct buffer_array *sg_data,
 587			       bool is_last, bool do_chain)
 588{
 589	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
 590	int rc = 0;
 591	int mapped_nents = 0;
 592	struct device *dev = drvdata_to_dev(drvdata);
 593
 594	if (!sg_data) {
 595		rc = -EINVAL;
 596		goto chain_assoc_exit;
 597	}
 598
 599	if (areq_ctx->assoclen == 0) {
 600		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
 601		areq_ctx->assoc.nents = 0;
 602		areq_ctx->assoc.mlli_nents = 0;
 603		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
 604			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 605			areq_ctx->assoc.nents);
 606		goto chain_assoc_exit;
 607	}
 608
 609	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
 610	if (mapped_nents < 0)
 611		return mapped_nents;
 612
 613	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 614		dev_err(dev, "Too many fragments. current %d max %d\n",
 615			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 616		return -ENOMEM;
 617	}
 618	areq_ctx->assoc.nents = mapped_nents;
 619
 620	/* in CCM case we have additional entry for
 621	 * ccm header configurations
 622	 */
 623	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 624		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
  625			dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
 626				(areq_ctx->assoc.nents + 1),
 627				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 628			rc = -ENOMEM;
 629			goto chain_assoc_exit;
 630		}
 631	}
 632
 633	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
 634		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
 635	else
 636		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 637
 638	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 639		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
 640			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 641			areq_ctx->assoc.nents);
 642		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
 643				areq_ctx->assoclen, 0, is_last,
 644				&areq_ctx->assoc.mlli_nents);
 645		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 646	}
 647
 648chain_assoc_exit:
 649	return rc;
 650}
 651
 652static void cc_prepare_aead_data_dlli(struct aead_request *req,
 653				      u32 *src_last_bytes, u32 *dst_last_bytes)
 654{
 655	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
 656	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 657	unsigned int authsize = areq_ctx->req_authsize;
 658	struct scatterlist *sg;
 659	ssize_t offset;
 660
 661	areq_ctx->is_icv_fragmented = false;
 662
 663	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 664		sg = areq_ctx->src_sgl;
 665		offset = *src_last_bytes - authsize;
 666	} else {
 667		sg = areq_ctx->dst_sgl;
 668		offset = *dst_last_bytes - authsize;
 669	}
 670
 671	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
 672	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
 673}
 674
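/**
 * cc_prepare_aead_data_mlli() - Add the src/dst data to the buffer array
 * and locate the ICV, copying it to a side buffer when it is fragmented
 * across SG entries.
 *
 * @drvdata: Driver private context
 * @req: AEAD request object
 * @sg_data: Buffer array to add the data to
 * @src_last_bytes: Number of bytes in the last src SG entry
 * @dst_last_bytes: Number of bytes in the last dst SG entry
 * @is_last_table: True if these entries close the current MLLI table
 */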
 675static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
 676				      struct aead_request *req,
 677				      struct buffer_array *sg_data,
 678				      u32 *src_last_bytes, u32 *dst_last_bytes,
 679				      bool is_last_table)
 680{
 681	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
 682	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 683	unsigned int authsize = areq_ctx->req_authsize;
 684	struct device *dev = drvdata_to_dev(drvdata);
 685	struct scatterlist *sg;
 686
 687	if (req->src == req->dst) {
 688		/*INPLACE*/
 689		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 690				areq_ctx->src_sgl, areq_ctx->cryptlen,
 691				areq_ctx->src_offset, is_last_table,
 692				&areq_ctx->src.mlli_nents);
 693
 694		areq_ctx->is_icv_fragmented =
 695			cc_is_icv_frag(areq_ctx->src.nents, authsize,
 696				       *src_last_bytes);
 697
 698		if (areq_ctx->is_icv_fragmented) {
  699			/* The backup happens only when the ICV is fragmented;
  700			 * ICV verification is then done by a CPU compare to
  701			 * simplify MAC verification upon request completion.
  702			 */
 703			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
  704				/* On coherent platforms (e.g. ACP) the
  705				 * ICV is already copied for any in-place
  706				 * decrypt operation, so the copy must be
  707				 * skipped here.
  708				 */
 709				if (!drvdata->coherent)
 710					cc_copy_mac(dev, req, CC_SG_TO_BUF);
 711
 712				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 713			} else {
 714				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 715				areq_ctx->icv_dma_addr =
 716					areq_ctx->mac_buf_dma_addr;
 717			}
 718		} else { /* Contig. ICV */
 719			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  720			/* Should handle the case where the SG is not contiguous */
 721			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 722				(*src_last_bytes - authsize);
 723			areq_ctx->icv_virt_addr = sg_virt(sg) +
 724				(*src_last_bytes - authsize);
 725		}
 726
 727	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 728		/*NON-INPLACE and DECRYPT*/
 729		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 730				areq_ctx->src_sgl, areq_ctx->cryptlen,
 731				areq_ctx->src_offset, is_last_table,
 732				&areq_ctx->src.mlli_nents);
 733		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 734				areq_ctx->dst_sgl, areq_ctx->cryptlen,
 735				areq_ctx->dst_offset, is_last_table,
 736				&areq_ctx->dst.mlli_nents);
 737
 738		areq_ctx->is_icv_fragmented =
 739			cc_is_icv_frag(areq_ctx->src.nents, authsize,
 740				       *src_last_bytes);
  741		/* The backup happens only when the ICV is fragmented;
  742		 * ICV verification is then done by a CPU compare in
  743		 * order to simplify MAC verification upon request
  744		 * completion.
  745		 */
 746		if (areq_ctx->is_icv_fragmented) {
 747			cc_copy_mac(dev, req, CC_SG_TO_BUF);
 748			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 749
 750		} else { /* Contig. ICV */
 751			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  752			/* Should handle the case where the SG is not contiguous */
 753			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 754				(*src_last_bytes - authsize);
 755			areq_ctx->icv_virt_addr = sg_virt(sg) +
 756				(*src_last_bytes - authsize);
 757		}
 758
 759	} else {
 760		/*NON-INPLACE and ENCRYPT*/
 761		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 762				areq_ctx->dst_sgl, areq_ctx->cryptlen,
 763				areq_ctx->dst_offset, is_last_table,
 764				&areq_ctx->dst.mlli_nents);
 765		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 766				areq_ctx->src_sgl, areq_ctx->cryptlen,
 767				areq_ctx->src_offset, is_last_table,
 768				&areq_ctx->src.mlli_nents);
 769
 770		areq_ctx->is_icv_fragmented =
 771			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
 772				       *dst_last_bytes);
 773
 774		if (!areq_ctx->is_icv_fragmented) {
 775			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
 776			/* Contig. ICV */
 777			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 778				(*dst_last_bytes - authsize);
 779			areq_ctx->icv_virt_addr = sg_virt(sg) +
 780				(*dst_last_bytes - authsize);
 781		} else {
 782			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
 783			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 784		}
 785	}
 786}
 787
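/**
 * cc_aead_chain_data() - Skip the assoc data in the src/dst SG lists, map
 * the dst list for a non-in-place operation and chain the data as DLLI or
 * MLLI according to its fragmentation.
 *
 * @drvdata: Driver private context
 * @req: AEAD request object
 * @sg_data: Buffer array to add the data to
 * @is_last_table: True if these entries close the current MLLI table
 * @do_chain: Force MLLI chaining even for a single entry
 *
 * Return: 0 on success, negative error code otherwise.
 */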
 788static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 789			      struct aead_request *req,
 790			      struct buffer_array *sg_data,
 791			      bool is_last_table, bool do_chain)
 792{
 793	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
 794	struct device *dev = drvdata_to_dev(drvdata);
 795	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 796	unsigned int authsize = areq_ctx->req_authsize;
 797	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
 798	int rc = 0;
 799	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
 800	u32 offset = 0;
 801	/* non-inplace mode */
 802	unsigned int size_for_map = req->assoclen + req->cryptlen;
 803	u32 sg_index = 0;
 804	u32 size_to_skip = req->assoclen;
 805	struct scatterlist *sgl;
 806
 807	offset = size_to_skip;
 808
 809	if (!sg_data)
 810		return -EINVAL;
 811
 812	areq_ctx->src_sgl = req->src;
 813	areq_ctx->dst_sgl = req->dst;
 814
 815	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 816			authsize : 0;
 817	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
 818					    &src_last_bytes);
 819	sg_index = areq_ctx->src_sgl->length;
 820	//check where the data starts
 821	while (src_mapped_nents && (sg_index <= size_to_skip)) {
 822		src_mapped_nents--;
 823		offset -= areq_ctx->src_sgl->length;
 824		sgl = sg_next(areq_ctx->src_sgl);
 825		if (!sgl)
 826			break;
 827		areq_ctx->src_sgl = sgl;
 828		sg_index += areq_ctx->src_sgl->length;
 829	}
 830	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 831		dev_err(dev, "Too many fragments. current %d max %d\n",
 832			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 833		return -ENOMEM;
 834	}
 835
 836	areq_ctx->src.nents = src_mapped_nents;
 837
 838	areq_ctx->src_offset = offset;
 839
 840	if (req->src != req->dst) {
 841		size_for_map = req->assoclen + req->cryptlen;
 842
 843		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
 844			size_for_map += authsize;
 845		else
 846			size_for_map -= authsize;
 847
 848		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
 849			       &areq_ctx->dst.mapped_nents,
 850			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 851			       &dst_mapped_nents);
 852		if (rc)
 853			goto chain_data_exit;
 854	}
 855
 856	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
 857					    &dst_last_bytes);
 858	sg_index = areq_ctx->dst_sgl->length;
 859	offset = size_to_skip;
 860
 861	//check where the data starts
 862	while (dst_mapped_nents && sg_index <= size_to_skip) {
 863		dst_mapped_nents--;
 864		offset -= areq_ctx->dst_sgl->length;
 865		sgl = sg_next(areq_ctx->dst_sgl);
 866		if (!sgl)
 867			break;
 868		areq_ctx->dst_sgl = sgl;
 869		sg_index += areq_ctx->dst_sgl->length;
 870	}
 871	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 872		dev_err(dev, "Too many fragments. current %d max %d\n",
 873			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 874		return -ENOMEM;
 875	}
 876	areq_ctx->dst.nents = dst_mapped_nents;
 877	areq_ctx->dst_offset = offset;
 878	if (src_mapped_nents > 1 ||
 879	    dst_mapped_nents  > 1 ||
 880	    do_chain) {
 881		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
 882		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
 883					  &src_last_bytes, &dst_last_bytes,
 884					  is_last_table);
 885	} else {
 886		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
 887		cc_prepare_aead_data_dlli(req, &src_last_bytes,
 888					  &dst_last_bytes);
 889	}
 890
 891chain_data_exit:
 892	return rc;
 893}
 894
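/**
 * cc_update_aead_mlli_nents() - Compute the SRAM addresses of the assoc,
 * src and dst MLLI tables and, for the double-pass flow, fold the data
 * entries into the assoc table length.
 *
 * @drvdata: Driver private context
 * @req: AEAD request object
 */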
 895static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
 896				      struct aead_request *req)
 897{
 898	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
 899	u32 curr_mlli_size = 0;
 900
 901	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 902		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
 903		curr_mlli_size = areq_ctx->assoc.mlli_nents *
 904						LLI_ENTRY_BYTE_SIZE;
 905	}
 906
 907	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
  908		/* In-place case: dst nents equal src nents */
 909		if (req->src == req->dst) {
 910			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
 911			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
 912								curr_mlli_size;
 913			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
 914			if (!areq_ctx->is_single_pass)
 915				areq_ctx->assoc.mlli_nents +=
 916					areq_ctx->src.mlli_nents;
 917		} else {
 918			if (areq_ctx->gen_ctx.op_type ==
 919					DRV_CRYPTO_DIRECTION_DECRYPT) {
 920				areq_ctx->src.sram_addr =
 921						drvdata->mlli_sram_addr +
 922								curr_mlli_size;
 923				areq_ctx->dst.sram_addr =
 924						areq_ctx->src.sram_addr +
 925						areq_ctx->src.mlli_nents *
 926						LLI_ENTRY_BYTE_SIZE;
 927				if (!areq_ctx->is_single_pass)
 928					areq_ctx->assoc.mlli_nents +=
 929						areq_ctx->src.mlli_nents;
 930			} else {
 931				areq_ctx->dst.sram_addr =
 932						drvdata->mlli_sram_addr +
 933								curr_mlli_size;
 934				areq_ctx->src.sram_addr =
 935						areq_ctx->dst.sram_addr +
 936						areq_ctx->dst.mlli_nents *
 937						LLI_ENTRY_BYTE_SIZE;
 938				if (!areq_ctx->is_single_pass)
 939					areq_ctx->assoc.mlli_nents +=
 940						areq_ctx->dst.mlli_nents;
 941			}
 942		}
 943	}
 944}
 945
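/**
 * cc_map_aead_request() - DMA-map all buffers of an AEAD request: MAC
 * buffer, CCM/GCM helper blocks, IV, associated data and the src/dst data,
 * building the MLLI table(s) when any of them is fragmented.
 *
 * @drvdata: Driver private context
 * @req: AEAD request object
 *
 * Return: 0 on success, negative error code otherwise. On failure all
 * mappings done so far are released via cc_unmap_aead_request().
 */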
 946int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 947{
 948	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
 949	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 950	struct device *dev = drvdata_to_dev(drvdata);
 951	struct buffer_array sg_data;
 952	unsigned int authsize = areq_ctx->req_authsize;
 953	int rc = 0;
 954	dma_addr_t dma_addr;
 955	u32 mapped_nents = 0;
 956	u32 dummy = 0; /*used for the assoc data fragments */
 957	u32 size_to_map;
 958	gfp_t flags = cc_gfp_flags(&req->base);
 959
 960	mlli_params->curr_pool = NULL;
 961	sg_data.num_of_buffers = 0;
 962
  963	/* Copy the MAC to a temporary location to deal with possible
  964	 * data memory overwriting caused by a cache coherence problem.
  965	 */
 966	if (drvdata->coherent &&
 967	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
 968	    req->src == req->dst)
 969		cc_copy_mac(dev, req, CC_SG_TO_BUF);
 970
  971	/* Calculate the cipher data size; remove the ICV in decrypt */
 972	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
 973				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 974				req->cryptlen :
 975				(req->cryptlen - authsize);
 976
 977	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
 978				  DMA_BIDIRECTIONAL);
 979	if (dma_mapping_error(dev, dma_addr)) {
 980		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
 981			MAX_MAC_SIZE, areq_ctx->mac_buf);
 982		rc = -ENOMEM;
 983		goto aead_map_failure;
 984	}
 985	areq_ctx->mac_buf_dma_addr = dma_addr;
 986
 987	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 988		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
 989
 990		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
 991					  DMA_TO_DEVICE);
 992
 993		if (dma_mapping_error(dev, dma_addr)) {
  994			dev_err(dev, "Mapping ccm_iv0 %u B at va=%pK for DMA failed\n",
 995				AES_BLOCK_SIZE, addr);
 996			areq_ctx->ccm_iv0_dma_addr = 0;
 997			rc = -ENOMEM;
 998			goto aead_map_failure;
 999		}
1000		areq_ctx->ccm_iv0_dma_addr = dma_addr;
1001
1002		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
1003					  &sg_data, areq_ctx->assoclen);
1004		if (rc)
1005			goto aead_map_failure;
1006	}
1007
1008	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1009		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
1010					  DMA_BIDIRECTIONAL);
1011		if (dma_mapping_error(dev, dma_addr)) {
1012			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
1013				AES_BLOCK_SIZE, areq_ctx->hkey);
1014			rc = -ENOMEM;
1015			goto aead_map_failure;
1016		}
1017		areq_ctx->hkey_dma_addr = dma_addr;
1018
1019		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
1020					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1021		if (dma_mapping_error(dev, dma_addr)) {
1022			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1023				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1024			rc = -ENOMEM;
1025			goto aead_map_failure;
1026		}
1027		areq_ctx->gcm_block_len_dma_addr = dma_addr;
1028
1029		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
1030					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1031
1032		if (dma_mapping_error(dev, dma_addr)) {
1033			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1034				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
1035			areq_ctx->gcm_iv_inc1_dma_addr = 0;
1036			rc = -ENOMEM;
1037			goto aead_map_failure;
1038		}
1039		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
1040
1041		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
1042					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1043
1044		if (dma_mapping_error(dev, dma_addr)) {
1045			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1046				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
1047			areq_ctx->gcm_iv_inc2_dma_addr = 0;
1048			rc = -ENOMEM;
1049			goto aead_map_failure;
1050		}
1051		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
1052	}
1053
1054	size_to_map = req->cryptlen + req->assoclen;
1055	/* If we do in-place encryption, we also need the auth tag */
1056	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
1057	   (req->src == req->dst)) {
1058		size_to_map += authsize;
1059	}
1060
1061	rc = cc_map_sg(dev, req->src, size_to_map,
1062		       (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
1063		       &areq_ctx->src.mapped_nents,
1064		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
1065			LLI_MAX_NUM_OF_DATA_ENTRIES),
1066		       &dummy, &mapped_nents);
1067	if (rc)
1068		goto aead_map_failure;
1069
1070	if (areq_ctx->is_single_pass) {
1071		/*
1072		 * Create MLLI table for:
1073		 *   (1) Assoc. data
1074		 *   (2) Src/Dst SGLs
 1075		 *   Note: IV is a contiguous buffer (not an SGL)
1076		 */
1077		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1078		if (rc)
1079			goto aead_map_failure;
1080		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
1081		if (rc)
1082			goto aead_map_failure;
1083		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
1084		if (rc)
1085			goto aead_map_failure;
1086	} else { /* DOUBLE-PASS flow */
1087		/*
1088		 * Prepare MLLI table(s) in this order:
1089		 *
1090		 * If ENCRYPT/DECRYPT (inplace):
1091		 *   (1) MLLI table for assoc
1092		 *   (2) IV entry (chained right after end of assoc)
1093		 *   (3) MLLI for src/dst (inplace operation)
1094		 *
1095		 * If ENCRYPT (non-inplace)
1096		 *   (1) MLLI table for assoc
1097		 *   (2) IV entry (chained right after end of assoc)
1098		 *   (3) MLLI for dst
1099		 *   (4) MLLI for src
1100		 *
1101		 * If DECRYPT (non-inplace)
1102		 *   (1) MLLI table for assoc
1103		 *   (2) IV entry (chained right after end of assoc)
1104		 *   (3) MLLI for src
1105		 *   (4) MLLI for dst
1106		 */
1107		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1108		if (rc)
1109			goto aead_map_failure;
1110		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
1111		if (rc)
1112			goto aead_map_failure;
1113		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
1114		if (rc)
1115			goto aead_map_failure;
1116	}
1117
 1118	/* MLLI support - start building the MLLI according to the
 1119	 * above results.
 1120	 */
1121	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1122	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1123		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1124		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1125		if (rc)
1126			goto aead_map_failure;
1127
1128		cc_update_aead_mlli_nents(drvdata, req);
1129		dev_dbg(dev, "assoc params mn %d\n",
1130			areq_ctx->assoc.mlli_nents);
1131		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
1132		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
1133	}
1134	return 0;
1135
1136aead_map_failure:
1137	cc_unmap_aead_request(dev, req);
1138	return rc;
1139}
1140
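/**
 * cc_map_hash_request_final() - Map the data for a final/finup hash
 * operation: the staging buffer holding previously accumulated data and,
 * optionally, the new source data.
 *
 * @drvdata: Driver private context
 * @ctx: Hash request context (struct ahash_req_ctx)
 * @src: Source SG list with the new data
 * @nbytes: Number of new data bytes
 * @do_update: True if the new data should be hashed as well
 * @flags: Allocation flags for the MLLI table
 *
 * Return: 0 on success, negative error code otherwise.
 */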
1141int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
1142			      struct scatterlist *src, unsigned int nbytes,
1143			      bool do_update, gfp_t flags)
1144{
1145	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1146	struct device *dev = drvdata_to_dev(drvdata);
1147	u8 *curr_buff = cc_hash_buf(areq_ctx);
1148	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1149	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1150	struct buffer_array sg_data;
1151	int rc = 0;
1152	u32 dummy = 0;
1153	u32 mapped_nents = 0;
1154
1155	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
1156		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1157	/* Init the type of the dma buffer */
1158	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1159	mlli_params->curr_pool = NULL;
1160	sg_data.num_of_buffers = 0;
1161	areq_ctx->in_nents = 0;
1162
1163	if (nbytes == 0 && *curr_buff_cnt == 0) {
1164		/* nothing to do */
1165		return 0;
1166	}
1167
1168	/* map the previous buffer */
1169	if (*curr_buff_cnt) {
1170		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1171				     &sg_data);
1172		if (rc)
1173			return rc;
1174	}
1175
1176	if (src && nbytes > 0 && do_update) {
1177		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
1178			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
1179			       &dummy, &mapped_nents);
1180		if (rc)
1181			goto unmap_curr_buff;
1182		if (src && mapped_nents == 1 &&
1183		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1184			memcpy(areq_ctx->buff_sg, src,
1185			       sizeof(struct scatterlist));
1186			areq_ctx->buff_sg->length = nbytes;
1187			areq_ctx->curr_sg = areq_ctx->buff_sg;
1188			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1189		} else {
1190			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1191		}
1192	}
1193
1194	/*build mlli */
1195	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1196		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1197		/* add the src data to the sg_data */
1198		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
1199				0, true, &areq_ctx->mlli_nents);
1200		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1201		if (rc)
1202			goto fail_unmap_din;
1203	}
1204	/* change the buffer index for the unmap function */
1205	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1206	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
1207		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
1208	return 0;
1209
1210fail_unmap_din:
1211	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1212
1213unmap_curr_buff:
1214	if (*curr_buff_cnt)
1215		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1216
1217	return rc;
1218}
1219
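/**
 * cc_map_hash_request_update() - Map the data for an update hash
 * operation. Data that does not fill a whole block is copied to a staging
 * buffer for the next call; the block-aligned remainder is mapped for
 * processing.
 *
 * @drvdata: Driver private context
 * @ctx: Hash request context (struct ahash_req_ctx)
 * @src: Source SG list with the new data
 * @nbytes: Number of new data bytes
 * @block_size: Hash block size in bytes
 * @flags: Allocation flags for the MLLI table
 *
 * Return: 0 if data was mapped for processing, 1 if everything was
 * buffered (less than one block in total), negative error code otherwise.
 */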
1220int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
1221			       struct scatterlist *src, unsigned int nbytes,
1222			       unsigned int block_size, gfp_t flags)
1223{
1224	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1225	struct device *dev = drvdata_to_dev(drvdata);
1226	u8 *curr_buff = cc_hash_buf(areq_ctx);
1227	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1228	u8 *next_buff = cc_next_buf(areq_ctx);
1229	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
1230	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1231	unsigned int update_data_len;
1232	u32 total_in_len = nbytes + *curr_buff_cnt;
1233	struct buffer_array sg_data;
1234	unsigned int swap_index = 0;
1235	int rc = 0;
1236	u32 dummy = 0;
1237	u32 mapped_nents = 0;
1238
1239	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
1240		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1241	/* Init the type of the dma buffer */
1242	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1243	mlli_params->curr_pool = NULL;
1244	areq_ctx->curr_sg = NULL;
1245	sg_data.num_of_buffers = 0;
1246	areq_ctx->in_nents = 0;
1247
1248	if (total_in_len < block_size) {
1249		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
1250			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
1251		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
1252		sg_copy_to_buffer(src, areq_ctx->in_nents,
1253				  &curr_buff[*curr_buff_cnt], nbytes);
1254		*curr_buff_cnt += nbytes;
1255		return 1;
1256	}
1257
1258	/* Calculate the residue size*/
1259	*next_buff_cnt = total_in_len & (block_size - 1);
1260	/* update data len */
1261	update_data_len = total_in_len - *next_buff_cnt;
1262
1263	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
1264		*next_buff_cnt, update_data_len);
1265
1266	/* Copy the new residue to next buffer */
1267	if (*next_buff_cnt) {
1268		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
1269			next_buff, (update_data_len - *curr_buff_cnt),
1270			*next_buff_cnt);
1271		cc_copy_sg_portion(dev, next_buff, src,
1272				   (update_data_len - *curr_buff_cnt),
1273				   nbytes, CC_SG_TO_BUF);
1274		/* change the buffer index for next operation */
1275		swap_index = 1;
1276	}
1277
1278	if (*curr_buff_cnt) {
1279		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1280				     &sg_data);
1281		if (rc)
1282			return rc;
1283		/* change the buffer index for next operation */
1284		swap_index = 1;
1285	}
1286
1287	if (update_data_len > *curr_buff_cnt) {
1288		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
1289			       DMA_TO_DEVICE, &areq_ctx->in_nents,
1290			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
1291			       &mapped_nents);
1292		if (rc)
1293			goto unmap_curr_buff;
1294		if (mapped_nents == 1 &&
1295		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1296			/* only one entry in the SG and no previous data */
1297			memcpy(areq_ctx->buff_sg, src,
1298			       sizeof(struct scatterlist));
1299			areq_ctx->buff_sg->length = update_data_len;
1300			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1301			areq_ctx->curr_sg = areq_ctx->buff_sg;
1302		} else {
1303			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1304		}
1305	}
1306
1307	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1308		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1309		/* add the src data to the sg_data */
1310		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
1311				(update_data_len - *curr_buff_cnt), 0, true,
1312				&areq_ctx->mlli_nents);
1313		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1314		if (rc)
1315			goto fail_unmap_din;
1316	}
1317	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1318
1319	return 0;
1320
1321fail_unmap_din:
1322	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1323
1324unmap_curr_buff:
1325	if (*curr_buff_cnt)
1326		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1327
1328	return rc;
1329}
1330
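/**
 * cc_unmap_hash_request() - Unmap the MLLI table, the source SG list and
 * the staging buffer of a hash request.
 *
 * @dev: Device object
 * @ctx: Hash request context (struct ahash_req_ctx)
 * @src: Source SG list
 * @do_revert: True when cleaning up after a failure; restores the buffer
 *             index instead of clearing the previous data length
 */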
1331void cc_unmap_hash_request(struct device *dev, void *ctx,
1332			   struct scatterlist *src, bool do_revert)
1333{
1334	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1335	u32 *prev_len = cc_next_buf_cnt(areq_ctx);
1336
 1337	/* In case a pool was set, a table was
 1338	 * allocated and should be released.
 1339	 */
1340	if (areq_ctx->mlli_params.curr_pool) {
1341		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
1342			&areq_ctx->mlli_params.mlli_dma_addr,
1343			areq_ctx->mlli_params.mlli_virt_addr);
1344		dma_pool_free(areq_ctx->mlli_params.curr_pool,
1345			      areq_ctx->mlli_params.mlli_virt_addr,
1346			      areq_ctx->mlli_params.mlli_dma_addr);
1347	}
1348
1349	if (src && areq_ctx->in_nents) {
1350		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
1351			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
1352		dma_unmap_sg(dev, src,
1353			     areq_ctx->in_nents, DMA_TO_DEVICE);
1354	}
1355
1356	if (*prev_len) {
1357		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
1358			sg_virt(areq_ctx->buff_sg),
1359			&sg_dma_address(areq_ctx->buff_sg),
1360			sg_dma_len(areq_ctx->buff_sg));
1361		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1362		if (!do_revert) {
1363			/* clean the previous data length for update
1364			 * operation
1365			 */
1366			*prev_len = 0;
1367		} else {
1368			areq_ctx->buff_index ^= 1;
1369		}
1370	}
1371}
1372
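/**
 * cc_buffer_mgr_init() - Create the DMA pool used for MLLI tables.
 *
 * @drvdata: Driver private context
 *
 * Return: 0 on success, -ENOMEM if the pool cannot be created.
 *
 * Illustrative sketch (not part of the driver): the pool is expected to be
 * created once at driver init and released with cc_buffer_mgr_fini(),
 * e.g. assuming a probe-time drvdata and an int rc:
 *
 *	rc = cc_buffer_mgr_init(drvdata);
 *	if (rc)
 *		return rc;
 *	...
 *	cc_buffer_mgr_fini(drvdata);
 */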
1373int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
1374{
1375	struct device *dev = drvdata_to_dev(drvdata);
1376
1377	drvdata->mlli_buffs_pool =
1378		dma_pool_create("dx_single_mlli_tables", dev,
1379				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1380				LLI_ENTRY_BYTE_SIZE,
1381				MLLI_TABLE_MIN_ALIGNMENT, 0);
1382
1383	if (!drvdata->mlli_buffs_pool)
1384		return -ENOMEM;
1385
1386	return 0;
1387}
1388
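/**
 * cc_buffer_mgr_fini() - Destroy the MLLI DMA pool created by
 * cc_buffer_mgr_init().
 *
 * @drvdata: Driver private context
 *
 * Return: Always 0.
 */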
1389int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
1390{
1391	dma_pool_destroy(drvdata->mlli_buffs_pool);
1392	return 0;
1393}