v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
   3
   4#include <crypto/internal/aead.h>
   5#include <crypto/authenc.h>
   6#include <crypto/scatterwalk.h>
   7#include <linux/dmapool.h>
   8#include <linux/dma-mapping.h>
   9
  10#include "cc_buffer_mgr.h"
  11#include "cc_lli_defs.h"
  12#include "cc_cipher.h"
  13#include "cc_hash.h"
  14#include "cc_aead.h"
  15
  16union buffer_array_entry {
  17	struct scatterlist *sgl;
  18	dma_addr_t buffer_dma;
  19};
  20
  21struct buffer_array {
  22	unsigned int num_of_buffers;
  23	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
  24	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
  25	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  26	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
  27	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
  28	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  29};
  30
  31static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
  32{
  33	switch (type) {
  34	case CC_DMA_BUF_NULL:
  35		return "BUF_NULL";
  36	case CC_DMA_BUF_DLLI:
  37		return "BUF_DLLI";
  38	case CC_DMA_BUF_MLLI:
  39		return "BUF_MLLI";
  40	default:
  41		return "BUF_INVALID";
  42	}
  43}
  44
  45/**
  46 * cc_copy_mac() - Copy MAC to temporary location
  47 *
  48 * @dev: device object
  49 * @req: aead request object
  50 * @dir: [IN] copy from/to sgl
  51 */
  52static void cc_copy_mac(struct device *dev, struct aead_request *req,
  53			enum cc_sg_cpy_direct dir)
  54{
  55	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  56	u32 skip = req->assoclen + req->cryptlen;
  57
  58	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
  59			   (skip - areq_ctx->req_authsize), skip, dir);
  60}
  61
  62/**
  63 * cc_get_sgl_nents() - Get scatterlist number of entries.
  64 *
  65 * @dev: Device object
  66 * @sg_list: SG list
  67 * @nbytes: [IN] Total SGL data bytes.
  68 * @lbytes: [OUT] Returns the amount of bytes at the last entry
  69 *
  70 * Return:
  71 * Number of entries in the scatterlist
  72 */
  73static unsigned int cc_get_sgl_nents(struct device *dev,
  74				     struct scatterlist *sg_list,
  75				     unsigned int nbytes, u32 *lbytes)
  76{
  77	unsigned int nents = 0;
  78
  79	*lbytes = 0;
  80
  81	while (nbytes && sg_list) {
  82		nents++;
  83		/* get the number of bytes in the last entry */
  84		*lbytes = nbytes;
  85		nbytes -= (sg_list->length > nbytes) ?
  86				nbytes : sg_list->length;
  87		sg_list = sg_next(sg_list);
  88	}
  89
  90	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
  91	return nents;
  92}
  93
  94/**
  95 * cc_copy_sg_portion() - Copy scatter list data,
  96 * from to_skip to end, to dest and vice versa
  97 *
  98 * @dev: Device object
  99 * @dest: Buffer to copy to/from
 100 * @sg: SG list
 101 * @to_skip: Number of bytes to skip before copying
 102 * @end: Offset of last byte to copy
 103 * @direct: Transfer direction (true == from SG list to buffer, false == from
 104 *          buffer to SG list)
 105 */
 106void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 107			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
 108{
 109	u32 nents;
 110
 111	nents = sg_nents_for_len(sg, end);
 112	sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
 113		       (direct == CC_SG_TO_BUF));
 114}
 115
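/*
 * Editor's note (not part of the original source): a worked example of the
 * to_skip/end arguments above, using hypothetical request sizes. With
 * assoclen = 16, cryptlen = 64 and req_authsize = 16, cc_copy_mac() computes
 * skip = 16 + 64 = 80 and calls
 *
 *	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
 *			   80 - 16, 80, CC_SG_TO_BUF);
 *
 * i.e. the copy starts at byte offset 64 of req->src and saves the trailing
 * authentication tag region into the backup buffer.
 */
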
 116static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
 117				  u32 buff_size, u32 *curr_nents,
 118				  u32 **mlli_entry_pp)
 119{
 120	u32 *mlli_entry_p = *mlli_entry_pp;
 121	u32 new_nents;
 122
 123	/* Verify there is no memory overflow*/
 124	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
 125	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
 126		dev_err(dev, "Too many mlli entries. current %d max %d\n",
 127			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
 128		return -ENOMEM;
 129	}
 130
  131	/* handle buffers longer than 64 KB */
 132	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
 133		cc_lli_set_addr(mlli_entry_p, buff_dma);
 134		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
 135		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 136			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 137			mlli_entry_p[LLI_WORD1_OFFSET]);
 138		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
 139		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
 140		mlli_entry_p = mlli_entry_p + 2;
 141		(*curr_nents)++;
 142	}
 143	/*Last entry */
 144	cc_lli_set_addr(mlli_entry_p, buff_dma);
 145	cc_lli_set_size(mlli_entry_p, buff_size);
 146	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 147		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 148		mlli_entry_p[LLI_WORD1_OFFSET]);
 149	mlli_entry_p = mlli_entry_p + 2;
 150	*mlli_entry_pp = mlli_entry_p;
 151	(*curr_nents)++;
 152	return 0;
 153}
 154
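/*
 * Editor's note (not part of the original source): assuming
 * CC_MAX_MLLI_ENTRY_SIZE is 64 KB, as the comment in the loop above suggests,
 * a single 150 KB DMA buffer is rendered as three LLI entries:
 *
 *	entry[n]     : buff_dma,           64 KB
 *	entry[n + 1] : buff_dma + 64 KB,   64 KB
 *	entry[n + 2] : buff_dma + 128 KB,  22 KB  (last entry)
 *
 * and *curr_nents advances by 3, which is what the up-front overflow check
 * reserves via buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1.
 */
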
 155static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
 156				u32 sgl_data_len, u32 sgl_offset,
 157				u32 *curr_nents, u32 **mlli_entry_pp)
 158{
 159	struct scatterlist *curr_sgl = sgl;
 160	u32 *mlli_entry_p = *mlli_entry_pp;
 161	s32 rc = 0;
 162
 163	for ( ; (curr_sgl && sgl_data_len);
 164	      curr_sgl = sg_next(curr_sgl)) {
 165		u32 entry_data_len =
 166			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
 167				sg_dma_len(curr_sgl) - sgl_offset :
 168				sgl_data_len;
 169		sgl_data_len -= entry_data_len;
 170		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
 171					    sgl_offset, entry_data_len,
 172					    curr_nents, &mlli_entry_p);
 173		if (rc)
 174			return rc;
 175
 176		sgl_offset = 0;
 177	}
 178	*mlli_entry_pp = mlli_entry_p;
 179	return 0;
 180}
 181
 182static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
 183			    struct mlli_params *mlli_params, gfp_t flags)
 184{
 185	u32 *mlli_p;
 186	u32 total_nents = 0, prev_total_nents = 0;
 187	int rc = 0, i;
 188
 189	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
 190
 191	/* Allocate memory from the pointed pool */
 192	mlli_params->mlli_virt_addr =
 193		dma_pool_alloc(mlli_params->curr_pool, flags,
 194			       &mlli_params->mlli_dma_addr);
 195	if (!mlli_params->mlli_virt_addr) {
 196		dev_err(dev, "dma_pool_alloc() failed\n");
 197		rc = -ENOMEM;
 198		goto build_mlli_exit;
 199	}
 200	/* Point to start of MLLI */
 201	mlli_p = mlli_params->mlli_virt_addr;
 202	/* go over all SG's and link it to one MLLI table */
 203	for (i = 0; i < sg_data->num_of_buffers; i++) {
 204		union buffer_array_entry *entry = &sg_data->entry[i];
 205		u32 tot_len = sg_data->total_data_len[i];
 206		u32 offset = sg_data->offset[i];
 207
 208		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
 209					  &total_nents, &mlli_p);
 210		if (rc)
 211			return rc;
 212
 213		/* set last bit in the current table */
 214		if (sg_data->mlli_nents[i]) {
 215			/*Calculate the current MLLI table length for the
 216			 *length field in the descriptor
 217			 */
 218			*sg_data->mlli_nents[i] +=
 219				(total_nents - prev_total_nents);
 220			prev_total_nents = total_nents;
 221		}
 222	}
 223
 224	/* Set MLLI size for the bypass operation */
 225	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
 226
 227	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
 228		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
 229		mlli_params->mlli_len);
 230
 231build_mlli_exit:
 232	return rc;
 233}
 234
 235static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 236			    unsigned int nents, struct scatterlist *sgl,
 237			    unsigned int data_len, unsigned int data_offset,
 238			    bool is_last_table, u32 *mlli_nents)
 239{
 240	unsigned int index = sgl_data->num_of_buffers;
 241
 242	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
 243		index, nents, sgl, data_len, is_last_table);
 244	sgl_data->nents[index] = nents;
 245	sgl_data->entry[index].sgl = sgl;
 246	sgl_data->offset[index] = data_offset;
 247	sgl_data->total_data_len[index] = data_len;
 248	sgl_data->is_last[index] = is_last_table;
 249	sgl_data->mlli_nents[index] = mlli_nents;
 250	if (sgl_data->mlli_nents[index])
 251		*sgl_data->mlli_nents[index] = 0;
 252	sgl_data->num_of_buffers++;
 253}
 254
 255static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 256		     unsigned int nbytes, int direction, u32 *nents,
 257		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 258{
 259	int ret = 0;
 260
 261	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
  262	if (*nents > max_sg_nents) {
  263		dev_err(dev, "Too many fragments. current %d max %d\n",
  264			*nents, max_sg_nents);
  265		*nents = 0;
  266		return -ENOMEM;
  267	}
 268
 269	ret = dma_map_sg(dev, sg, *nents, direction);
  270	if (!ret) {
 271		*nents = 0;
 272		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
 273		return -ENOMEM;
 274	}
 275
 276	*mapped_nents = ret;
 277
 278	return 0;
 279}
 280
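/*
 * Editor's note (not part of the original source): in cc_map_sg() above,
 * *nents reports how many scatterlist entries cover nbytes, while
 * *mapped_nents is whatever dma_map_sg() returned - with an IOMMU the mapping
 * may be coalesced into fewer DMA segments. Callers such as
 * cc_map_cipher_request() therefore pick DLLI vs. MLLI based on mapped_nents,
 * not nents.
 */
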
 281static int
 282cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
 283		     u8 *config_data, struct buffer_array *sg_data,
 284		     unsigned int assoclen)
 285{
 286	dev_dbg(dev, " handle additional data config set to DLLI\n");
 287	/* create sg for the current buffer */
 288	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
 289		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
 290	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
 291		dev_err(dev, "dma_map_sg() config buffer failed\n");
 292		return -ENOMEM;
 293	}
 294	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 295		&sg_dma_address(&areq_ctx->ccm_adata_sg),
 296		sg_page(&areq_ctx->ccm_adata_sg),
 297		sg_virt(&areq_ctx->ccm_adata_sg),
 298		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
 299	/* prepare for case of MLLI */
 300	if (assoclen > 0) {
 301		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
 302				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
 303				0, false, NULL);
 304	}
 305	return 0;
 306}
 307
 308static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
 309			   u8 *curr_buff, u32 curr_buff_cnt,
 310			   struct buffer_array *sg_data)
 311{
 312	dev_dbg(dev, " handle curr buff %x set to   DLLI\n", curr_buff_cnt);
 313	/* create sg for the current buffer */
 314	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
 315	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
 316		dev_err(dev, "dma_map_sg() src buffer failed\n");
 317		return -ENOMEM;
 318	}
 319	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 320		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
 321		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
 322		areq_ctx->buff_sg->length);
 323	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
 324	areq_ctx->curr_sg = areq_ctx->buff_sg;
 325	areq_ctx->in_nents = 0;
 326	/* prepare for case of MLLI */
 327	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
 328			false, NULL);
 329	return 0;
 330}
 331
 332void cc_unmap_cipher_request(struct device *dev, void *ctx,
 333				unsigned int ivsize, struct scatterlist *src,
 334				struct scatterlist *dst)
 335{
 336	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 337
 338	if (req_ctx->gen_ctx.iv_dma_addr) {
 339		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 340			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
 341		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
 342				 ivsize, DMA_BIDIRECTIONAL);
 343	}
 344	/* Release pool */
 345	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
 346	    req_ctx->mlli_params.mlli_virt_addr) {
 347		dma_pool_free(req_ctx->mlli_params.curr_pool,
 348			      req_ctx->mlli_params.mlli_virt_addr,
 349			      req_ctx->mlli_params.mlli_dma_addr);
 350	}
 351
 352	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
 353	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 354
 355	if (src != dst) {
 356		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
 357		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
 358	}
 359}
 360
 361int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 362			  unsigned int ivsize, unsigned int nbytes,
 363			  void *info, struct scatterlist *src,
 364			  struct scatterlist *dst, gfp_t flags)
 365{
 366	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 367	struct mlli_params *mlli_params = &req_ctx->mlli_params;
 368	struct device *dev = drvdata_to_dev(drvdata);
 369	struct buffer_array sg_data;
 370	u32 dummy = 0;
 371	int rc = 0;
 372	u32 mapped_nents = 0;
 373
 374	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
 375	mlli_params->curr_pool = NULL;
 376	sg_data.num_of_buffers = 0;
 377
 378	/* Map IV buffer */
 379	if (ivsize) {
 380		dump_byte_array("iv", info, ivsize);
 381		req_ctx->gen_ctx.iv_dma_addr =
 382			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
 383		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
 384			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 385				ivsize, info);
 386			return -ENOMEM;
 387		}
 388		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 389			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
 390	} else {
 391		req_ctx->gen_ctx.iv_dma_addr = 0;
 392	}
 393
 394	/* Map the src SGL */
 395	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
 396		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
 397	if (rc)
 398		goto cipher_exit;
 399	if (mapped_nents > 1)
 400		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 401
 402	if (src == dst) {
 403		/* Handle inplace operation */
 404		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 405			req_ctx->out_nents = 0;
 406			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 407					nbytes, 0, true,
 408					&req_ctx->in_mlli_nents);
 409		}
 410	} else {
 411		/* Map the dst sg */
 412		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
 413			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
 414			       &dummy, &mapped_nents);
 415		if (rc)
 416			goto cipher_exit;
 417		if (mapped_nents > 1)
 418			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 419
 420		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 421			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 422					nbytes, 0, true,
 423					&req_ctx->in_mlli_nents);
 424			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
 425					nbytes, 0, true,
 426					&req_ctx->out_mlli_nents);
 427		}
 428	}
 429
 430	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 431		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 432		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 433		if (rc)
 434			goto cipher_exit;
 435	}
 436
 437	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
 438		cc_dma_buf_type(req_ctx->dma_buf_type));
 439
 440	return 0;
 441
 442cipher_exit:
 443	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
 444	return rc;
 445}
 446
 447void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 448{
 449	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 450	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 451	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 452
 453	if (areq_ctx->mac_buf_dma_addr) {
 454		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
 455				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
 456	}
 457
 458	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
 459		if (areq_ctx->hkey_dma_addr) {
 460			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
 461					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
 462		}
 463
 464		if (areq_ctx->gcm_block_len_dma_addr) {
 465			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
 466					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 467		}
 468
 469		if (areq_ctx->gcm_iv_inc1_dma_addr) {
 470			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
 471					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 472		}
 473
 474		if (areq_ctx->gcm_iv_inc2_dma_addr) {
 475			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
 476					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 477		}
 478	}
 479
 480	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 481		if (areq_ctx->ccm_iv0_dma_addr) {
 482			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
 483					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 484		}
 485
 486		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
 487	}
 488	if (areq_ctx->gen_ctx.iv_dma_addr) {
 489		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 490				 hw_iv_size, DMA_BIDIRECTIONAL);
 491		kfree_sensitive(areq_ctx->gen_ctx.iv);
 492	}
 493
 494	/* Release pool */
 495	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
 496	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
 497	    (areq_ctx->mlli_params.mlli_virt_addr)) {
 498		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
 499			&areq_ctx->mlli_params.mlli_dma_addr,
 500			areq_ctx->mlli_params.mlli_virt_addr);
 501		dma_pool_free(areq_ctx->mlli_params.curr_pool,
 502			      areq_ctx->mlli_params.mlli_virt_addr,
 503			      areq_ctx->mlli_params.mlli_dma_addr);
 504	}
 505
 506	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
 507		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 508		areq_ctx->assoclen, req->cryptlen);
 509
 510	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
 511		     DMA_BIDIRECTIONAL);
 512	if (req->src != req->dst) {
 513		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 514			sg_virt(req->dst));
 515		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
 516			     DMA_BIDIRECTIONAL);
 517	}
 518	if (drvdata->coherent &&
 519	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
 520	    req->src == req->dst) {
  521		/* copy back mac from temporary location to deal with possible
  522		 * data memory overwriting caused by a cache coherency
  523		 * problem.
  524		 */
 525		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
 526	}
 527}
 528
 529static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
 530			   u32 last_entry_data_size)
 531{
 532	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
 533}
 534
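/*
 * Editor's note (not part of the original source): for example, with an SGL
 * of 3 mapped entries, authsize = 16 and only 8 data bytes left in the last
 * entry, cc_is_icv_frag() returns true - the ICV straddles an entry boundary,
 * so the code below copies it to a contiguous buffer and verifies it by CPU
 * compare instead of pointing the engine at a single address.
 */
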
 535static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
 536			    struct aead_request *req,
 537			    struct buffer_array *sg_data,
 538			    bool is_last, bool do_chain)
 539{
 540	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 541	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 542	struct device *dev = drvdata_to_dev(drvdata);
 543	gfp_t flags = cc_gfp_flags(&req->base);
 544	int rc = 0;
 545
 546	if (!req->iv) {
 547		areq_ctx->gen_ctx.iv_dma_addr = 0;
 548		areq_ctx->gen_ctx.iv = NULL;
 549		goto chain_iv_exit;
 550	}
 551
 552	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
 553	if (!areq_ctx->gen_ctx.iv)
 554		return -ENOMEM;
 555
 556	areq_ctx->gen_ctx.iv_dma_addr =
 557		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
 558			       DMA_BIDIRECTIONAL);
 559	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 560		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 561			hw_iv_size, req->iv);
 562		kfree_sensitive(areq_ctx->gen_ctx.iv);
 563		areq_ctx->gen_ctx.iv = NULL;
 564		rc = -ENOMEM;
 565		goto chain_iv_exit;
 566	}
 567
 568	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 569		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
 570
 571chain_iv_exit:
 572	return rc;
 573}
 574
 575static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 576			       struct aead_request *req,
 577			       struct buffer_array *sg_data,
 578			       bool is_last, bool do_chain)
 579{
 580	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 581	int rc = 0;
 582	int mapped_nents = 0;
 583	struct device *dev = drvdata_to_dev(drvdata);
 584
 585	if (!sg_data) {
 586		rc = -EINVAL;
 587		goto chain_assoc_exit;
 588	}
 589
 590	if (areq_ctx->assoclen == 0) {
 591		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
 592		areq_ctx->assoc.nents = 0;
 593		areq_ctx->assoc.mlli_nents = 0;
 594		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
 595			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 596			areq_ctx->assoc.nents);
 597		goto chain_assoc_exit;
 598	}
 599
 600	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
 601	if (mapped_nents < 0)
 602		return mapped_nents;
 603
 604	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 605		dev_err(dev, "Too many fragments. current %d max %d\n",
 606			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 607		return -ENOMEM;
 608	}
 609	areq_ctx->assoc.nents = mapped_nents;
 610
 611	/* in CCM case we have additional entry for
 612	 * ccm header configurations
 613	 */
 614	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 615		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
  616			dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
 617				(areq_ctx->assoc.nents + 1),
 618				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 619			rc = -ENOMEM;
 620			goto chain_assoc_exit;
 621		}
 622	}
 623
 624	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
 625		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
 626	else
 627		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 628
 629	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 630		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
 631			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 632			areq_ctx->assoc.nents);
 633		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
 634				areq_ctx->assoclen, 0, is_last,
 635				&areq_ctx->assoc.mlli_nents);
 636		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 637	}
 638
 639chain_assoc_exit:
 640	return rc;
 641}
 642
 643static void cc_prepare_aead_data_dlli(struct aead_request *req,
 644				      u32 *src_last_bytes, u32 *dst_last_bytes)
 645{
 646	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 647	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 648	unsigned int authsize = areq_ctx->req_authsize;
 649	struct scatterlist *sg;
 650	ssize_t offset;
 651
 652	areq_ctx->is_icv_fragmented = false;
 653
 654	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 655		sg = areq_ctx->src_sgl;
 656		offset = *src_last_bytes - authsize;
 657	} else {
 658		sg = areq_ctx->dst_sgl;
 659		offset = *dst_last_bytes - authsize;
 660	}
 661
 662	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
 663	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
 664}
 665
 666static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
 667				      struct aead_request *req,
 668				      struct buffer_array *sg_data,
 669				      u32 *src_last_bytes, u32 *dst_last_bytes,
 670				      bool is_last_table)
 671{
 672	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 673	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 674	unsigned int authsize = areq_ctx->req_authsize;
 675	struct device *dev = drvdata_to_dev(drvdata);
 676	struct scatterlist *sg;
 677
 678	if (req->src == req->dst) {
 679		/*INPLACE*/
 680		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 681				areq_ctx->src_sgl, areq_ctx->cryptlen,
 682				areq_ctx->src_offset, is_last_table,
 683				&areq_ctx->src.mlli_nents);
 684
 685		areq_ctx->is_icv_fragmented =
 686			cc_is_icv_frag(areq_ctx->src.nents, authsize,
 687				       *src_last_bytes);
 688
 689		if (areq_ctx->is_icv_fragmented) {
 690			/* Backup happens only when ICV is fragmented, ICV
 691			 * verification is made by CPU compare in order to
 692			 * simplify MAC verification upon request completion
 693			 */
 694			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
  695				/* On coherent platforms (e.g. ACP)
  696				 * the ICV was already copied for any
  697				 * INPLACE-DECRYPT operation, so skip
  698				 * the copy here.
  699				 */
 700				if (!drvdata->coherent)
 701					cc_copy_mac(dev, req, CC_SG_TO_BUF);
 702
 703				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 704			} else {
 705				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 706				areq_ctx->icv_dma_addr =
 707					areq_ctx->mac_buf_dma_addr;
 708			}
 709		} else { /* Contig. ICV */
 710			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  711			/* Should handle the case of a non-contiguous sg */
 712			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 713				(*src_last_bytes - authsize);
 714			areq_ctx->icv_virt_addr = sg_virt(sg) +
 715				(*src_last_bytes - authsize);
 716		}
 717
 718	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 719		/*NON-INPLACE and DECRYPT*/
 720		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 721				areq_ctx->src_sgl, areq_ctx->cryptlen,
 722				areq_ctx->src_offset, is_last_table,
 723				&areq_ctx->src.mlli_nents);
 724		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 725				areq_ctx->dst_sgl, areq_ctx->cryptlen,
 726				areq_ctx->dst_offset, is_last_table,
 727				&areq_ctx->dst.mlli_nents);
 728
 729		areq_ctx->is_icv_fragmented =
 730			cc_is_icv_frag(areq_ctx->src.nents, authsize,
 731				       *src_last_bytes);
  732		/* Backup happens only when ICV is fragmented, ICV
  733		 * verification is made by CPU compare in order to
  734		 * simplify MAC verification upon request
  735		 * completion
  736		 */
 737		if (areq_ctx->is_icv_fragmented) {
 738			cc_copy_mac(dev, req, CC_SG_TO_BUF);
 739			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 740
 741		} else { /* Contig. ICV */
 742			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  743			/* Should handle the case of a non-contiguous sg */
 744			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 745				(*src_last_bytes - authsize);
 746			areq_ctx->icv_virt_addr = sg_virt(sg) +
 747				(*src_last_bytes - authsize);
 748		}
 749
 750	} else {
 751		/*NON-INPLACE and ENCRYPT*/
 752		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 753				areq_ctx->dst_sgl, areq_ctx->cryptlen,
 754				areq_ctx->dst_offset, is_last_table,
 755				&areq_ctx->dst.mlli_nents);
 756		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 757				areq_ctx->src_sgl, areq_ctx->cryptlen,
 758				areq_ctx->src_offset, is_last_table,
 759				&areq_ctx->src.mlli_nents);
 760
 761		areq_ctx->is_icv_fragmented =
 762			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
 763				       *dst_last_bytes);
 764
 765		if (!areq_ctx->is_icv_fragmented) {
 766			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
 767			/* Contig. ICV */
 768			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 769				(*dst_last_bytes - authsize);
 770			areq_ctx->icv_virt_addr = sg_virt(sg) +
 771				(*dst_last_bytes - authsize);
 772		} else {
 773			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
 774			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 775		}
 776	}
 777}
 778
 779static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 780			      struct aead_request *req,
 781			      struct buffer_array *sg_data,
 782			      bool is_last_table, bool do_chain)
 783{
 784	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 785	struct device *dev = drvdata_to_dev(drvdata);
 786	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 787	unsigned int authsize = areq_ctx->req_authsize;
 788	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
 789	int rc = 0;
 790	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
 791	u32 offset = 0;
 792	/* non-inplace mode */
 793	unsigned int size_for_map = req->assoclen + req->cryptlen;
 794	u32 sg_index = 0;
 795	u32 size_to_skip = req->assoclen;
 796	struct scatterlist *sgl;
 797
 798	offset = size_to_skip;
 799
 800	if (!sg_data)
 801		return -EINVAL;
 802
 803	areq_ctx->src_sgl = req->src;
 804	areq_ctx->dst_sgl = req->dst;
 805
 806	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 807			authsize : 0;
 808	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
 809					    &src_last_bytes);
 810	sg_index = areq_ctx->src_sgl->length;
 811	//check where the data starts
 812	while (src_mapped_nents && (sg_index <= size_to_skip)) {
 813		src_mapped_nents--;
 814		offset -= areq_ctx->src_sgl->length;
 815		sgl = sg_next(areq_ctx->src_sgl);
 816		if (!sgl)
 817			break;
 818		areq_ctx->src_sgl = sgl;
 819		sg_index += areq_ctx->src_sgl->length;
 820	}
 821	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 822		dev_err(dev, "Too many fragments. current %d max %d\n",
 823			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 824		return -ENOMEM;
 825	}
 826
 827	areq_ctx->src.nents = src_mapped_nents;
 828
 829	areq_ctx->src_offset = offset;
 830
 831	if (req->src != req->dst) {
 832		size_for_map = req->assoclen + req->cryptlen;
 833
 834		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
 835			size_for_map += authsize;
 836		else
 837			size_for_map -= authsize;
 838
 839		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
 840			       &areq_ctx->dst.mapped_nents,
 841			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 842			       &dst_mapped_nents);
 843		if (rc)
 844			goto chain_data_exit;
 845	}
 846
 847	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
 848					    &dst_last_bytes);
 849	sg_index = areq_ctx->dst_sgl->length;
 850	offset = size_to_skip;
 851
 852	//check where the data starts
 853	while (dst_mapped_nents && sg_index <= size_to_skip) {
 854		dst_mapped_nents--;
 855		offset -= areq_ctx->dst_sgl->length;
 856		sgl = sg_next(areq_ctx->dst_sgl);
 857		if (!sgl)
 858			break;
 859		areq_ctx->dst_sgl = sgl;
 860		sg_index += areq_ctx->dst_sgl->length;
 861	}
 862	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 863		dev_err(dev, "Too many fragments. current %d max %d\n",
 864			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 865		return -ENOMEM;
 866	}
 867	areq_ctx->dst.nents = dst_mapped_nents;
 868	areq_ctx->dst_offset = offset;
 869	if (src_mapped_nents > 1 ||
 870	    dst_mapped_nents  > 1 ||
 871	    do_chain) {
 872		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
 873		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
 874					  &src_last_bytes, &dst_last_bytes,
 875					  is_last_table);
 876	} else {
 877		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
 878		cc_prepare_aead_data_dlli(req, &src_last_bytes,
 879					  &dst_last_bytes);
 880	}
 881
 882chain_data_exit:
 883	return rc;
 884}
 885
 886static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
 887				      struct aead_request *req)
 888{
 889	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 890	u32 curr_mlli_size = 0;
 891
 892	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 893		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
 894		curr_mlli_size = areq_ctx->assoc.mlli_nents *
 895						LLI_ENTRY_BYTE_SIZE;
 896	}
 897
 898	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
 899		/*Inplace case dst nents equal to src nents*/
 900		if (req->src == req->dst) {
 901			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
 902			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
 903								curr_mlli_size;
 904			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
 905			if (!areq_ctx->is_single_pass)
 906				areq_ctx->assoc.mlli_nents +=
 907					areq_ctx->src.mlli_nents;
 908		} else {
 909			if (areq_ctx->gen_ctx.op_type ==
 910					DRV_CRYPTO_DIRECTION_DECRYPT) {
 911				areq_ctx->src.sram_addr =
 912						drvdata->mlli_sram_addr +
 913								curr_mlli_size;
 914				areq_ctx->dst.sram_addr =
 915						areq_ctx->src.sram_addr +
 916						areq_ctx->src.mlli_nents *
 917						LLI_ENTRY_BYTE_SIZE;
 918				if (!areq_ctx->is_single_pass)
 919					areq_ctx->assoc.mlli_nents +=
 920						areq_ctx->src.mlli_nents;
 921			} else {
 922				areq_ctx->dst.sram_addr =
 923						drvdata->mlli_sram_addr +
 924								curr_mlli_size;
 925				areq_ctx->src.sram_addr =
 926						areq_ctx->dst.sram_addr +
 927						areq_ctx->dst.mlli_nents *
 928						LLI_ENTRY_BYTE_SIZE;
 929				if (!areq_ctx->is_single_pass)
 930					areq_ctx->assoc.mlli_nents +=
 931						areq_ctx->dst.mlli_nents;
 932			}
 933		}
 934	}
 935}
 936
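/*
 * Editor's note (not part of the original source): for a non-inplace decrypt
 * where both assoc and data use MLLI, the SRAM layout produced above is
 *
 *	assoc table @ mlli_sram_addr
 *	src table   @ mlli_sram_addr + assoc.mlli_nents * LLI_ENTRY_BYTE_SIZE
 *	dst table   @ src.sram_addr  + src.mlli_nents   * LLI_ENTRY_BYTE_SIZE
 *
 * matching the order in which cc_map_aead_request() chained the entries.
 */
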
 937int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 938{
 939	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 940	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 941	struct device *dev = drvdata_to_dev(drvdata);
 942	struct buffer_array sg_data;
 943	unsigned int authsize = areq_ctx->req_authsize;
 944	int rc = 0;
 945	dma_addr_t dma_addr;
 946	u32 mapped_nents = 0;
 947	u32 dummy = 0; /*used for the assoc data fragments */
 948	u32 size_to_map;
 949	gfp_t flags = cc_gfp_flags(&req->base);
 950
 951	mlli_params->curr_pool = NULL;
 952	sg_data.num_of_buffers = 0;
 953
  954	/* copy mac to a temporary location to deal with possible
  955	 * data memory overwriting caused by a cache coherency problem.
  956	 */
 957	if (drvdata->coherent &&
 958	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
 959	    req->src == req->dst)
 960		cc_copy_mac(dev, req, CC_SG_TO_BUF);
 961
  962	/* calculate the size for the cipher; remove the ICV size in decrypt */
 963	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
 964				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 965				req->cryptlen :
 966				(req->cryptlen - authsize);
 967
 968	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
 969				  DMA_BIDIRECTIONAL);
 970	if (dma_mapping_error(dev, dma_addr)) {
 971		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
 972			MAX_MAC_SIZE, areq_ctx->mac_buf);
 973		rc = -ENOMEM;
 974		goto aead_map_failure;
 975	}
 976	areq_ctx->mac_buf_dma_addr = dma_addr;
 977
 978	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 979		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
 980
 981		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
 982					  DMA_TO_DEVICE);
 983
 984		if (dma_mapping_error(dev, dma_addr)) {
 985			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
 986				AES_BLOCK_SIZE, addr);
 987			areq_ctx->ccm_iv0_dma_addr = 0;
 988			rc = -ENOMEM;
 989			goto aead_map_failure;
 990		}
 991		areq_ctx->ccm_iv0_dma_addr = dma_addr;
 992
 993		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
 994					  &sg_data, areq_ctx->assoclen);
 995		if (rc)
 996			goto aead_map_failure;
 997	}
 998
 999	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1000		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
1001					  DMA_BIDIRECTIONAL);
1002		if (dma_mapping_error(dev, dma_addr)) {
1003			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
1004				AES_BLOCK_SIZE, areq_ctx->hkey);
1005			rc = -ENOMEM;
1006			goto aead_map_failure;
1007		}
1008		areq_ctx->hkey_dma_addr = dma_addr;
1009
1010		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
1011					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1012		if (dma_mapping_error(dev, dma_addr)) {
1013			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1014				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1015			rc = -ENOMEM;
1016			goto aead_map_failure;
1017		}
1018		areq_ctx->gcm_block_len_dma_addr = dma_addr;
1019
1020		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
1021					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1022
1023		if (dma_mapping_error(dev, dma_addr)) {
1024			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1025				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
1026			areq_ctx->gcm_iv_inc1_dma_addr = 0;
1027			rc = -ENOMEM;
1028			goto aead_map_failure;
1029		}
1030		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
1031
1032		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
1033					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1034
1035		if (dma_mapping_error(dev, dma_addr)) {
1036			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1037				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
1038			areq_ctx->gcm_iv_inc2_dma_addr = 0;
1039			rc = -ENOMEM;
1040			goto aead_map_failure;
1041		}
1042		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
1043	}
1044
1045	size_to_map = req->cryptlen + req->assoclen;
1046	/* If we do in-place encryption, we also need the auth tag */
1047	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
1048	   (req->src == req->dst)) {
1049		size_to_map += authsize;
1050	}
1051
1052	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
1053		       &areq_ctx->src.mapped_nents,
1054		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
1055			LLI_MAX_NUM_OF_DATA_ENTRIES),
1056		       &dummy, &mapped_nents);
1057	if (rc)
1058		goto aead_map_failure;
1059
1060	if (areq_ctx->is_single_pass) {
1061		/*
1062		 * Create MLLI table for:
1063		 *   (1) Assoc. data
1064		 *   (2) Src/Dst SGLs
 1065		 *   Note: IV is a contiguous buffer (not an SGL)
1066		 */
1067		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1068		if (rc)
1069			goto aead_map_failure;
1070		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
1071		if (rc)
1072			goto aead_map_failure;
1073		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
1074		if (rc)
1075			goto aead_map_failure;
1076	} else { /* DOUBLE-PASS flow */
1077		/*
1078		 * Prepare MLLI table(s) in this order:
1079		 *
1080		 * If ENCRYPT/DECRYPT (inplace):
1081		 *   (1) MLLI table for assoc
1082		 *   (2) IV entry (chained right after end of assoc)
1083		 *   (3) MLLI for src/dst (inplace operation)
1084		 *
1085		 * If ENCRYPT (non-inplace)
1086		 *   (1) MLLI table for assoc
1087		 *   (2) IV entry (chained right after end of assoc)
1088		 *   (3) MLLI for dst
1089		 *   (4) MLLI for src
1090		 *
1091		 * If DECRYPT (non-inplace)
1092		 *   (1) MLLI table for assoc
1093		 *   (2) IV entry (chained right after end of assoc)
1094		 *   (3) MLLI for src
1095		 *   (4) MLLI for dst
1096		 */
1097		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1098		if (rc)
1099			goto aead_map_failure;
1100		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
1101		if (rc)
1102			goto aead_map_failure;
1103		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
1104		if (rc)
1105			goto aead_map_failure;
1106	}
1107
 1108	/* MLLI support - start building the MLLI according to the above
1109	 * results
1110	 */
1111	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1112	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1113		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1114		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1115		if (rc)
1116			goto aead_map_failure;
1117
1118		cc_update_aead_mlli_nents(drvdata, req);
1119		dev_dbg(dev, "assoc params mn %d\n",
1120			areq_ctx->assoc.mlli_nents);
1121		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
1122		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
1123	}
1124	return 0;
1125
1126aead_map_failure:
1127	cc_unmap_aead_request(dev, req);
1128	return rc;
1129}
1130
1131int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
1132			      struct scatterlist *src, unsigned int nbytes,
1133			      bool do_update, gfp_t flags)
1134{
1135	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1136	struct device *dev = drvdata_to_dev(drvdata);
1137	u8 *curr_buff = cc_hash_buf(areq_ctx);
1138	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1139	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1140	struct buffer_array sg_data;
1141	int rc = 0;
1142	u32 dummy = 0;
1143	u32 mapped_nents = 0;
1144
1145	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
1146		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1147	/* Init the type of the dma buffer */
1148	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1149	mlli_params->curr_pool = NULL;
1150	sg_data.num_of_buffers = 0;
1151	areq_ctx->in_nents = 0;
1152
1153	if (nbytes == 0 && *curr_buff_cnt == 0) {
1154		/* nothing to do */
1155		return 0;
1156	}
1157
1158	/* map the previous buffer */
1159	if (*curr_buff_cnt) {
1160		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1161				     &sg_data);
1162		if (rc)
1163			return rc;
1164	}
1165
1166	if (src && nbytes > 0 && do_update) {
1167		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
1168			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
1169			       &dummy, &mapped_nents);
1170		if (rc)
1171			goto unmap_curr_buff;
1172		if (src && mapped_nents == 1 &&
1173		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1174			memcpy(areq_ctx->buff_sg, src,
1175			       sizeof(struct scatterlist));
1176			areq_ctx->buff_sg->length = nbytes;
1177			areq_ctx->curr_sg = areq_ctx->buff_sg;
1178			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1179		} else {
1180			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1181		}
1182	}
1183
1184	/*build mlli */
1185	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1186		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1187		/* add the src data to the sg_data */
1188		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
1189				0, true, &areq_ctx->mlli_nents);
1190		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1191		if (rc)
1192			goto fail_unmap_din;
1193	}
1194	/* change the buffer index for the unmap function */
1195	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1196	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
1197		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
1198	return 0;
1199
1200fail_unmap_din:
1201	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1202
1203unmap_curr_buff:
1204	if (*curr_buff_cnt)
1205		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1206
1207	return rc;
1208}
1209
1210int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
1211			       struct scatterlist *src, unsigned int nbytes,
1212			       unsigned int block_size, gfp_t flags)
1213{
1214	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1215	struct device *dev = drvdata_to_dev(drvdata);
1216	u8 *curr_buff = cc_hash_buf(areq_ctx);
1217	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1218	u8 *next_buff = cc_next_buf(areq_ctx);
1219	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
1220	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1221	unsigned int update_data_len;
1222	u32 total_in_len = nbytes + *curr_buff_cnt;
1223	struct buffer_array sg_data;
1224	unsigned int swap_index = 0;
1225	int rc = 0;
1226	u32 dummy = 0;
1227	u32 mapped_nents = 0;
1228
1229	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
1230		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1231	/* Init the type of the dma buffer */
1232	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1233	mlli_params->curr_pool = NULL;
1234	areq_ctx->curr_sg = NULL;
1235	sg_data.num_of_buffers = 0;
1236	areq_ctx->in_nents = 0;
1237
1238	if (total_in_len < block_size) {
1239		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
1240			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
1241		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
1242		sg_copy_to_buffer(src, areq_ctx->in_nents,
1243				  &curr_buff[*curr_buff_cnt], nbytes);
1244		*curr_buff_cnt += nbytes;
1245		return 1;
1246	}
1247
1248	/* Calculate the residue size*/
1249	*next_buff_cnt = total_in_len & (block_size - 1);
1250	/* update data len */
1251	update_data_len = total_in_len - *next_buff_cnt;
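	/*
	 * Editor's note (not part of the original source): the mask above
	 * relies on block_size being a power of two (true for the supported
	 * hash block sizes), so it is total_in_len % block_size. E.g. with
	 * block_size = 64, *curr_buff_cnt = 10 and nbytes = 90:
	 * total_in_len = 100, *next_buff_cnt = 100 & 63 = 36 bytes are kept
	 * as residue for the next update, and update_data_len = 64 bytes are
	 * hashed now.
	 */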
1252
1253	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
1254		*next_buff_cnt, update_data_len);
1255
1256	/* Copy the new residue to next buffer */
1257	if (*next_buff_cnt) {
1258		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
1259			next_buff, (update_data_len - *curr_buff_cnt),
1260			*next_buff_cnt);
1261		cc_copy_sg_portion(dev, next_buff, src,
1262				   (update_data_len - *curr_buff_cnt),
1263				   nbytes, CC_SG_TO_BUF);
1264		/* change the buffer index for next operation */
1265		swap_index = 1;
1266	}
1267
1268	if (*curr_buff_cnt) {
1269		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1270				     &sg_data);
1271		if (rc)
1272			return rc;
1273		/* change the buffer index for next operation */
1274		swap_index = 1;
1275	}
1276
1277	if (update_data_len > *curr_buff_cnt) {
1278		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
1279			       DMA_TO_DEVICE, &areq_ctx->in_nents,
1280			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
1281			       &mapped_nents);
1282		if (rc)
1283			goto unmap_curr_buff;
1284		if (mapped_nents == 1 &&
1285		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1286			/* only one entry in the SG and no previous data */
1287			memcpy(areq_ctx->buff_sg, src,
1288			       sizeof(struct scatterlist));
1289			areq_ctx->buff_sg->length = update_data_len;
1290			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1291			areq_ctx->curr_sg = areq_ctx->buff_sg;
1292		} else {
1293			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1294		}
1295	}
1296
1297	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1298		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1299		/* add the src data to the sg_data */
1300		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
1301				(update_data_len - *curr_buff_cnt), 0, true,
1302				&areq_ctx->mlli_nents);
1303		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1304		if (rc)
1305			goto fail_unmap_din;
1306	}
1307	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1308
1309	return 0;
1310
1311fail_unmap_din:
1312	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1313
1314unmap_curr_buff:
1315	if (*curr_buff_cnt)
1316		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1317
1318	return rc;
1319}
1320
1321void cc_unmap_hash_request(struct device *dev, void *ctx,
1322			   struct scatterlist *src, bool do_revert)
1323{
1324	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1325	u32 *prev_len = cc_next_buf_cnt(areq_ctx);
1326
1327	/*In case a pool was set, a table was
1328	 *allocated and should be released
1329	 */
1330	if (areq_ctx->mlli_params.curr_pool) {
1331		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
1332			&areq_ctx->mlli_params.mlli_dma_addr,
1333			areq_ctx->mlli_params.mlli_virt_addr);
1334		dma_pool_free(areq_ctx->mlli_params.curr_pool,
1335			      areq_ctx->mlli_params.mlli_virt_addr,
1336			      areq_ctx->mlli_params.mlli_dma_addr);
1337	}
1338
1339	if (src && areq_ctx->in_nents) {
1340		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
1341			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
1342		dma_unmap_sg(dev, src,
1343			     areq_ctx->in_nents, DMA_TO_DEVICE);
1344	}
1345
1346	if (*prev_len) {
1347		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
1348			sg_virt(areq_ctx->buff_sg),
1349			&sg_dma_address(areq_ctx->buff_sg),
1350			sg_dma_len(areq_ctx->buff_sg));
1351		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1352		if (!do_revert) {
1353			/* clean the previous data length for update
1354			 * operation
1355			 */
1356			*prev_len = 0;
1357		} else {
1358			areq_ctx->buff_index ^= 1;
1359		}
1360	}
1361}
1362
1363int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
1364{
1365	struct device *dev = drvdata_to_dev(drvdata);
1366
1367	drvdata->mlli_buffs_pool =
1368		dma_pool_create("dx_single_mlli_tables", dev,
1369				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1370				LLI_ENTRY_BYTE_SIZE,
1371				MLLI_TABLE_MIN_ALIGNMENT, 0);
1372
1373	if (!drvdata->mlli_buffs_pool)
1374		return -ENOMEM;
1375
1376	return 0;
1377}
1378
1379int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
1380{
1381	dma_pool_destroy(drvdata->mlli_buffs_pool);
1382	return 0;
1383}
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
   3
   4#include <crypto/internal/aead.h>
   5#include <crypto/authenc.h>
   6#include <crypto/scatterwalk.h>
   7#include <linux/dmapool.h>
   8#include <linux/dma-mapping.h>
   9
  10#include "cc_buffer_mgr.h"
  11#include "cc_lli_defs.h"
  12#include "cc_cipher.h"
  13#include "cc_hash.h"
  14#include "cc_aead.h"
  15
  16enum dma_buffer_type {
  17	DMA_NULL_TYPE = -1,
  18	DMA_SGL_TYPE = 1,
  19	DMA_BUFF_TYPE = 2,
  20};
  21
  22struct buff_mgr_handle {
  23	struct dma_pool *mlli_buffs_pool;
  24};
  25
  26union buffer_array_entry {
  27	struct scatterlist *sgl;
  28	dma_addr_t buffer_dma;
  29};
  30
  31struct buffer_array {
  32	unsigned int num_of_buffers;
  33	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
  34	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
  35	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  36	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
  37	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
  38	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
  39	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  40};
  41
  42static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
  43{
  44	switch (type) {
  45	case CC_DMA_BUF_NULL:
  46		return "BUF_NULL";
  47	case CC_DMA_BUF_DLLI:
  48		return "BUF_DLLI";
  49	case CC_DMA_BUF_MLLI:
  50		return "BUF_MLLI";
  51	default:
  52		return "BUF_INVALID";
  53	}
  54}
  55
  56/**
  57 * cc_copy_mac() - Copy MAC to temporary location
  58 *
  59 * @dev: device object
  60 * @req: aead request object
  61 * @dir: [IN] copy from/to sgl
  62 */
  63static void cc_copy_mac(struct device *dev, struct aead_request *req,
  64			enum cc_sg_cpy_direct dir)
  65{
  66	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  67	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  68	u32 skip = areq_ctx->assoclen + req->cryptlen;
  69
  70	if (areq_ctx->is_gcm4543)
  71		skip += crypto_aead_ivsize(tfm);
  72
  73	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
  74			   (skip - areq_ctx->req_authsize), skip, dir);
  75}
  76
  77/**
  78 * cc_get_sgl_nents() - Get scatterlist number of entries.
  79 *
 
  80 * @sg_list: SG list
  81 * @nbytes: [IN] Total SGL data bytes.
  82 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 
 
 
  83 */
  84static unsigned int cc_get_sgl_nents(struct device *dev,
  85				     struct scatterlist *sg_list,
  86				     unsigned int nbytes, u32 *lbytes)
  87{
  88	unsigned int nents = 0;
  89
 
 
  90	while (nbytes && sg_list) {
  91		nents++;
  92		/* get the number of bytes in the last entry */
  93		*lbytes = nbytes;
  94		nbytes -= (sg_list->length > nbytes) ?
  95				nbytes : sg_list->length;
  96		sg_list = sg_next(sg_list);
  97	}
 
  98	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
  99	return nents;
 100}
 101
 102/**
 103 * cc_copy_sg_portion() - Copy scatter list data,
 104 * from to_skip to end, to dest and vice versa
 105 *
 106 * @dest:
 107 * @sg:
 108 * @to_skip:
 109 * @end:
 110 * @direct:
 
 
 111 */
 112void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 113			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
 114{
 115	u32 nents;
 116
 117	nents = sg_nents_for_len(sg, end);
 118	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
 119		       (direct == CC_SG_TO_BUF));
 120}
 121
 122static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
 123				  u32 buff_size, u32 *curr_nents,
 124				  u32 **mlli_entry_pp)
 125{
 126	u32 *mlli_entry_p = *mlli_entry_pp;
 127	u32 new_nents;
 128
 129	/* Verify there is no memory overflow*/
 130	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
 131	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
 132		dev_err(dev, "Too many mlli entries. current %d max %d\n",
 133			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
 134		return -ENOMEM;
 135	}
 136
 137	/*handle buffer longer than 64 kbytes */
 138	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
 139		cc_lli_set_addr(mlli_entry_p, buff_dma);
 140		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
 141		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 142			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 143			mlli_entry_p[LLI_WORD1_OFFSET]);
 144		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
 145		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
 146		mlli_entry_p = mlli_entry_p + 2;
 147		(*curr_nents)++;
 148	}
 149	/*Last entry */
 150	cc_lli_set_addr(mlli_entry_p, buff_dma);
 151	cc_lli_set_size(mlli_entry_p, buff_size);
 152	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 153		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 154		mlli_entry_p[LLI_WORD1_OFFSET]);
 155	mlli_entry_p = mlli_entry_p + 2;
 156	*mlli_entry_pp = mlli_entry_p;
 157	(*curr_nents)++;
 158	return 0;
 159}
 160
 161static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
 162				u32 sgl_data_len, u32 sgl_offset,
 163				u32 *curr_nents, u32 **mlli_entry_pp)
 164{
 165	struct scatterlist *curr_sgl = sgl;
 166	u32 *mlli_entry_p = *mlli_entry_pp;
 167	s32 rc = 0;
 168
 169	for ( ; (curr_sgl && sgl_data_len);
 170	      curr_sgl = sg_next(curr_sgl)) {
 171		u32 entry_data_len =
 172			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
 173				sg_dma_len(curr_sgl) - sgl_offset :
 174				sgl_data_len;
 175		sgl_data_len -= entry_data_len;
 176		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
 177					    sgl_offset, entry_data_len,
 178					    curr_nents, &mlli_entry_p);
 179		if (rc)
 180			return rc;
 181
 182		sgl_offset = 0;
 183	}
 184	*mlli_entry_pp = mlli_entry_p;
 185	return 0;
 186}
 187
 188static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
 189			    struct mlli_params *mlli_params, gfp_t flags)
 190{
 191	u32 *mlli_p;
 192	u32 total_nents = 0, prev_total_nents = 0;
 193	int rc = 0, i;
 194
 195	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
 196
 197	/* Allocate memory from the pointed pool */
 198	mlli_params->mlli_virt_addr =
 199		dma_pool_alloc(mlli_params->curr_pool, flags,
 200			       &mlli_params->mlli_dma_addr);
 201	if (!mlli_params->mlli_virt_addr) {
 202		dev_err(dev, "dma_pool_alloc() failed\n");
 203		rc = -ENOMEM;
 204		goto build_mlli_exit;
 205	}
 206	/* Point to start of MLLI */
 207	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
 208	/* go over all SG's and link it to one MLLI table */
 209	for (i = 0; i < sg_data->num_of_buffers; i++) {
 210		union buffer_array_entry *entry = &sg_data->entry[i];
 211		u32 tot_len = sg_data->total_data_len[i];
 212		u32 offset = sg_data->offset[i];
 213
 214		if (sg_data->type[i] == DMA_SGL_TYPE)
 215			rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
 216						  offset, &total_nents,
 217						  &mlli_p);
 218		else /*DMA_BUFF_TYPE*/
 219			rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
 220						    tot_len, &total_nents,
 221						    &mlli_p);
 222		if (rc)
 223			return rc;
 224
 225		/* set last bit in the current table */
 226		if (sg_data->mlli_nents[i]) {
 227			/*Calculate the current MLLI table length for the
 228			 *length field in the descriptor
 229			 */
 230			*sg_data->mlli_nents[i] +=
 231				(total_nents - prev_total_nents);
 232			prev_total_nents = total_nents;
 233		}
 234	}
 235
 236	/* Set MLLI size for the bypass operation */
 237	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
 238
 239	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
 240		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
 241		mlli_params->mlli_len);
 242
 243build_mlli_exit:
 244	return rc;
 245}
 246
 247static void cc_add_buffer_entry(struct device *dev,
 248				struct buffer_array *sgl_data,
 249				dma_addr_t buffer_dma, unsigned int buffer_len,
 250				bool is_last_entry, u32 *mlli_nents)
 251{
 252	unsigned int index = sgl_data->num_of_buffers;
 253
 254	dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
 255		index, &buffer_dma, buffer_len, is_last_entry);
 256	sgl_data->nents[index] = 1;
 257	sgl_data->entry[index].buffer_dma = buffer_dma;
 258	sgl_data->offset[index] = 0;
 259	sgl_data->total_data_len[index] = buffer_len;
 260	sgl_data->type[index] = DMA_BUFF_TYPE;
 261	sgl_data->is_last[index] = is_last_entry;
 262	sgl_data->mlli_nents[index] = mlli_nents;
 263	if (sgl_data->mlli_nents[index])
 264		*sgl_data->mlli_nents[index] = 0;
 265	sgl_data->num_of_buffers++;
 266}
 267
 268static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 269			    unsigned int nents, struct scatterlist *sgl,
 270			    unsigned int data_len, unsigned int data_offset,
 271			    bool is_last_table, u32 *mlli_nents)
 272{
 273	unsigned int index = sgl_data->num_of_buffers;
 274
 275	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
 276		index, nents, sgl, data_len, is_last_table);
 277	sgl_data->nents[index] = nents;
 278	sgl_data->entry[index].sgl = sgl;
 279	sgl_data->offset[index] = data_offset;
 280	sgl_data->total_data_len[index] = data_len;
 281	sgl_data->type[index] = DMA_SGL_TYPE;
 282	sgl_data->is_last[index] = is_last_table;
 283	sgl_data->mlli_nents[index] = mlli_nents;
 284	if (sgl_data->mlli_nents[index])
 285		*sgl_data->mlli_nents[index] = 0;
 286	sgl_data->num_of_buffers++;
 287}
 288
 289static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 290		     unsigned int nbytes, int direction, u32 *nents,
 291		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 292{
 293	if (sg_is_last(sg)) {
 294		/* One entry only case - set to DLLI */
 295		if (dma_map_sg(dev, sg, 1, direction) != 1) {
 296			dev_err(dev, "dma_map_sg() single buffer failed\n");
 297			return -ENOMEM;
 298		}
 299		dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 300			&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
 301			sg->offset, sg->length);
 302		*lbytes = nbytes;
 303		*nents = 1;
 304		*mapped_nents = 1;
 305	} else {  /* !sg_is_last() */
 306		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
 307		if (*nents > max_sg_nents) {
 308			dev_err(dev, "Too many fragments. current %d max %d\n",
 309				*nents, max_sg_nents);
 310			*nents = 0;
 311			return -ENOMEM;
 312		}
 313		/* In case of mmu the number of mapped nents might
 314		 * be changed from the original sgl nents
 315		 */
 316		*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
 317		if (*mapped_nents == 0) {
 318			*nents = 0;
 319			dev_err(dev, "dma_map_sg() sg buffer failed\n");
 320			return -ENOMEM;
 321		}
 322	}
 323
 324	return 0;
 325}
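
/*
 * Usage sketch (editorial illustration only, mirroring cc_map_cipher_request()
 * below; the local variable names are hypothetical):
 *
 *	u32 nents = 0, lbytes = 0, mapped_nents = 0;
 *	int rc;
 *
 *	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &nents,
 *		       LLI_MAX_NUM_OF_DATA_ENTRIES, &lbytes, &mapped_nents);
 *	if (rc)
 *		return rc;
 *	if (mapped_nents > 1)
 *		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 *
 * A single mapped segment can stay in DLLI form; more than one segment forces
 * the MLLI path, which is exactly the decision the callers below make.
 */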
 326
 327static int
 328cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
 329		     u8 *config_data, struct buffer_array *sg_data,
 330		     unsigned int assoclen)
 331{
 332	dev_dbg(dev, " handle additional data config set to DLLI\n");
 333	/* create sg for the current buffer */
 334	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
 335		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
 336	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
 337		dev_err(dev, "dma_map_sg() config buffer failed\n");
 338		return -ENOMEM;
 339	}
 340	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 341		&sg_dma_address(&areq_ctx->ccm_adata_sg),
 342		sg_page(&areq_ctx->ccm_adata_sg),
 343		sg_virt(&areq_ctx->ccm_adata_sg),
 344		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
 345	/* prepare for case of MLLI */
 346	if (assoclen > 0) {
 347		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
 348				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
 349				0, false, NULL);
 350	}
 351	return 0;
 352}
 353
 354static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
 355			   u8 *curr_buff, u32 curr_buff_cnt,
 356			   struct buffer_array *sg_data)
 357{
 358	dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
 359	/* create sg for the current buffer */
 360	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
 361	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
 362		dev_err(dev, "dma_map_sg() src buffer failed\n");
 363		return -ENOMEM;
 364	}
 365	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 366		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
 367		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
 368		areq_ctx->buff_sg->length);
 369	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
 370	areq_ctx->curr_sg = areq_ctx->buff_sg;
 371	areq_ctx->in_nents = 0;
 372	/* prepare for case of MLLI */
 373	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
 374			false, NULL);
 375	return 0;
 376}
 377
 378void cc_unmap_cipher_request(struct device *dev, void *ctx,
 379				unsigned int ivsize, struct scatterlist *src,
 380				struct scatterlist *dst)
 381{
 382	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 383
 384	if (req_ctx->gen_ctx.iv_dma_addr) {
 385		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 386			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
 387		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
 388				 ivsize, DMA_BIDIRECTIONAL);
 389	}
 390	/* Release pool */
 391	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
 392	    req_ctx->mlli_params.mlli_virt_addr) {
 393		dma_pool_free(req_ctx->mlli_params.curr_pool,
 394			      req_ctx->mlli_params.mlli_virt_addr,
 395			      req_ctx->mlli_params.mlli_dma_addr);
 396	}
 397
 398	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
 399	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 400
 401	if (src != dst) {
 402		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
 403		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
 404	}
 405}
 406
 407int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 408			  unsigned int ivsize, unsigned int nbytes,
 409			  void *info, struct scatterlist *src,
 410			  struct scatterlist *dst, gfp_t flags)
 411{
 412	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 413	struct mlli_params *mlli_params = &req_ctx->mlli_params;
 414	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 415	struct device *dev = drvdata_to_dev(drvdata);
 416	struct buffer_array sg_data;
 417	u32 dummy = 0;
 418	int rc = 0;
 419	u32 mapped_nents = 0;
 420
 421	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
 422	mlli_params->curr_pool = NULL;
 423	sg_data.num_of_buffers = 0;
 424
 425	/* Map IV buffer */
 426	if (ivsize) {
 427		dump_byte_array("iv", (u8 *)info, ivsize);
 428		req_ctx->gen_ctx.iv_dma_addr =
 429			dma_map_single(dev, (void *)info,
 430				       ivsize, DMA_BIDIRECTIONAL);
 431		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
 432			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 433				ivsize, info);
 434			return -ENOMEM;
 435		}
 436		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 437			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
 438	} else {
 439		req_ctx->gen_ctx.iv_dma_addr = 0;
 440	}
 441
 442	/* Map the src SGL */
 443	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
 444		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
 445	if (rc)
 446		goto cipher_exit;
 447	if (mapped_nents > 1)
 448		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 449
 450	if (src == dst) {
 451		/* Handle inplace operation */
 452		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 453			req_ctx->out_nents = 0;
 454			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 455					nbytes, 0, true,
 456					&req_ctx->in_mlli_nents);
 457		}
 458	} else {
 459		/* Map the dst sg */
 460		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
 461			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
 462			       &dummy, &mapped_nents);
 463		if (rc)
 464			goto cipher_exit;
 465		if (mapped_nents > 1)
 466			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 467
 468		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 469			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 470					nbytes, 0, true,
 471					&req_ctx->in_mlli_nents);
 472			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
 473					nbytes, 0, true,
 474					&req_ctx->out_mlli_nents);
 475		}
 476	}
 477
 478	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 479		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
 480		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 481		if (rc)
 482			goto cipher_exit;
 483	}
 484
 485	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
 486		cc_dma_buf_type(req_ctx->dma_buf_type));
 487
 488	return 0;
 489
 490cipher_exit:
 491	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
 492	return rc;
 493}
 494
 495void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 496{
 497	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 498	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 499	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 500
 501	if (areq_ctx->mac_buf_dma_addr) {
 502		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
 503				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
 504	}
 505
 506	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
 507		if (areq_ctx->hkey_dma_addr) {
 508			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
 509					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
 510		}
 511
 512		if (areq_ctx->gcm_block_len_dma_addr) {
 513			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
 514					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 515		}
 516
 517		if (areq_ctx->gcm_iv_inc1_dma_addr) {
 518			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
 519					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 520		}
 521
 522		if (areq_ctx->gcm_iv_inc2_dma_addr) {
 523			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
 524					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 525		}
 526	}
 527
 528	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 529		if (areq_ctx->ccm_iv0_dma_addr) {
 530			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
 531					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 532		}
 533
 534		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
 535	}
 536	if (areq_ctx->gen_ctx.iv_dma_addr) {
 537		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 538				 hw_iv_size, DMA_BIDIRECTIONAL);
 539		kzfree(areq_ctx->gen_ctx.iv);
 540	}
 541
 542	/* Release pool */
 543	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
 544	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
 545	    (areq_ctx->mlli_params.mlli_virt_addr)) {
 546		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
 547			&areq_ctx->mlli_params.mlli_dma_addr,
 548			areq_ctx->mlli_params.mlli_virt_addr);
 549		dma_pool_free(areq_ctx->mlli_params.curr_pool,
 550			      areq_ctx->mlli_params.mlli_virt_addr,
 551			      areq_ctx->mlli_params.mlli_dma_addr);
 552	}
 553
 554	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
 555		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 556		areq_ctx->assoclen, req->cryptlen);
 557
 558	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
 559	if (req->src != req->dst) {
 560		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 561			sg_virt(req->dst));
 562		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
 563			     DMA_BIDIRECTIONAL);
 564	}
 565	if (drvdata->coherent &&
 566	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
 567	    req->src == req->dst) {
 568		/* Copy the MAC back from its temporary location to deal with
 569		 * a possible data memory overwrite caused by cache coherence
 570		 * problems.
 571		 */
 572		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
 573	}
 574}
 575
 576static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
 577			   u32 last_entry_data_size)
 578{
 579	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
 580}
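
/*
 * Worked example (editorial, hypothetical values): with authsize == 16 and a
 * multi-entry SGL whose last entry carries only 10 data bytes,
 * cc_is_icv_frag(3, 16, 10) returns true - the ICV straddles the last two
 * entries and must be handled via the backup/mac buffers rather than as a
 * single contiguous region.
 */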
 581
 582static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
 583			    struct aead_request *req,
 584			    struct buffer_array *sg_data,
 585			    bool is_last, bool do_chain)
 586{
 587	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 588	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 589	struct device *dev = drvdata_to_dev(drvdata);
 590	gfp_t flags = cc_gfp_flags(&req->base);
 591	int rc = 0;
 592
 593	if (!req->iv) {
 594		areq_ctx->gen_ctx.iv_dma_addr = 0;
 595		areq_ctx->gen_ctx.iv = NULL;
 596		goto chain_iv_exit;
 597	}
 598
 599	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
 600	if (!areq_ctx->gen_ctx.iv)
 601		return -ENOMEM;
 602
 603	areq_ctx->gen_ctx.iv_dma_addr =
 604		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
 605			       DMA_BIDIRECTIONAL);
 606	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 607		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 608			hw_iv_size, req->iv);
 609		kzfree(areq_ctx->gen_ctx.iv);
 610		areq_ctx->gen_ctx.iv = NULL;
 611		rc = -ENOMEM;
 612		goto chain_iv_exit;
 613	}
 614
 615	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 616		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
 617	// TODO: what about CTR?? ask Ron
 618	if (do_chain && areq_ctx->plaintext_authenticate_only) {
 619		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 620		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
 621		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
 622		/* Chain to given list */
 623		cc_add_buffer_entry(dev, sg_data,
 624				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
 625				    iv_size_to_authenc, is_last,
 626				    &areq_ctx->assoc.mlli_nents);
 627		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 628	}
 629
 630chain_iv_exit:
 631	return rc;
 632}
 633
 634static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 635			       struct aead_request *req,
 636			       struct buffer_array *sg_data,
 637			       bool is_last, bool do_chain)
 638{
 639	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 640	int rc = 0;
 641	int mapped_nents = 0;
 642	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 643	unsigned int size_of_assoc = areq_ctx->assoclen;
 644	struct device *dev = drvdata_to_dev(drvdata);
 645
 646	if (areq_ctx->is_gcm4543)
 647		size_of_assoc += crypto_aead_ivsize(tfm);
 648
 649	if (!sg_data) {
 650		rc = -EINVAL;
 651		goto chain_assoc_exit;
 652	}
 653
 654	if (areq_ctx->assoclen == 0) {
 655		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
 656		areq_ctx->assoc.nents = 0;
 657		areq_ctx->assoc.mlli_nents = 0;
 658		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
 659			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 660			areq_ctx->assoc.nents);
 661		goto chain_assoc_exit;
 662	}
 663
 664	mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
 665	if (mapped_nents < 0)
 666		return mapped_nents;
 667
 668	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 669		dev_err(dev, "Too many fragments. current %d max %d\n",
 670			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 671		return -ENOMEM;
 672	}
 673	areq_ctx->assoc.nents = mapped_nents;
 674
 675	/* in CCM case we have additional entry for
 676	 * ccm header configurations
 677	 */
 678	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 679		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 680			dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
 681				(areq_ctx->assoc.nents + 1),
 682				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 683			rc = -ENOMEM;
 684			goto chain_assoc_exit;
 685		}
 686	}
 687
 688	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
 689		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
 690	else
 691		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 692
 693	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 694		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
 695			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 696			areq_ctx->assoc.nents);
 697		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
 698				areq_ctx->assoclen, 0, is_last,
 699				&areq_ctx->assoc.mlli_nents);
 700		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 701	}
 702
 703chain_assoc_exit:
 704	return rc;
 705}
 706
 707static void cc_prepare_aead_data_dlli(struct aead_request *req,
 708				      u32 *src_last_bytes, u32 *dst_last_bytes)
 709{
 710	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 711	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 712	unsigned int authsize = areq_ctx->req_authsize;
 713	struct scatterlist *sg;
 714	ssize_t offset;
 715
 716	areq_ctx->is_icv_fragmented = false;
 717
 718	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 719		sg = areq_ctx->src_sgl;
 720		offset = *src_last_bytes - authsize;
 721	} else {
 722		sg = areq_ctx->dst_sgl;
 723		offset = *dst_last_bytes - authsize;
 724	}
 725
 726	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
 727	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
 728}
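
/*
 * Editorial note (illustrative): in the DLLI case the ICV simply sits at the
 * tail of the single data entry. For example, with a 100-byte last entry and
 * authsize == 16, the ICV starts 84 bytes into that entry, so icv_dma_addr
 * and icv_virt_addr are the entry's addresses plus 84.
 */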
 729
 730static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
 731				      struct aead_request *req,
 732				      struct buffer_array *sg_data,
 733				      u32 *src_last_bytes, u32 *dst_last_bytes,
 734				      bool is_last_table)
 735{
 736	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 737	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 738	unsigned int authsize = areq_ctx->req_authsize;
 739	struct device *dev = drvdata_to_dev(drvdata);
 740	struct scatterlist *sg;
 741
 742	if (req->src == req->dst) {
 743		/*INPLACE*/
 744		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 745				areq_ctx->src_sgl, areq_ctx->cryptlen,
 746				areq_ctx->src_offset, is_last_table,
 747				&areq_ctx->src.mlli_nents);
 748
 749		areq_ctx->is_icv_fragmented =
 750			cc_is_icv_frag(areq_ctx->src.nents, authsize,
 751				       *src_last_bytes);
 752
 753		if (areq_ctx->is_icv_fragmented) {
 754			/* Backup happens only when ICV is fragmented, ICV
 755			 * verification is made by CPU compare in order to
 756			 * simplify MAC verification upon request completion
 757			 */
 758			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 759				/* On coherent platforms (e.g. ACP) the
 760				 * ICV has already been copied for any
 761				 * in-place decrypt operation, so we must
 762				 * skip the copy here.
 763				 */
 764				if (!drvdata->coherent)
 765					cc_copy_mac(dev, req, CC_SG_TO_BUF);
 766
 767				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 768			} else {
 769				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 770				areq_ctx->icv_dma_addr =
 771					areq_ctx->mac_buf_dma_addr;
 772			}
 773		} else { /* Contig. ICV */
 774			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
 775			/* Should handle the case of a non-contiguous sg. */
 776			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 777				(*src_last_bytes - authsize);
 778			areq_ctx->icv_virt_addr = sg_virt(sg) +
 779				(*src_last_bytes - authsize);
 780		}
 781
 782	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 783		/*NON-INPLACE and DECRYPT*/
 784		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 785				areq_ctx->src_sgl, areq_ctx->cryptlen,
 786				areq_ctx->src_offset, is_last_table,
 787				&areq_ctx->src.mlli_nents);
 788		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 789				areq_ctx->dst_sgl, areq_ctx->cryptlen,
 790				areq_ctx->dst_offset, is_last_table,
 791				&areq_ctx->dst.mlli_nents);
 792
 793		areq_ctx->is_icv_fragmented =
 794			cc_is_icv_frag(areq_ctx->src.nents, authsize,
 795				       *src_last_bytes);
 796		/* Backup happens only when ICV is fragmented, ICV
 798		 * verification is made by CPU compare in order to simplify
 799		 * MAC verification upon request completion
 800		 */
 801		if (areq_ctx->is_icv_fragmented) {
 802			cc_copy_mac(dev, req, CC_SG_TO_BUF);
 803			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 804
 805		} else { /* Contig. ICV */
 806			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
 807			/* Should handle the case of a non-contiguous sg. */
 808			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 809				(*src_last_bytes - authsize);
 810			areq_ctx->icv_virt_addr = sg_virt(sg) +
 811				(*src_last_bytes - authsize);
 812		}
 813
 814	} else {
 815		/*NON-INPLACE and ENCRYPT*/
 816		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 817				areq_ctx->dst_sgl, areq_ctx->cryptlen,
 818				areq_ctx->dst_offset, is_last_table,
 819				&areq_ctx->dst.mlli_nents);
 820		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 821				areq_ctx->src_sgl, areq_ctx->cryptlen,
 822				areq_ctx->src_offset, is_last_table,
 823				&areq_ctx->src.mlli_nents);
 824
 825		areq_ctx->is_icv_fragmented =
 826			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
 827				       *dst_last_bytes);
 828
 829		if (!areq_ctx->is_icv_fragmented) {
 830			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
 831			/* Contig. ICV */
 832			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 833				(*dst_last_bytes - authsize);
 834			areq_ctx->icv_virt_addr = sg_virt(sg) +
 835				(*dst_last_bytes - authsize);
 836		} else {
 837			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
 838			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 839		}
 840	}
 841}
 842
 843static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 844			      struct aead_request *req,
 845			      struct buffer_array *sg_data,
 846			      bool is_last_table, bool do_chain)
 847{
 848	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 849	struct device *dev = drvdata_to_dev(drvdata);
 850	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 851	unsigned int authsize = areq_ctx->req_authsize;
 852	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
 853	int rc = 0;
 854	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
 855	u32 offset = 0;
 856	/* non-inplace mode */
 857	unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
 858	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 859	u32 sg_index = 0;
 860	bool is_gcm4543 = areq_ctx->is_gcm4543;
 861	u32 size_to_skip = areq_ctx->assoclen;
 862	struct scatterlist *sgl;
 863
 864	if (is_gcm4543)
 865		size_to_skip += crypto_aead_ivsize(tfm);
 866
 867	offset = size_to_skip;
 868
 869	if (!sg_data)
 870		return -EINVAL;
 871
 872	areq_ctx->src_sgl = req->src;
 873	areq_ctx->dst_sgl = req->dst;
 874
 875	if (is_gcm4543)
 876		size_for_map += crypto_aead_ivsize(tfm);
 877
 878	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 879			authsize : 0;
 880	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
 881					    &src_last_bytes);
 882	sg_index = areq_ctx->src_sgl->length;
 883	//check where the data starts
 884	while (sg_index <= size_to_skip) {
 885		src_mapped_nents--;
 886		offset -= areq_ctx->src_sgl->length;
 887		sgl = sg_next(areq_ctx->src_sgl);
 888		if (!sgl)
 889			break;
 890		areq_ctx->src_sgl = sgl;
 891		sg_index += areq_ctx->src_sgl->length;
 892	}
 893	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 894		dev_err(dev, "Too many fragments. current %d max %d\n",
 895			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 896		return -ENOMEM;
 897	}
 898
 899	areq_ctx->src.nents = src_mapped_nents;
 900
 901	areq_ctx->src_offset = offset;
 902
 903	if (req->src != req->dst) {
 904		size_for_map = areq_ctx->assoclen + req->cryptlen;
 905		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 906				authsize : 0;
 907		if (is_gcm4543)
 908			size_for_map += crypto_aead_ivsize(tfm);
 909
 910		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
 911			       &areq_ctx->dst.nents,
 912			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 913			       &dst_mapped_nents);
 914		if (rc)
 915			goto chain_data_exit;
 916	}
 917
 918	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
 919					    &dst_last_bytes);
 920	sg_index = areq_ctx->dst_sgl->length;
 921	offset = size_to_skip;
 922
 923	//check where the data starts
 924	while (sg_index <= size_to_skip) {
 925		dst_mapped_nents--;
 926		offset -= areq_ctx->dst_sgl->length;
 927		sgl = sg_next(areq_ctx->dst_sgl);
 928		if (!sgl)
 929			break;
 930		areq_ctx->dst_sgl = sgl;
 931		sg_index += areq_ctx->dst_sgl->length;
 932	}
 933	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 934		dev_err(dev, "Too many fragments. current %d max %d\n",
 935			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 936		return -ENOMEM;
 937	}
 938	areq_ctx->dst.nents = dst_mapped_nents;
 939	areq_ctx->dst_offset = offset;
 940	if (src_mapped_nents > 1 ||
 941	    dst_mapped_nents  > 1 ||
 942	    do_chain) {
 943		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
 944		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
 945					  &src_last_bytes, &dst_last_bytes,
 946					  is_last_table);
 947	} else {
 948		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
 949		cc_prepare_aead_data_dlli(req, &src_last_bytes,
 950					  &dst_last_bytes);
 951	}
 952
 953chain_data_exit:
 954	return rc;
 955}
 956
 957static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
 958				      struct aead_request *req)
 959{
 960	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 961	u32 curr_mlli_size = 0;
 962
 963	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 964		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
 965		curr_mlli_size = areq_ctx->assoc.mlli_nents *
 966						LLI_ENTRY_BYTE_SIZE;
 967	}
 968
 969	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
 970		/* In-place case: dst nents equal src nents */
 971		if (req->src == req->dst) {
 972			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
 973			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
 974								curr_mlli_size;
 975			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
 976			if (!areq_ctx->is_single_pass)
 977				areq_ctx->assoc.mlli_nents +=
 978					areq_ctx->src.mlli_nents;
 979		} else {
 980			if (areq_ctx->gen_ctx.op_type ==
 981					DRV_CRYPTO_DIRECTION_DECRYPT) {
 982				areq_ctx->src.sram_addr =
 983						drvdata->mlli_sram_addr +
 984								curr_mlli_size;
 985				areq_ctx->dst.sram_addr =
 986						areq_ctx->src.sram_addr +
 987						areq_ctx->src.mlli_nents *
 988						LLI_ENTRY_BYTE_SIZE;
 989				if (!areq_ctx->is_single_pass)
 990					areq_ctx->assoc.mlli_nents +=
 991						areq_ctx->src.mlli_nents;
 992			} else {
 993				areq_ctx->dst.sram_addr =
 994						drvdata->mlli_sram_addr +
 995								curr_mlli_size;
 996				areq_ctx->src.sram_addr =
 997						areq_ctx->dst.sram_addr +
 998						areq_ctx->dst.mlli_nents *
 999						LLI_ENTRY_BYTE_SIZE;
1000				if (!areq_ctx->is_single_pass)
1001					areq_ctx->assoc.mlli_nents +=
1002						areq_ctx->dst.mlli_nents;
1003			}
1004		}
1005	}
1006}
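
/*
 * Worked example (editorial, hypothetical sizes, assuming LLI_ENTRY_BYTE_SIZE
 * is 8): for a non-inplace decrypt with assoc.mlli_nents == 2 and
 * src.mlli_nents == 3, the assoc table sits at mlli_sram_addr, the src table
 * at mlli_sram_addr + 16, and the dst table at mlli_sram_addr + 16 + 24.
 * In the in-place case src and dst simply share the same SRAM table.
 */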
1007
1008int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
1009{
1010	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1011	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1012	struct device *dev = drvdata_to_dev(drvdata);
1013	struct buffer_array sg_data;
1014	unsigned int authsize = areq_ctx->req_authsize;
1015	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1016	int rc = 0;
1017	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1018	bool is_gcm4543 = areq_ctx->is_gcm4543;
1019	dma_addr_t dma_addr;
1020	u32 mapped_nents = 0;
1021	u32 dummy = 0; /*used for the assoc data fragments */
1022	u32 size_to_map = 0;
1023	gfp_t flags = cc_gfp_flags(&req->base);
1024
1025	mlli_params->curr_pool = NULL;
1026	sg_data.num_of_buffers = 0;
1027
1028	/* Copy the MAC to a temporary location to deal with a possible data
1029	 * memory overwrite caused by cache coherence problems.
1030	 */
1031	if (drvdata->coherent &&
1032	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
1033	    req->src == req->dst)
1034		cc_copy_mac(dev, req, CC_SG_TO_BUF);
1035
1036	/* Calculate the size for the cipher; the ICV is removed on decrypt */
1037	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
1038				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1039				req->cryptlen :
1040				(req->cryptlen - authsize);
1041
1042	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
1043				  DMA_BIDIRECTIONAL);
1044	if (dma_mapping_error(dev, dma_addr)) {
1045		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
1046			MAX_MAC_SIZE, areq_ctx->mac_buf);
1047		rc = -ENOMEM;
1048		goto aead_map_failure;
1049	}
1050	areq_ctx->mac_buf_dma_addr = dma_addr;
1051
1052	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
1053		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1054
1055		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
1056					  DMA_TO_DEVICE);
1057
1058		if (dma_mapping_error(dev, dma_addr)) {
1059			dev_err(dev, "Mapping ccm_config %u B at va=%pK for DMA failed\n",
1060				AES_BLOCK_SIZE, addr);
1061			areq_ctx->ccm_iv0_dma_addr = 0;
1062			rc = -ENOMEM;
1063			goto aead_map_failure;
1064		}
1065		areq_ctx->ccm_iv0_dma_addr = dma_addr;
1066
1067		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
1068					  &sg_data, areq_ctx->assoclen);
1069		if (rc)
1070			goto aead_map_failure;
1071	}
1072
1073	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1074		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
1075					  DMA_BIDIRECTIONAL);
1076		if (dma_mapping_error(dev, dma_addr)) {
1077			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
1078				AES_BLOCK_SIZE, areq_ctx->hkey);
1079			rc = -ENOMEM;
1080			goto aead_map_failure;
1081		}
1082		areq_ctx->hkey_dma_addr = dma_addr;
1083
1084		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
1085					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1086		if (dma_mapping_error(dev, dma_addr)) {
1087			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1088				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1089			rc = -ENOMEM;
1090			goto aead_map_failure;
1091		}
1092		areq_ctx->gcm_block_len_dma_addr = dma_addr;
1093
1094		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
1095					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1096
1097		if (dma_mapping_error(dev, dma_addr)) {
1098			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1099				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
1100			areq_ctx->gcm_iv_inc1_dma_addr = 0;
1101			rc = -ENOMEM;
1102			goto aead_map_failure;
1103		}
1104		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
1105
1106		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
1107					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1108
1109		if (dma_mapping_error(dev, dma_addr)) {
1110			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1111				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
1112			areq_ctx->gcm_iv_inc2_dma_addr = 0;
1113			rc = -ENOMEM;
1114			goto aead_map_failure;
1115		}
1116		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
1117	}
1118
1119	size_to_map = req->cryptlen + areq_ctx->assoclen;
1120	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
1121		size_to_map += authsize;
1122
1123	if (is_gcm4543)
1124		size_to_map += crypto_aead_ivsize(tfm);
1125	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
1126		       &areq_ctx->src.nents,
1127		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
1128			LLI_MAX_NUM_OF_DATA_ENTRIES),
1129		       &dummy, &mapped_nents);
1130	if (rc)
1131		goto aead_map_failure;
1132
1133	if (areq_ctx->is_single_pass) {
1134		/*
1135		 * Create MLLI table for:
1136		 *   (1) Assoc. data
1137		 *   (2) Src/Dst SGLs
1138		 *   Note: IV is a contiguous buffer (not an SGL)
1139		 */
1140		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1141		if (rc)
1142			goto aead_map_failure;
1143		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
1144		if (rc)
1145			goto aead_map_failure;
1146		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
1147		if (rc)
1148			goto aead_map_failure;
1149	} else { /* DOUBLE-PASS flow */
1150		/*
1151		 * Prepare MLLI table(s) in this order:
1152		 *
1153		 * If ENCRYPT/DECRYPT (inplace):
1154		 *   (1) MLLI table for assoc
1155		 *   (2) IV entry (chained right after end of assoc)
1156		 *   (3) MLLI for src/dst (inplace operation)
1157		 *
1158		 * If ENCRYPT (non-inplace)
1159		 *   (1) MLLI table for assoc
1160		 *   (2) IV entry (chained right after end of assoc)
1161		 *   (3) MLLI for dst
1162		 *   (4) MLLI for src
1163		 *
1164		 * If DECRYPT (non-inplace)
1165		 *   (1) MLLI table for assoc
1166		 *   (2) IV entry (chained right after end of assoc)
1167		 *   (3) MLLI for src
1168		 *   (4) MLLI for dst
1169		 */
1170		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1171		if (rc)
1172			goto aead_map_failure;
1173		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
1174		if (rc)
1175			goto aead_map_failure;
1176		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
1177		if (rc)
1178			goto aead_map_failure;
1179	}
1180
1181	/* MLLI support - start building the MLLI according to the above
1182	 * results
1183	 */
1184	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1185	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1186		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1187		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1188		if (rc)
1189			goto aead_map_failure;
1190
1191		cc_update_aead_mlli_nents(drvdata, req);
1192		dev_dbg(dev, "assoc params mn %d\n",
1193			areq_ctx->assoc.mlli_nents);
1194		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
1195		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
1196	}
1197	return 0;
1198
1199aead_map_failure:
1200	cc_unmap_aead_request(dev, req);
1201	return rc;
1202}
1203
1204int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
1205			      struct scatterlist *src, unsigned int nbytes,
1206			      bool do_update, gfp_t flags)
1207{
1208	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1209	struct device *dev = drvdata_to_dev(drvdata);
1210	u8 *curr_buff = cc_hash_buf(areq_ctx);
1211	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1212	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1213	struct buffer_array sg_data;
1214	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1215	int rc = 0;
1216	u32 dummy = 0;
1217	u32 mapped_nents = 0;
1218
1219	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
1220		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1221	/* Init the type of the dma buffer */
1222	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1223	mlli_params->curr_pool = NULL;
1224	sg_data.num_of_buffers = 0;
1225	areq_ctx->in_nents = 0;
1226
1227	if (nbytes == 0 && *curr_buff_cnt == 0) {
1228		/* nothing to do */
1229		return 0;
1230	}
1231
1232	/* TODO: copy the data when the buffer is large enough for the operation */
1233	/* map the previous buffer */
1234	if (*curr_buff_cnt) {
1235		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1236				     &sg_data);
1237		if (rc)
1238			return rc;
1239	}
1240
1241	if (src && nbytes > 0 && do_update) {
1242		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
1243			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
1244			       &dummy, &mapped_nents);
1245		if (rc)
1246			goto unmap_curr_buff;
1247		if (src && mapped_nents == 1 &&
1248		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1249			memcpy(areq_ctx->buff_sg, src,
1250			       sizeof(struct scatterlist));
1251			areq_ctx->buff_sg->length = nbytes;
1252			areq_ctx->curr_sg = areq_ctx->buff_sg;
1253			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1254		} else {
1255			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1256		}
1257	}
1258
1259	/*build mlli */
1260	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1261		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1262		/* add the src data to the sg_data */
1263		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
1264				0, true, &areq_ctx->mlli_nents);
1265		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1266		if (rc)
1267			goto fail_unmap_din;
1268	}
1269	/* change the buffer index for the unmap function */
1270	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1271	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
1272		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
1273	return 0;
1274
1275fail_unmap_din:
1276	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1277
1278unmap_curr_buff:
1279	if (*curr_buff_cnt)
1280		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1281
1282	return rc;
1283}
1284
1285int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
1286			       struct scatterlist *src, unsigned int nbytes,
1287			       unsigned int block_size, gfp_t flags)
1288{
1289	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1290	struct device *dev = drvdata_to_dev(drvdata);
1291	u8 *curr_buff = cc_hash_buf(areq_ctx);
1292	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1293	u8 *next_buff = cc_next_buf(areq_ctx);
1294	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
1295	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1296	unsigned int update_data_len;
1297	u32 total_in_len = nbytes + *curr_buff_cnt;
1298	struct buffer_array sg_data;
1299	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1300	unsigned int swap_index = 0;
1301	int rc = 0;
1302	u32 dummy = 0;
1303	u32 mapped_nents = 0;
1304
1305	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
1306		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1307	/* Init the type of the dma buffer */
1308	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1309	mlli_params->curr_pool = NULL;
1310	areq_ctx->curr_sg = NULL;
1311	sg_data.num_of_buffers = 0;
1312	areq_ctx->in_nents = 0;
1313
1314	if (total_in_len < block_size) {
1315		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
1316			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
1317		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
1318		sg_copy_to_buffer(src, areq_ctx->in_nents,
1319				  &curr_buff[*curr_buff_cnt], nbytes);
1320		*curr_buff_cnt += nbytes;
1321		return 1;
1322	}
1323
1324	/* Calculate the residue size*/
1325	*next_buff_cnt = total_in_len & (block_size - 1);
1326	/* update data len */
1327	update_data_len = total_in_len - *next_buff_cnt;
1328
1329	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
1330		*next_buff_cnt, update_data_len);
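
	/*
	 * Editorial worked example (hypothetical numbers): with
	 * block_size == 64, *curr_buff_cnt == 22 and nbytes == 128,
	 * total_in_len is 150, so *next_buff_cnt = 150 & 63 = 22 and
	 * update_data_len = 150 - 22 = 128, i.e. two full blocks are hashed
	 * now and 22 residue bytes are carried over to the next update.
	 */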
1331
1332	/* Copy the new residue to next buffer */
1333	if (*next_buff_cnt) {
1334		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
1335			next_buff, (update_data_len - *curr_buff_cnt),
1336			*next_buff_cnt);
1337		cc_copy_sg_portion(dev, next_buff, src,
1338				   (update_data_len - *curr_buff_cnt),
1339				   nbytes, CC_SG_TO_BUF);
1340		/* change the buffer index for next operation */
1341		swap_index = 1;
1342	}
1343
1344	if (*curr_buff_cnt) {
1345		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1346				     &sg_data);
1347		if (rc)
1348			return rc;
1349		/* change the buffer index for next operation */
1350		swap_index = 1;
1351	}
1352
1353	if (update_data_len > *curr_buff_cnt) {
1354		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
1355			       DMA_TO_DEVICE, &areq_ctx->in_nents,
1356			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
1357			       &mapped_nents);
1358		if (rc)
1359			goto unmap_curr_buff;
1360		if (mapped_nents == 1 &&
1361		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1362			/* only one entry in the SG and no previous data */
1363			memcpy(areq_ctx->buff_sg, src,
1364			       sizeof(struct scatterlist));
1365			areq_ctx->buff_sg->length = update_data_len;
1366			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1367			areq_ctx->curr_sg = areq_ctx->buff_sg;
1368		} else {
1369			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1370		}
1371	}
1372
1373	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1374		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1375		/* add the src data to the sg_data */
1376		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
1377				(update_data_len - *curr_buff_cnt), 0, true,
1378				&areq_ctx->mlli_nents);
1379		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1380		if (rc)
1381			goto fail_unmap_din;
1382	}
1383	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1384
1385	return 0;
1386
1387fail_unmap_din:
1388	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1389
1390unmap_curr_buff:
1391	if (*curr_buff_cnt)
1392		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1393
1394	return rc;
1395}
1396
1397void cc_unmap_hash_request(struct device *dev, void *ctx,
1398			   struct scatterlist *src, bool do_revert)
1399{
1400	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1401	u32 *prev_len = cc_next_buf_cnt(areq_ctx);
1402
1403	/*In case a pool was set, a table was
1404	 *allocated and should be released
1405	 */
1406	if (areq_ctx->mlli_params.curr_pool) {
1407		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
1408			&areq_ctx->mlli_params.mlli_dma_addr,
1409			areq_ctx->mlli_params.mlli_virt_addr);
1410		dma_pool_free(areq_ctx->mlli_params.curr_pool,
1411			      areq_ctx->mlli_params.mlli_virt_addr,
1412			      areq_ctx->mlli_params.mlli_dma_addr);
1413	}
1414
1415	if (src && areq_ctx->in_nents) {
1416		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
1417			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
1418		dma_unmap_sg(dev, src,
1419			     areq_ctx->in_nents, DMA_TO_DEVICE);
1420	}
1421
1422	if (*prev_len) {
1423		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
1424			sg_virt(areq_ctx->buff_sg),
1425			&sg_dma_address(areq_ctx->buff_sg),
1426			sg_dma_len(areq_ctx->buff_sg));
1427		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1428		if (!do_revert) {
1429			/* clean the previous data length for update
1430			 * operation
1431			 */
1432			*prev_len = 0;
1433		} else {
1434			areq_ctx->buff_index ^= 1;
1435		}
1436	}
1437}
1438
1439int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
1440{
1441	struct buff_mgr_handle *buff_mgr_handle;
1442	struct device *dev = drvdata_to_dev(drvdata);
1443
1444	buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
1445	if (!buff_mgr_handle)
1446		return -ENOMEM;
1447
1448	drvdata->buff_mgr_handle = buff_mgr_handle;
1449
1450	buff_mgr_handle->mlli_buffs_pool =
1451		dma_pool_create("dx_single_mlli_tables", dev,
1452				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1453				LLI_ENTRY_BYTE_SIZE,
1454				MLLI_TABLE_MIN_ALIGNMENT, 0);
1455
1456	if (!buff_mgr_handle->mlli_buffs_pool)
1457		goto error;
1458
1459	return 0;
1460
1461error:
1462	cc_buffer_mgr_fini(drvdata);
1463	return -ENOMEM;
1464}
1465
1466int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
1467{
1468	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
1469
1470	if (buff_mgr_handle) {
1471		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
1472		kfree(drvdata->buff_mgr_handle);
1473		drvdata->buff_mgr_handle = NULL;
1474	}
1475	return 0;
1476}