v5.9 (drivers/crypto/ccree/cc_buffer_mgr.c)
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
   3
   4#include <crypto/internal/aead.h>
   5#include <crypto/authenc.h>
   6#include <crypto/scatterwalk.h>
   7#include <linux/dmapool.h>
   8#include <linux/dma-mapping.h>
   9
  10#include "cc_buffer_mgr.h"
  11#include "cc_lli_defs.h"
  12#include "cc_cipher.h"
  13#include "cc_hash.h"
  14#include "cc_aead.h"
  15
  16union buffer_array_entry {
  17	struct scatterlist *sgl;
  18	dma_addr_t buffer_dma;
  19};
  20
  21struct buffer_array {
  22	unsigned int num_of_buffers;
  23	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
  24	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
  25	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  26	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
  27	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
  28	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  29};
  30
  31static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
  32{
  33	switch (type) {
  34	case CC_DMA_BUF_NULL:
  35		return "BUF_NULL";
  36	case CC_DMA_BUF_DLLI:
  37		return "BUF_DLLI";
  38	case CC_DMA_BUF_MLLI:
  39		return "BUF_MLLI";
  40	default:
  41		return "BUF_INVALID";
  42	}
  43}
  44
  45/**
  46 * cc_copy_mac() - Copy MAC to temporary location
  47 *
  48 * @dev: device object
  49 * @req: aead request object
  50 * @dir: [IN] copy from/to sgl
  51 */
  52static void cc_copy_mac(struct device *dev, struct aead_request *req,
  53			enum cc_sg_cpy_direct dir)
  54{
  55	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  56	u32 skip = req->assoclen + req->cryptlen;
  57
  58	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
  59			   (skip - areq_ctx->req_authsize), skip, dir);
  60}
  61
  62/**
  63 * cc_get_sgl_nents() - Get scatterlist number of entries.
  64 *
  65 * @dev: Device object
  66 * @sg_list: SG list
  67 * @nbytes: [IN] Total SGL data bytes.
  68 * @lbytes: [OUT] Returns the amount of bytes at the last entry
  69 *
  70 * Return:
  71 * Number of entries in the scatterlist
  72 */
  73static unsigned int cc_get_sgl_nents(struct device *dev,
  74				     struct scatterlist *sg_list,
  75				     unsigned int nbytes, u32 *lbytes)
  76{
  77	unsigned int nents = 0;
  78
  79	*lbytes = 0;
  80
  81	while (nbytes && sg_list) {
  82		nents++;
  83		/* get the number of bytes in the last entry */
  84		*lbytes = nbytes;
  85		nbytes -= (sg_list->length > nbytes) ?
  86				nbytes : sg_list->length;
  87		sg_list = sg_next(sg_list);
  88	}
  89
  90	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
  91	return nents;
  92}
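/*
 * Illustrative sketch, not part of the driver (dev and sg stand for the
 * device and the mapped SG list): for an SG list with three entries of
 * 4096, 4096 and 100 bytes and nbytes == 8292, the walk above visits all
 * three entries:
 *
 *	u32 lbytes;
 *	unsigned int nents = cc_get_sgl_nents(dev, sg, 8292, &lbytes);
 *	// nents == 3, lbytes == 100 (bytes consumed from the last entry)
 *
 * Entries beyond the first nbytes of payload are not counted.
 */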
  93
  94/**
  95 * cc_copy_sg_portion() - Copy scatter list data,
  96 * from to_skip to end, to dest and vice versa
  97 *
  98 * @dev: Device object
  99 * @dest: Buffer to copy to/from
 100 * @sg: SG list
 101 * @to_skip: Number of bytes to skip before copying
 102 * @end: Offset of last byte to copy
 103 * @direct: Transfer direction (true == from SG list to buffer, false == from
 104 *          buffer to SG list)
 105 */
 106void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 107			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
 108{
 109	u32 nents;
 110
 111	nents = sg_nents_for_len(sg, end);
 112	sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
 113		       (direct == CC_SG_TO_BUF));
 114}
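/*
 * Illustrative sketch, not part of the driver: cc_copy_mac() above uses
 * this helper to stash the last req_authsize bytes of req->src (the ICV)
 * in a flat buffer:
 *
 *	u32 skip = req->assoclen + req->cryptlen;
 *
 *	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
 *			   skip - areq_ctx->req_authsize, skip,
 *			   CC_SG_TO_BUF);
 *
 * Passing CC_SG_FROM_BUF instead copies the same bytes back from the flat
 * buffer into the scatterlist.
 */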
 115
 116static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
 117				  u32 buff_size, u32 *curr_nents,
 118				  u32 **mlli_entry_pp)
 119{
 120	u32 *mlli_entry_p = *mlli_entry_pp;
 121	u32 new_nents;
 122
 123	/* Verify there is no memory overflow*/
 124	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
 125	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
 126		dev_err(dev, "Too many mlli entries. current %d max %d\n",
 127			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
 128		return -ENOMEM;
 129	}
 130
 131	/*handle buffer longer than 64 kbytes */
 132	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
 133		cc_lli_set_addr(mlli_entry_p, buff_dma);
 134		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
 135		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 136			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 137			mlli_entry_p[LLI_WORD1_OFFSET]);
 138		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
 139		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
 140		mlli_entry_p = mlli_entry_p + 2;
 141		(*curr_nents)++;
 142	}
 143	/*Last entry */
 144	cc_lli_set_addr(mlli_entry_p, buff_dma);
 145	cc_lli_set_size(mlli_entry_p, buff_size);
 146	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 147		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 148		mlli_entry_p[LLI_WORD1_OFFSET]);
 149	mlli_entry_p = mlli_entry_p + 2;
 150	*mlli_entry_pp = mlli_entry_p;
 151	(*curr_nents)++;
 152	return 0;
 153}
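/*
 * Illustrative sketch, not part of the driver: a contiguous DMA buffer is
 * split into LLI entries of at most CC_MAX_MLLI_ENTRY_SIZE bytes each.
 * Taking the "64 kbytes" from the comment above, a 130 KB buffer becomes
 * three entries (mlli_table below is a placeholder for the table memory):
 *
 *	u32 nents = 0;
 *	u32 *p = mlli_table;
 *
 *	cc_render_buff_to_mlli(dev, buff_dma, 130 * 1024, &nents, &p);
 *	// entry 0: buff_dma,           ~64 KB
 *	// entry 1: buff_dma + ~64 KB,  ~64 KB
 *	// entry 2: the remaining tail
 *	// nents == 3, p points just past the last entry written
 */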
 154
 155static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
 156				u32 sgl_data_len, u32 sgl_offset,
 157				u32 *curr_nents, u32 **mlli_entry_pp)
 158{
 159	struct scatterlist *curr_sgl = sgl;
 160	u32 *mlli_entry_p = *mlli_entry_pp;
 161	s32 rc = 0;
 162
 163	for ( ; (curr_sgl && sgl_data_len);
 164	      curr_sgl = sg_next(curr_sgl)) {
 165		u32 entry_data_len =
 166			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
 167				sg_dma_len(curr_sgl) - sgl_offset :
 168				sgl_data_len;
 169		sgl_data_len -= entry_data_len;
 170		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
 171					    sgl_offset, entry_data_len,
 172					    curr_nents, &mlli_entry_p);
 173		if (rc)
 174			return rc;
 175
 176		sgl_offset = 0;
 177	}
 178	*mlli_entry_pp = mlli_entry_p;
 179	return 0;
 180}
 181
 182static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
 183			    struct mlli_params *mlli_params, gfp_t flags)
 184{
 185	u32 *mlli_p;
 186	u32 total_nents = 0, prev_total_nents = 0;
 187	int rc = 0, i;
 188
 189	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
 190
 191	/* Allocate memory from the pointed pool */
 192	mlli_params->mlli_virt_addr =
 193		dma_pool_alloc(mlli_params->curr_pool, flags,
 194			       &mlli_params->mlli_dma_addr);
 195	if (!mlli_params->mlli_virt_addr) {
 196		dev_err(dev, "dma_pool_alloc() failed\n");
 197		rc = -ENOMEM;
 198		goto build_mlli_exit;
 199	}
 200	/* Point to start of MLLI */
 201	mlli_p = mlli_params->mlli_virt_addr;
 202	/* go over all SG's and link it to one MLLI table */
 203	for (i = 0; i < sg_data->num_of_buffers; i++) {
 204		union buffer_array_entry *entry = &sg_data->entry[i];
 205		u32 tot_len = sg_data->total_data_len[i];
 206		u32 offset = sg_data->offset[i];
 207
 208		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
 209					  &total_nents, &mlli_p);
 210		if (rc)
 211			return rc;
 212
 213		/* set last bit in the current table */
 214		if (sg_data->mlli_nents[i]) {
 215			/*Calculate the current MLLI table length for the
 216			 *length field in the descriptor
 217			 */
 218			*sg_data->mlli_nents[i] +=
 219				(total_nents - prev_total_nents);
 220			prev_total_nents = total_nents;
 221		}
 222	}
 223
 224	/* Set MLLI size for the bypass operation */
 225	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
 226
 227	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
 228		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
 229		mlli_params->mlli_len);
 230
 231build_mlli_exit:
 232	return rc;
 233}
 234
 235static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 236			    unsigned int nents, struct scatterlist *sgl,
 237			    unsigned int data_len, unsigned int data_offset,
 238			    bool is_last_table, u32 *mlli_nents)
 239{
 240	unsigned int index = sgl_data->num_of_buffers;
 241
 242	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
 243		index, nents, sgl, data_len, is_last_table);
 244	sgl_data->nents[index] = nents;
 245	sgl_data->entry[index].sgl = sgl;
 246	sgl_data->offset[index] = data_offset;
 247	sgl_data->total_data_len[index] = data_len;
 248	sgl_data->is_last[index] = is_last_table;
 249	sgl_data->mlli_nents[index] = mlli_nents;
 250	if (sgl_data->mlli_nents[index])
 251		*sgl_data->mlli_nents[index] = 0;
 252	sgl_data->num_of_buffers++;
 253}
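/*
 * Illustrative sketch, not part of the driver (local names are
 * placeholders): callers below collect every SG list that should end up
 * in one MLLI table by repeated calls like the following and then hand
 * the array to cc_generate_mlli():
 *
 *	struct buffer_array sg_data;
 *
 *	sg_data.num_of_buffers = 0;
 *	cc_add_sg_entry(dev, &sg_data, src_nents, src_sgl, data_len, 0,
 *			true, &mlli_nents);
 *	// sg_data.num_of_buffers == 1; mlli_nents is reset to 0 here and
 *	// filled in later by cc_generate_mlli()
 */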
 254
 255static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 256		     unsigned int nbytes, int direction, u32 *nents,
 257		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 258{
 259	int ret = 0;
 260
 261	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
 262	if (*nents > max_sg_nents) {
 263		*nents = 0;
 264		dev_err(dev, "Too many fragments. current %d max %d\n",
 265			*nents, max_sg_nents);
 266		return -ENOMEM;
 267	}
 268
 269	ret = dma_map_sg(dev, sg, *nents, direction);
 270	if (dma_mapping_error(dev, ret)) {
 271		*nents = 0;
 272		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
 273		return -ENOMEM;
 274	}
 275
 276	*mapped_nents = ret;
 277
 278	return 0;
 279}
 280
 281static int
 282cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
 283		     u8 *config_data, struct buffer_array *sg_data,
 284		     unsigned int assoclen)
 285{
 286	dev_dbg(dev, " handle additional data config set to DLLI\n");
 287	/* create sg for the current buffer */
 288	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
 289		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
 290	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
 291		dev_err(dev, "dma_map_sg() config buffer failed\n");
 292		return -ENOMEM;
 293	}
 294	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 295		&sg_dma_address(&areq_ctx->ccm_adata_sg),
 296		sg_page(&areq_ctx->ccm_adata_sg),
 297		sg_virt(&areq_ctx->ccm_adata_sg),
 298		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
 299	/* prepare for case of MLLI */
 300	if (assoclen > 0) {
 301		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
 302				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
 303				0, false, NULL);
 304	}
 305	return 0;
 306}
 307
 308static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
 309			   u8 *curr_buff, u32 curr_buff_cnt,
 310			   struct buffer_array *sg_data)
 311{
 312	dev_dbg(dev, " handle curr buff %x set to   DLLI\n", curr_buff_cnt);
 313	/* create sg for the current buffer */
 314	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
 315	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
 316		dev_err(dev, "dma_map_sg() src buffer failed\n");
 317		return -ENOMEM;
 318	}
 319	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 320		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
 321		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
 322		areq_ctx->buff_sg->length);
 323	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
 324	areq_ctx->curr_sg = areq_ctx->buff_sg;
 325	areq_ctx->in_nents = 0;
 326	/* prepare for case of MLLI */
 327	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
 328			false, NULL);
 329	return 0;
 330}
 331
 332void cc_unmap_cipher_request(struct device *dev, void *ctx,
 333				unsigned int ivsize, struct scatterlist *src,
 334				struct scatterlist *dst)
 335{
 336	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 337
 338	if (req_ctx->gen_ctx.iv_dma_addr) {
 339		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 340			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
 341		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
 342				 ivsize, DMA_BIDIRECTIONAL);
 343	}
 344	/* Release pool */
 345	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
 346	    req_ctx->mlli_params.mlli_virt_addr) {
 347		dma_pool_free(req_ctx->mlli_params.curr_pool,
 348			      req_ctx->mlli_params.mlli_virt_addr,
 349			      req_ctx->mlli_params.mlli_dma_addr);
 350	}
 351
 352	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
 353	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 354
 355	if (src != dst) {
 356		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
 357		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
 358	}
 359}
 360
 361int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 362			  unsigned int ivsize, unsigned int nbytes,
 363			  void *info, struct scatterlist *src,
 364			  struct scatterlist *dst, gfp_t flags)
 365{
 366	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 367	struct mlli_params *mlli_params = &req_ctx->mlli_params;
 368	struct device *dev = drvdata_to_dev(drvdata);
 369	struct buffer_array sg_data;
 370	u32 dummy = 0;
 371	int rc = 0;
 372	u32 mapped_nents = 0;
 373
 374	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
 375	mlli_params->curr_pool = NULL;
 376	sg_data.num_of_buffers = 0;
 377
 378	/* Map IV buffer */
 379	if (ivsize) {
 380		dump_byte_array("iv", info, ivsize);
 381		req_ctx->gen_ctx.iv_dma_addr =
 382			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
 383		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
 384			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 385				ivsize, info);
 386			return -ENOMEM;
 387		}
 388		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 389			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
 390	} else {
 391		req_ctx->gen_ctx.iv_dma_addr = 0;
 392	}
 393
 394	/* Map the src SGL */
 395	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
 396		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
 397	if (rc)
 398		goto cipher_exit;
 399	if (mapped_nents > 1)
 400		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 401
 402	if (src == dst) {
 403		/* Handle inplace operation */
 404		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 405			req_ctx->out_nents = 0;
 406			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 407					nbytes, 0, true,
 408					&req_ctx->in_mlli_nents);
 409		}
 410	} else {
 411		/* Map the dst sg */
 412		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
 413			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
 414			       &dummy, &mapped_nents);
 415		if (rc)
 416			goto cipher_exit;
 417		if (mapped_nents > 1)
 418			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 419
 420		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 421			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 422					nbytes, 0, true,
 423					&req_ctx->in_mlli_nents);
 424			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
 425					nbytes, 0, true,
 426					&req_ctx->out_mlli_nents);
 427		}
 428	}
 429
 430	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 431		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 432		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 433		if (rc)
 434			goto cipher_exit;
 435	}
 436
 437	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
 438		cc_dma_buf_type(req_ctx->dma_buf_type));
 439
 440	return 0;
 441
 442cipher_exit:
 443	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
 444	return rc;
 445}
 446
 447void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 448{
 449	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 450	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 451	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 452
 453	if (areq_ctx->mac_buf_dma_addr) {
 454		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
 455				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
 456	}
 457
 458	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
 459		if (areq_ctx->hkey_dma_addr) {
 460			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
 461					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
 462		}
 463
 464		if (areq_ctx->gcm_block_len_dma_addr) {
 465			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
 466					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 467		}
 468
 469		if (areq_ctx->gcm_iv_inc1_dma_addr) {
 470			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
 471					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 472		}
 473
 474		if (areq_ctx->gcm_iv_inc2_dma_addr) {
 475			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
 476					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 477		}
 478	}
 479
 480	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 481		if (areq_ctx->ccm_iv0_dma_addr) {
 482			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
 483					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 484		}
 485
 486		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
 487	}
 488	if (areq_ctx->gen_ctx.iv_dma_addr) {
 489		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 490				 hw_iv_size, DMA_BIDIRECTIONAL);
 491		kfree_sensitive(areq_ctx->gen_ctx.iv);
 492	}
 493
 494	/* Release pool */
 495	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
 496	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
 497	    (areq_ctx->mlli_params.mlli_virt_addr)) {
 498		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
 499			&areq_ctx->mlli_params.mlli_dma_addr,
 500			areq_ctx->mlli_params.mlli_virt_addr);
 501		dma_pool_free(areq_ctx->mlli_params.curr_pool,
 502			      areq_ctx->mlli_params.mlli_virt_addr,
 503			      areq_ctx->mlli_params.mlli_dma_addr);
 504	}
 505
 506	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
 507		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 508		areq_ctx->assoclen, req->cryptlen);
 509
 510	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
 511		     DMA_BIDIRECTIONAL);
 512	if (req->src != req->dst) {
 513		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 514			sg_virt(req->dst));
 515		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
 516			     DMA_BIDIRECTIONAL);
 517	}
 518	if (drvdata->coherent &&
 519	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
 520	    req->src == req->dst) {
  521		/* copy back the MAC from its temporary location to deal with
  522		 * possible data memory overwriting caused by a cache
  523		 * coherence problem.
  524		 */
 525		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
 526	}
 527}
 528
 529static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
 530			   u32 last_entry_data_size)
 531{
 532	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
 533}
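/*
 * Illustrative sketch, not part of the driver: with authsize == 16 and a
 * mapped SG list whose last entry carries only 4 payload bytes, the ICV
 * straddles the last two entries:
 *
 *	cc_is_icv_frag(3, 16, 4);	// true  - ICV is fragmented
 *	cc_is_icv_frag(1, 16, 4);	// false - single-entry SG list
 *	cc_is_icv_frag(3, 16, 20);	// false - ICV fits in the last entry
 */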
 534
 535static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
 536			    struct aead_request *req,
 537			    struct buffer_array *sg_data,
 538			    bool is_last, bool do_chain)
 539{
 540	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 541	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 542	struct device *dev = drvdata_to_dev(drvdata);
 543	gfp_t flags = cc_gfp_flags(&req->base);
 544	int rc = 0;
 545
 546	if (!req->iv) {
 547		areq_ctx->gen_ctx.iv_dma_addr = 0;
 548		areq_ctx->gen_ctx.iv = NULL;
 549		goto chain_iv_exit;
 550	}
 551
 552	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
 553	if (!areq_ctx->gen_ctx.iv)
 554		return -ENOMEM;
 555
 556	areq_ctx->gen_ctx.iv_dma_addr =
 557		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
 558			       DMA_BIDIRECTIONAL);
 559	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 560		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 561			hw_iv_size, req->iv);
 562		kfree_sensitive(areq_ctx->gen_ctx.iv);
 563		areq_ctx->gen_ctx.iv = NULL;
 564		rc = -ENOMEM;
 565		goto chain_iv_exit;
 566	}
 567
 568	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 569		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
 570
 571chain_iv_exit:
 572	return rc;
 573}
 574
 575static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 576			       struct aead_request *req,
 577			       struct buffer_array *sg_data,
 578			       bool is_last, bool do_chain)
 579{
 580	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 581	int rc = 0;
 582	int mapped_nents = 0;
 583	struct device *dev = drvdata_to_dev(drvdata);
 584
 585	if (!sg_data) {
 586		rc = -EINVAL;
 587		goto chain_assoc_exit;
 588	}
 589
 590	if (areq_ctx->assoclen == 0) {
 591		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
 592		areq_ctx->assoc.nents = 0;
 593		areq_ctx->assoc.mlli_nents = 0;
 594		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
 595			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 596			areq_ctx->assoc.nents);
 597		goto chain_assoc_exit;
 598	}
 599
 600	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
 601	if (mapped_nents < 0)
 602		return mapped_nents;
 603
 604	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 605		dev_err(dev, "Too many fragments. current %d max %d\n",
 606			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 607		return -ENOMEM;
 608	}
 609	areq_ctx->assoc.nents = mapped_nents;
 610
 611	/* in CCM case we have additional entry for
 612	 * ccm header configurations
 613	 */
 614	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 615		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 616			dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
 617				(areq_ctx->assoc.nents + 1),
 618				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 619			rc = -ENOMEM;
 620			goto chain_assoc_exit;
 621		}
 622	}
 623
 624	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
 625		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
 626	else
 627		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 628
 629	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 630		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
 631			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 632			areq_ctx->assoc.nents);
 633		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
 634				areq_ctx->assoclen, 0, is_last,
 635				&areq_ctx->assoc.mlli_nents);
 636		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 637	}
 638
 639chain_assoc_exit:
 640	return rc;
 641}
 642
 643static void cc_prepare_aead_data_dlli(struct aead_request *req,
 644				      u32 *src_last_bytes, u32 *dst_last_bytes)
 645{
 646	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 647	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 648	unsigned int authsize = areq_ctx->req_authsize;
 649	struct scatterlist *sg;
 650	ssize_t offset;
 651
 652	areq_ctx->is_icv_fragmented = false;
 653
 654	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 655		sg = areq_ctx->src_sgl;
 656		offset = *src_last_bytes - authsize;
 657	} else {
 658		sg = areq_ctx->dst_sgl;
 659		offset = *dst_last_bytes - authsize;
 660	}
 661
 662	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
 663	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
 664}
 665
 666static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
 667				      struct aead_request *req,
 668				      struct buffer_array *sg_data,
 669				      u32 *src_last_bytes, u32 *dst_last_bytes,
 670				      bool is_last_table)
 671{
 672	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 673	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 674	unsigned int authsize = areq_ctx->req_authsize;
 675	struct device *dev = drvdata_to_dev(drvdata);
 676	struct scatterlist *sg;
 677
 678	if (req->src == req->dst) {
 679		/*INPLACE*/
 680		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 681				areq_ctx->src_sgl, areq_ctx->cryptlen,
 682				areq_ctx->src_offset, is_last_table,
 683				&areq_ctx->src.mlli_nents);
 684
 685		areq_ctx->is_icv_fragmented =
 686			cc_is_icv_frag(areq_ctx->src.nents, authsize,
 687				       *src_last_bytes);
 688
 689		if (areq_ctx->is_icv_fragmented) {
 690			/* Backup happens only when ICV is fragmented, ICV
 691			 * verification is made by CPU compare in order to
 692			 * simplify MAC verification upon request completion
 693			 */
 694			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
  695				/* On coherent platforms (e.g. ACP) the
  696				 * ICV was already copied for any
  697				 * INPLACE-DECRYPT operation, hence we
  698				 * must skip the copy here.
  699				 */
 700				if (!drvdata->coherent)
 701					cc_copy_mac(dev, req, CC_SG_TO_BUF);
 702
 703				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 704			} else {
 705				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 706				areq_ctx->icv_dma_addr =
 707					areq_ctx->mac_buf_dma_addr;
 708			}
 709		} else { /* Contig. ICV */
 710			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  711			/* Should handle if the sg is not contig. */
 712			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 713				(*src_last_bytes - authsize);
 714			areq_ctx->icv_virt_addr = sg_virt(sg) +
 715				(*src_last_bytes - authsize);
 716		}
 717
 718	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 719		/*NON-INPLACE and DECRYPT*/
 720		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 721				areq_ctx->src_sgl, areq_ctx->cryptlen,
 722				areq_ctx->src_offset, is_last_table,
 723				&areq_ctx->src.mlli_nents);
 724		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 725				areq_ctx->dst_sgl, areq_ctx->cryptlen,
 726				areq_ctx->dst_offset, is_last_table,
 727				&areq_ctx->dst.mlli_nents);
 728
 729		areq_ctx->is_icv_fragmented =
 730			cc_is_icv_frag(areq_ctx->src.nents, authsize,
 731				       *src_last_bytes);
 732		/* Backup happens only when ICV is fragmented, ICV
 733
 734		 * verification is made by CPU compare in order to simplify
 735		 * MAC verification upon request completion
 736		 */
 737		if (areq_ctx->is_icv_fragmented) {
 738			cc_copy_mac(dev, req, CC_SG_TO_BUF);
 739			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 740
 741		} else { /* Contig. ICV */
 742			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  743			/* Should handle if the sg is not contig. */
 744			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 745				(*src_last_bytes - authsize);
 746			areq_ctx->icv_virt_addr = sg_virt(sg) +
 747				(*src_last_bytes - authsize);
 748		}
 749
 750	} else {
 751		/*NON-INPLACE and ENCRYPT*/
 752		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 753				areq_ctx->dst_sgl, areq_ctx->cryptlen,
 754				areq_ctx->dst_offset, is_last_table,
 755				&areq_ctx->dst.mlli_nents);
 756		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 757				areq_ctx->src_sgl, areq_ctx->cryptlen,
 758				areq_ctx->src_offset, is_last_table,
 759				&areq_ctx->src.mlli_nents);
 760
 761		areq_ctx->is_icv_fragmented =
 762			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
 763				       *dst_last_bytes);
 764
 765		if (!areq_ctx->is_icv_fragmented) {
 766			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
 767			/* Contig. ICV */
 768			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 769				(*dst_last_bytes - authsize);
 770			areq_ctx->icv_virt_addr = sg_virt(sg) +
 771				(*dst_last_bytes - authsize);
 772		} else {
 773			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
 774			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 775		}
 776	}
 777}
 778
 779static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 780			      struct aead_request *req,
 781			      struct buffer_array *sg_data,
 782			      bool is_last_table, bool do_chain)
 783{
 784	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 785	struct device *dev = drvdata_to_dev(drvdata);
 786	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 787	unsigned int authsize = areq_ctx->req_authsize;
 788	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
 789	int rc = 0;
 790	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
 791	u32 offset = 0;
 792	/* non-inplace mode */
 793	unsigned int size_for_map = req->assoclen + req->cryptlen;
 794	u32 sg_index = 0;
 795	u32 size_to_skip = req->assoclen;
 796	struct scatterlist *sgl;
 797
 798	offset = size_to_skip;
 799
 800	if (!sg_data)
 801		return -EINVAL;
 802
 803	areq_ctx->src_sgl = req->src;
 804	areq_ctx->dst_sgl = req->dst;
 805
 806	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 807			authsize : 0;
 808	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
 809					    &src_last_bytes);
 810	sg_index = areq_ctx->src_sgl->length;
 811	//check where the data starts
 812	while (src_mapped_nents && (sg_index <= size_to_skip)) {
 813		src_mapped_nents--;
 814		offset -= areq_ctx->src_sgl->length;
 815		sgl = sg_next(areq_ctx->src_sgl);
 816		if (!sgl)
 817			break;
 818		areq_ctx->src_sgl = sgl;
 819		sg_index += areq_ctx->src_sgl->length;
 820	}
 821	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 822		dev_err(dev, "Too many fragments. current %d max %d\n",
 823			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 824		return -ENOMEM;
 825	}
 826
 827	areq_ctx->src.nents = src_mapped_nents;
 828
 829	areq_ctx->src_offset = offset;
 830
 831	if (req->src != req->dst) {
 832		size_for_map = req->assoclen + req->cryptlen;
 833
 834		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
 835			size_for_map += authsize;
 836		else
 837			size_for_map -= authsize;
 838
 839		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
 840			       &areq_ctx->dst.mapped_nents,
 841			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 842			       &dst_mapped_nents);
 843		if (rc)
 844			goto chain_data_exit;
 845	}
 846
 847	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
 848					    &dst_last_bytes);
 849	sg_index = areq_ctx->dst_sgl->length;
 850	offset = size_to_skip;
 851
 852	//check where the data starts
 853	while (dst_mapped_nents && sg_index <= size_to_skip) {
 854		dst_mapped_nents--;
 855		offset -= areq_ctx->dst_sgl->length;
 856		sgl = sg_next(areq_ctx->dst_sgl);
 857		if (!sgl)
 858			break;
 859		areq_ctx->dst_sgl = sgl;
 860		sg_index += areq_ctx->dst_sgl->length;
 861	}
 862	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 863		dev_err(dev, "Too many fragments. current %d max %d\n",
 864			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 865		return -ENOMEM;
 866	}
 867	areq_ctx->dst.nents = dst_mapped_nents;
 868	areq_ctx->dst_offset = offset;
 869	if (src_mapped_nents > 1 ||
 870	    dst_mapped_nents  > 1 ||
 871	    do_chain) {
 872		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
 873		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
 874					  &src_last_bytes, &dst_last_bytes,
 875					  is_last_table);
 876	} else {
 877		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
 878		cc_prepare_aead_data_dlli(req, &src_last_bytes,
 879					  &dst_last_bytes);
 880	}
 881
 882chain_data_exit:
 883	return rc;
 884}
 885
 886static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
 887				      struct aead_request *req)
 888{
 889	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 890	u32 curr_mlli_size = 0;
 891
 892	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 893		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
 894		curr_mlli_size = areq_ctx->assoc.mlli_nents *
 895						LLI_ENTRY_BYTE_SIZE;
 896	}
 897
 898	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
 899		/*Inplace case dst nents equal to src nents*/
 900		if (req->src == req->dst) {
 901			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
 902			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
 903								curr_mlli_size;
 904			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
 905			if (!areq_ctx->is_single_pass)
 906				areq_ctx->assoc.mlli_nents +=
 907					areq_ctx->src.mlli_nents;
 908		} else {
 909			if (areq_ctx->gen_ctx.op_type ==
 910					DRV_CRYPTO_DIRECTION_DECRYPT) {
 911				areq_ctx->src.sram_addr =
 912						drvdata->mlli_sram_addr +
 913								curr_mlli_size;
 914				areq_ctx->dst.sram_addr =
 915						areq_ctx->src.sram_addr +
 916						areq_ctx->src.mlli_nents *
 917						LLI_ENTRY_BYTE_SIZE;
 918				if (!areq_ctx->is_single_pass)
 919					areq_ctx->assoc.mlli_nents +=
 920						areq_ctx->src.mlli_nents;
 921			} else {
 922				areq_ctx->dst.sram_addr =
 923						drvdata->mlli_sram_addr +
 924								curr_mlli_size;
 925				areq_ctx->src.sram_addr =
 926						areq_ctx->dst.sram_addr +
 927						areq_ctx->dst.mlli_nents *
 928						LLI_ENTRY_BYTE_SIZE;
 929				if (!areq_ctx->is_single_pass)
 930					areq_ctx->assoc.mlli_nents +=
 931						areq_ctx->dst.mlli_nents;
 932			}
 933		}
 934	}
 935}
 936
 937int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 938{
 939	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 940	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 941	struct device *dev = drvdata_to_dev(drvdata);
 942	struct buffer_array sg_data;
 943	unsigned int authsize = areq_ctx->req_authsize;
 944	int rc = 0;
 945	dma_addr_t dma_addr;
 946	u32 mapped_nents = 0;
 947	u32 dummy = 0; /*used for the assoc data fragments */
 948	u32 size_to_map;
 949	gfp_t flags = cc_gfp_flags(&req->base);
 950
 951	mlli_params->curr_pool = NULL;
 952	sg_data.num_of_buffers = 0;
 953
  954	/* copy the MAC to a temporary location to deal with possible
  955	 * data memory overwriting caused by a cache coherence problem.
  956	 */
 957	if (drvdata->coherent &&
 958	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
 959	    req->src == req->dst)
 960		cc_copy_mac(dev, req, CC_SG_TO_BUF);
 961
  962	/* calculate the size for the cipher; remove the ICV in decrypt */
 963	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
 964				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 965				req->cryptlen :
 966				(req->cryptlen - authsize);
 967
 968	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
 969				  DMA_BIDIRECTIONAL);
 970	if (dma_mapping_error(dev, dma_addr)) {
 971		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
 972			MAX_MAC_SIZE, areq_ctx->mac_buf);
 973		rc = -ENOMEM;
 974		goto aead_map_failure;
 975	}
 976	areq_ctx->mac_buf_dma_addr = dma_addr;
 977
 978	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 979		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
 980
 981		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
 982					  DMA_TO_DEVICE);
 983
 984		if (dma_mapping_error(dev, dma_addr)) {
 985			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
 986				AES_BLOCK_SIZE, addr);
 987			areq_ctx->ccm_iv0_dma_addr = 0;
 988			rc = -ENOMEM;
 989			goto aead_map_failure;
 990		}
 991		areq_ctx->ccm_iv0_dma_addr = dma_addr;
 992
 993		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
 994					  &sg_data, areq_ctx->assoclen);
 995		if (rc)
 996			goto aead_map_failure;
 997	}
 998
 999	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1000		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
1001					  DMA_BIDIRECTIONAL);
1002		if (dma_mapping_error(dev, dma_addr)) {
1003			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
1004				AES_BLOCK_SIZE, areq_ctx->hkey);
1005			rc = -ENOMEM;
1006			goto aead_map_failure;
1007		}
1008		areq_ctx->hkey_dma_addr = dma_addr;
1009
1010		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
1011					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1012		if (dma_mapping_error(dev, dma_addr)) {
1013			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1014				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1015			rc = -ENOMEM;
1016			goto aead_map_failure;
1017		}
1018		areq_ctx->gcm_block_len_dma_addr = dma_addr;
1019
1020		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
1021					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1022
1023		if (dma_mapping_error(dev, dma_addr)) {
1024			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1025				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
1026			areq_ctx->gcm_iv_inc1_dma_addr = 0;
1027			rc = -ENOMEM;
1028			goto aead_map_failure;
1029		}
1030		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
1031
1032		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
1033					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1034
1035		if (dma_mapping_error(dev, dma_addr)) {
1036			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1037				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
1038			areq_ctx->gcm_iv_inc2_dma_addr = 0;
1039			rc = -ENOMEM;
1040			goto aead_map_failure;
1041		}
1042		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
1043	}
1044
1045	size_to_map = req->cryptlen + req->assoclen;
1046	/* If we do in-place encryption, we also need the auth tag */
1047	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
1048	   (req->src == req->dst)) {
1049		size_to_map += authsize;
1050	}
1051
1052	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
1053		       &areq_ctx->src.mapped_nents,
1054		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
1055			LLI_MAX_NUM_OF_DATA_ENTRIES),
1056		       &dummy, &mapped_nents);
1057	if (rc)
1058		goto aead_map_failure;
1059
1060	if (areq_ctx->is_single_pass) {
1061		/*
1062		 * Create MLLI table for:
1063		 *   (1) Assoc. data
1064		 *   (2) Src/Dst SGLs
1065		 *   Note: IV is contg. buffer (not an SGL)
1066		 */
1067		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1068		if (rc)
1069			goto aead_map_failure;
1070		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
1071		if (rc)
1072			goto aead_map_failure;
1073		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
1074		if (rc)
1075			goto aead_map_failure;
1076	} else { /* DOUBLE-PASS flow */
1077		/*
1078		 * Prepare MLLI table(s) in this order:
1079		 *
1080		 * If ENCRYPT/DECRYPT (inplace):
1081		 *   (1) MLLI table for assoc
1082		 *   (2) IV entry (chained right after end of assoc)
1083		 *   (3) MLLI for src/dst (inplace operation)
1084		 *
1085		 * If ENCRYPT (non-inplace)
1086		 *   (1) MLLI table for assoc
1087		 *   (2) IV entry (chained right after end of assoc)
1088		 *   (3) MLLI for dst
1089		 *   (4) MLLI for src
1090		 *
1091		 * If DECRYPT (non-inplace)
1092		 *   (1) MLLI table for assoc
1093		 *   (2) IV entry (chained right after end of assoc)
1094		 *   (3) MLLI for src
1095		 *   (4) MLLI for dst
1096		 */
1097		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1098		if (rc)
1099			goto aead_map_failure;
1100		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
1101		if (rc)
1102			goto aead_map_failure;
1103		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
1104		if (rc)
1105			goto aead_map_failure;
1106	}
1107
1108	/* MLLI support - start building the MLLI according to the above
1109	 * results
1110	 */
1111	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1112	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1113		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1114		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1115		if (rc)
1116			goto aead_map_failure;
1117
1118		cc_update_aead_mlli_nents(drvdata, req);
1119		dev_dbg(dev, "assoc params mn %d\n",
1120			areq_ctx->assoc.mlli_nents);
1121		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
1122		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
1123	}
1124	return 0;
1125
1126aead_map_failure:
1127	cc_unmap_aead_request(dev, req);
1128	return rc;
1129}
1130
1131int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
1132			      struct scatterlist *src, unsigned int nbytes,
1133			      bool do_update, gfp_t flags)
1134{
1135	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1136	struct device *dev = drvdata_to_dev(drvdata);
1137	u8 *curr_buff = cc_hash_buf(areq_ctx);
1138	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1139	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1140	struct buffer_array sg_data;
1141	int rc = 0;
1142	u32 dummy = 0;
1143	u32 mapped_nents = 0;
1144
1145	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
1146		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1147	/* Init the type of the dma buffer */
1148	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1149	mlli_params->curr_pool = NULL;
1150	sg_data.num_of_buffers = 0;
1151	areq_ctx->in_nents = 0;
1152
1153	if (nbytes == 0 && *curr_buff_cnt == 0) {
1154		/* nothing to do */
1155		return 0;
1156	}
1157
1158	/* map the previous buffer */
1159	if (*curr_buff_cnt) {
1160		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1161				     &sg_data);
1162		if (rc)
1163			return rc;
1164	}
1165
1166	if (src && nbytes > 0 && do_update) {
1167		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
1168			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
1169			       &dummy, &mapped_nents);
1170		if (rc)
1171			goto unmap_curr_buff;
1172		if (src && mapped_nents == 1 &&
1173		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1174			memcpy(areq_ctx->buff_sg, src,
1175			       sizeof(struct scatterlist));
1176			areq_ctx->buff_sg->length = nbytes;
1177			areq_ctx->curr_sg = areq_ctx->buff_sg;
1178			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1179		} else {
1180			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1181		}
1182	}
1183
1184	/*build mlli */
1185	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1186		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1187		/* add the src data to the sg_data */
1188		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
1189				0, true, &areq_ctx->mlli_nents);
1190		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1191		if (rc)
1192			goto fail_unmap_din;
1193	}
1194	/* change the buffer index for the unmap function */
1195	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1196	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
1197		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
1198	return 0;
1199
1200fail_unmap_din:
1201	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1202
1203unmap_curr_buff:
1204	if (*curr_buff_cnt)
1205		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1206
1207	return rc;
1208}
1209
1210int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
1211			       struct scatterlist *src, unsigned int nbytes,
1212			       unsigned int block_size, gfp_t flags)
1213{
1214	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1215	struct device *dev = drvdata_to_dev(drvdata);
1216	u8 *curr_buff = cc_hash_buf(areq_ctx);
1217	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1218	u8 *next_buff = cc_next_buf(areq_ctx);
1219	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
1220	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1221	unsigned int update_data_len;
1222	u32 total_in_len = nbytes + *curr_buff_cnt;
1223	struct buffer_array sg_data;
1224	unsigned int swap_index = 0;
1225	int rc = 0;
1226	u32 dummy = 0;
1227	u32 mapped_nents = 0;
1228
1229	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
1230		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1231	/* Init the type of the dma buffer */
1232	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1233	mlli_params->curr_pool = NULL;
1234	areq_ctx->curr_sg = NULL;
1235	sg_data.num_of_buffers = 0;
1236	areq_ctx->in_nents = 0;
1237
1238	if (total_in_len < block_size) {
1239		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
1240			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
1241		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
1242		sg_copy_to_buffer(src, areq_ctx->in_nents,
1243				  &curr_buff[*curr_buff_cnt], nbytes);
1244		*curr_buff_cnt += nbytes;
1245		return 1;
1246	}
1247
1248	/* Calculate the residue size*/
1249	*next_buff_cnt = total_in_len & (block_size - 1);
1250	/* update data len */
1251	update_data_len = total_in_len - *next_buff_cnt;
1252
1253	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
1254		*next_buff_cnt, update_data_len);
1255
1256	/* Copy the new residue to next buffer */
1257	if (*next_buff_cnt) {
1258		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
1259			next_buff, (update_data_len - *curr_buff_cnt),
1260			*next_buff_cnt);
1261		cc_copy_sg_portion(dev, next_buff, src,
1262				   (update_data_len - *curr_buff_cnt),
1263				   nbytes, CC_SG_TO_BUF);
1264		/* change the buffer index for next operation */
1265		swap_index = 1;
1266	}
1267
1268	if (*curr_buff_cnt) {
1269		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1270				     &sg_data);
1271		if (rc)
1272			return rc;
1273		/* change the buffer index for next operation */
1274		swap_index = 1;
1275	}
1276
1277	if (update_data_len > *curr_buff_cnt) {
1278		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
1279			       DMA_TO_DEVICE, &areq_ctx->in_nents,
1280			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
1281			       &mapped_nents);
1282		if (rc)
1283			goto unmap_curr_buff;
1284		if (mapped_nents == 1 &&
1285		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1286			/* only one entry in the SG and no previous data */
1287			memcpy(areq_ctx->buff_sg, src,
1288			       sizeof(struct scatterlist));
1289			areq_ctx->buff_sg->length = update_data_len;
1290			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1291			areq_ctx->curr_sg = areq_ctx->buff_sg;
1292		} else {
1293			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1294		}
1295	}
1296
1297	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1298		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1299		/* add the src data to the sg_data */
1300		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
1301				(update_data_len - *curr_buff_cnt), 0, true,
1302				&areq_ctx->mlli_nents);
1303		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1304		if (rc)
1305			goto fail_unmap_din;
1306	}
1307	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1308
1309	return 0;
1310
1311fail_unmap_din:
1312	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1313
1314unmap_curr_buff:
1315	if (*curr_buff_cnt)
1316		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1317
1318	return rc;
1319}
1320
1321void cc_unmap_hash_request(struct device *dev, void *ctx,
1322			   struct scatterlist *src, bool do_revert)
1323{
1324	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1325	u32 *prev_len = cc_next_buf_cnt(areq_ctx);
1326
1327	/*In case a pool was set, a table was
1328	 *allocated and should be released
1329	 */
1330	if (areq_ctx->mlli_params.curr_pool) {
1331		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
1332			&areq_ctx->mlli_params.mlli_dma_addr,
1333			areq_ctx->mlli_params.mlli_virt_addr);
1334		dma_pool_free(areq_ctx->mlli_params.curr_pool,
1335			      areq_ctx->mlli_params.mlli_virt_addr,
1336			      areq_ctx->mlli_params.mlli_dma_addr);
1337	}
1338
1339	if (src && areq_ctx->in_nents) {
1340		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
1341			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
1342		dma_unmap_sg(dev, src,
1343			     areq_ctx->in_nents, DMA_TO_DEVICE);
1344	}
1345
1346	if (*prev_len) {
1347		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
1348			sg_virt(areq_ctx->buff_sg),
1349			&sg_dma_address(areq_ctx->buff_sg),
1350			sg_dma_len(areq_ctx->buff_sg));
1351		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1352		if (!do_revert) {
1353			/* clean the previous data length for update
1354			 * operation
1355			 */
1356			*prev_len = 0;
1357		} else {
1358			areq_ctx->buff_index ^= 1;
1359		}
1360	}
1361}
1362
1363int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
1364{
1365	struct device *dev = drvdata_to_dev(drvdata);
1366
1367	drvdata->mlli_buffs_pool =
1368		dma_pool_create("dx_single_mlli_tables", dev,
1369				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1370				LLI_ENTRY_BYTE_SIZE,
1371				MLLI_TABLE_MIN_ALIGNMENT, 0);
1372
1373	if (!drvdata->mlli_buffs_pool)
1374		return -ENOMEM;
1375
1376	return 0;
1377}
1378
1379int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
1380{
1381	dma_pool_destroy(drvdata->mlli_buffs_pool);
1382	return 0;
1383}
v4.17 (drivers/crypto/ccree/cc_buffer_mgr.c)
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
   3
   4#include <crypto/internal/aead.h>
   5#include <crypto/authenc.h>
   6#include <crypto/scatterwalk.h>
   7#include <linux/dmapool.h>
   8#include <linux/dma-mapping.h>
   9
  10#include "cc_buffer_mgr.h"
  11#include "cc_lli_defs.h"
  12#include "cc_cipher.h"
  13#include "cc_hash.h"
  14#include "cc_aead.h"
  15
  16enum dma_buffer_type {
  17	DMA_NULL_TYPE = -1,
  18	DMA_SGL_TYPE = 1,
  19	DMA_BUFF_TYPE = 2,
  20};
  21
  22struct buff_mgr_handle {
  23	struct dma_pool *mlli_buffs_pool;
  24};
  25
  26union buffer_array_entry {
  27	struct scatterlist *sgl;
  28	dma_addr_t buffer_dma;
  29};
  30
  31struct buffer_array {
  32	unsigned int num_of_buffers;
  33	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
  34	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
  35	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  36	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
  37	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
  38	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
  39	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  40};
  41
  42static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
  43{
  44	switch (type) {
  45	case CC_DMA_BUF_NULL:
  46		return "BUF_NULL";
  47	case CC_DMA_BUF_DLLI:
  48		return "BUF_DLLI";
  49	case CC_DMA_BUF_MLLI:
  50		return "BUF_MLLI";
  51	default:
  52		return "BUF_INVALID";
  53	}
  54}
  55
  56/**
  57 * cc_copy_mac() - Copy MAC to temporary location
  58 *
  59 * @dev: device object
  60 * @req: aead request object
  61 * @dir: [IN] copy from/to sgl
  62 */
  63static void cc_copy_mac(struct device *dev, struct aead_request *req,
  64			enum cc_sg_cpy_direct dir)
  65{
  66	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  67	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  68	u32 skip = req->assoclen + req->cryptlen;
  69
  70	if (areq_ctx->is_gcm4543)
  71		skip += crypto_aead_ivsize(tfm);
  72
  73	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
  74			   (skip - areq_ctx->req_authsize), skip, dir);
  75}
  76
  77/**
  78 * cc_get_sgl_nents() - Get scatterlist number of entries.
  79 *
  80 * @sg_list: SG list
  81 * @nbytes: [IN] Total SGL data bytes.
  82 * @lbytes: [OUT] Returns the amount of bytes at the last entry
  83 */
  84static unsigned int cc_get_sgl_nents(struct device *dev,
  85				     struct scatterlist *sg_list,
  86				     unsigned int nbytes, u32 *lbytes,
  87				     bool *is_chained)
  88{
  89	unsigned int nents = 0;
  90
  91	while (nbytes && sg_list) {
  92		if (sg_list->length) {
  93			nents++;
  94			/* get the number of bytes in the last entry */
  95			*lbytes = nbytes;
  96			nbytes -= (sg_list->length > nbytes) ?
  97					nbytes : sg_list->length;
  98			sg_list = sg_next(sg_list);
  99		} else {
 100			sg_list = (struct scatterlist *)sg_page(sg_list);
 101			if (is_chained)
 102				*is_chained = true;
 103		}
 104	}
 105	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
 106	return nents;
 107}
 108
 109/**
  110 * cc_zero_sgl() - Zero scatter list data.
  111 *
  112 * @sgl: SG list to zero
 113 */
 114void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
 115{
 116	struct scatterlist *current_sg = sgl;
 117	int sg_index = 0;
 118
 119	while (sg_index <= data_len) {
 120		if (!current_sg) {
 121			/* reached the end of the sgl --> just return back */
 122			return;
 123		}
 124		memset(sg_virt(current_sg), 0, current_sg->length);
 125		sg_index += current_sg->length;
 126		current_sg = sg_next(current_sg);
 127	}
 128}
 129
 130/**
 131 * cc_copy_sg_portion() - Copy scatter list data,
 132 * from to_skip to end, to dest and vice versa
 133 *
  134 * @dest: Buffer to copy to/from
  135 * @sg: SG list
  136 * @to_skip: Number of bytes to skip before copying
  137 * @end: Offset of last byte to copy
  138 * @direct: Transfer direction (true: SG list to buffer, false: buffer to SG list)
 139 */
 140void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 141			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
 142{
 143	u32 nents, lbytes;
 144
 145	nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
 146	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
 147		       (direct == CC_SG_TO_BUF));
 148}
 149
 150static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
 151				  u32 buff_size, u32 *curr_nents,
 152				  u32 **mlli_entry_pp)
 153{
 154	u32 *mlli_entry_p = *mlli_entry_pp;
 155	u32 new_nents;
 156
 157	/* Verify there is no memory overflow*/
 158	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
 159	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
 160		return -ENOMEM;
 161
 162	/*handle buffer longer than 64 kbytes */
 163	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
 164		cc_lli_set_addr(mlli_entry_p, buff_dma);
 165		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
 166		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 167			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 168			mlli_entry_p[LLI_WORD1_OFFSET]);
 169		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
 170		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
 171		mlli_entry_p = mlli_entry_p + 2;
 172		(*curr_nents)++;
 173	}
 174	/*Last entry */
 175	cc_lli_set_addr(mlli_entry_p, buff_dma);
 176	cc_lli_set_size(mlli_entry_p, buff_size);
 177	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 178		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 179		mlli_entry_p[LLI_WORD1_OFFSET]);
 180	mlli_entry_p = mlli_entry_p + 2;
 181	*mlli_entry_pp = mlli_entry_p;
 182	(*curr_nents)++;
 183	return 0;
 184}
 185
 186static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
 187				u32 sgl_data_len, u32 sgl_offset,
 188				u32 *curr_nents, u32 **mlli_entry_pp)
 189{
 190	struct scatterlist *curr_sgl = sgl;
 191	u32 *mlli_entry_p = *mlli_entry_pp;
 192	s32 rc = 0;
 193
 194	for ( ; (curr_sgl && sgl_data_len);
 195	      curr_sgl = sg_next(curr_sgl)) {
 196		u32 entry_data_len =
 197			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
 198				sg_dma_len(curr_sgl) - sgl_offset :
 199				sgl_data_len;
 200		sgl_data_len -= entry_data_len;
 201		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
 202					    sgl_offset, entry_data_len,
 203					    curr_nents, &mlli_entry_p);
 204		if (rc)
 205			return rc;
 206
 207		sgl_offset = 0;
 208	}
 209	*mlli_entry_pp = mlli_entry_p;
 210	return 0;
 211}
 212
 213static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
 214			    struct mlli_params *mlli_params, gfp_t flags)
 215{
 216	u32 *mlli_p;
 217	u32 total_nents = 0, prev_total_nents = 0;
 218	int rc = 0, i;
 219
 220	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
 221
 222	/* Allocate memory from the pointed pool */
 223	mlli_params->mlli_virt_addr =
 224		dma_pool_alloc(mlli_params->curr_pool, flags,
 225			       &mlli_params->mlli_dma_addr);
 226	if (!mlli_params->mlli_virt_addr) {
 227		dev_err(dev, "dma_pool_alloc() failed\n");
 228		rc = -ENOMEM;
 229		goto build_mlli_exit;
 230	}
 231	/* Point to start of MLLI */
 232	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
 233	/* go over all SG's and link it to one MLLI table */
 234	for (i = 0; i < sg_data->num_of_buffers; i++) {
 235		union buffer_array_entry *entry = &sg_data->entry[i];
 236		u32 tot_len = sg_data->total_data_len[i];
 237		u32 offset = sg_data->offset[i];
 238
 239		if (sg_data->type[i] == DMA_SGL_TYPE)
 240			rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
 241						  offset, &total_nents,
 242						  &mlli_p);
 243		else /*DMA_BUFF_TYPE*/
 244			rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
 245						    tot_len, &total_nents,
 246						    &mlli_p);
 247		if (rc)
 248			return rc;
 249
 250		/* set last bit in the current table */
 251		if (sg_data->mlli_nents[i]) {
 252			/*Calculate the current MLLI table length for the
 253			 *length field in the descriptor
 254			 */
 255			*sg_data->mlli_nents[i] +=
 256				(total_nents - prev_total_nents);
 257			prev_total_nents = total_nents;
 258		}
 259	}
 260
 261	/* Set MLLI size for the bypass operation */
 262	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
 263
 264	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
 265		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
 266		mlli_params->mlli_len);
 267
 268build_mlli_exit:
 269	return rc;
 270}
 271
 272static void cc_add_buffer_entry(struct device *dev,
 273				struct buffer_array *sgl_data,
 274				dma_addr_t buffer_dma, unsigned int buffer_len,
 275				bool is_last_entry, u32 *mlli_nents)
 276{
 277	unsigned int index = sgl_data->num_of_buffers;
 278
 279	dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
 280		index, &buffer_dma, buffer_len, is_last_entry);
 281	sgl_data->nents[index] = 1;
 282	sgl_data->entry[index].buffer_dma = buffer_dma;
 283	sgl_data->offset[index] = 0;
 284	sgl_data->total_data_len[index] = buffer_len;
 285	sgl_data->type[index] = DMA_BUFF_TYPE;
 286	sgl_data->is_last[index] = is_last_entry;
 287	sgl_data->mlli_nents[index] = mlli_nents;
 288	if (sgl_data->mlli_nents[index])
 289		*sgl_data->mlli_nents[index] = 0;
 290	sgl_data->num_of_buffers++;
 291}
 292
 293static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 294			    unsigned int nents, struct scatterlist *sgl,
 295			    unsigned int data_len, unsigned int data_offset,
 296			    bool is_last_table, u32 *mlli_nents)
 297{
 298	unsigned int index = sgl_data->num_of_buffers;
 299
 300	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
 301		index, nents, sgl, data_len, is_last_table);
 302	sgl_data->nents[index] = nents;
 303	sgl_data->entry[index].sgl = sgl;
 304	sgl_data->offset[index] = data_offset;
 305	sgl_data->total_data_len[index] = data_len;
 306	sgl_data->type[index] = DMA_SGL_TYPE;
 307	sgl_data->is_last[index] = is_last_table;
 308	sgl_data->mlli_nents[index] = mlli_nents;
 309	if (sgl_data->mlli_nents[index])
 310		*sgl_data->mlli_nents[index] = 0;
 311	sgl_data->num_of_buffers++;
 312}
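/*
 * Example (illustrative sketch only): cc_add_buffer_entry() and
 * cc_add_sg_entry() only record metadata; nothing is written to the MLLI
 * table until cc_generate_mlli() runs. A mixed table could be staged like
 * this (iv_dma_addr, ivsize, src_nents etc. are placeholders):
 *
 *	cc_add_buffer_entry(dev, &sg_data, iv_dma_addr, ivsize, false,
 *			    &assoc_mlli_nents);
 *	cc_add_sg_entry(dev, &sg_data, src_nents, src_sgl, data_len, 0,
 *			true, &src_mlli_nents);
 *	// sg_data.num_of_buffers == 2; both *mlli_nents counters were
 *	// reset to 0 and will be filled in by cc_generate_mlli()
 */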
 313
 314static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
  315			 enum dma_data_direction direction)
 316{
 317	u32 i, j;
 318	struct scatterlist *l_sg = sg;
 319
 320	for (i = 0; i < nents; i++) {
 321		if (!l_sg)
 322			break;
 323		if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
 324			dev_err(dev, "dma_map_page() sg buffer failed\n");
 325			goto err;
 326		}
 327		l_sg = sg_next(l_sg);
 328	}
 329	return nents;
 330
 331err:
 332	/* Restore mapped parts */
 333	for (j = 0; j < i; j++) {
 334		if (!sg)
 335			break;
 336		dma_unmap_sg(dev, sg, 1, direction);
 337		sg = sg_next(sg);
 338	}
 339	return 0;
 340}
 341
 342static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 343		     unsigned int nbytes, int direction, u32 *nents,
 344		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 345{
 346	bool is_chained = false;
 347
 348	if (sg_is_last(sg)) {
  349		/* One entry only case - set to DLLI */
 350		if (dma_map_sg(dev, sg, 1, direction) != 1) {
 351			dev_err(dev, "dma_map_sg() single buffer failed\n");
 352			return -ENOMEM;
 353		}
 354		dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 355			&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
 356			sg->offset, sg->length);
 357		*lbytes = nbytes;
 358		*nents = 1;
 359		*mapped_nents = 1;
  360	} else {  /* !sg_is_last */
 361		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
 362					  &is_chained);
 363		if (*nents > max_sg_nents) {
 364			*nents = 0;
 365			dev_err(dev, "Too many fragments. current %d max %d\n",
 366				*nents, max_sg_nents);
 367			return -ENOMEM;
 368		}
 369		if (!is_chained) {
  370			/* In case of MMU, the number of mapped nents might
  371			 * differ from the original sgl nents
  372			 */
 373			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
 374			if (*mapped_nents == 0) {
 375				*nents = 0;
 376				dev_err(dev, "dma_map_sg() sg buffer failed\n");
 377				return -ENOMEM;
 378			}
 379		} else {
  380			/* In this case the driver maps entry by entry, so it
  381			 * must have the same nents before and after map
  382			 */
 383			*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
 384						      direction);
 385			if (*mapped_nents != *nents) {
 386				*nents = *mapped_nents;
 387				dev_err(dev, "dma_map_sg() sg buffer failed\n");
 388				return -ENOMEM;
 389			}
 390		}
 391	}
 392
 393	return 0;
 394}
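/*
 * Example (illustrative sketch only): cc_map_sg() reports both the number
 * of entries that hold data (*nents) and the number the DMA API actually
 * mapped (*mapped_nents), which may differ e.g. when an IOMMU merges
 * segments. A typical call mirrors the cipher path below:
 *
 *	u32 nents = 0, lbytes = 0, mapped_nents = 0;
 *
 *	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &nents,
 *		       LLI_MAX_NUM_OF_DATA_ENTRIES, &lbytes, &mapped_nents);
 *	if (!rc && mapped_nents > 1)
 *		dma_buf_type = CC_DMA_BUF_MLLI;	// otherwise stay with DLLI
 */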
 395
 396static int
 397cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
 398		     u8 *config_data, struct buffer_array *sg_data,
 399		     unsigned int assoclen)
 400{
 401	dev_dbg(dev, " handle additional data config set to DLLI\n");
 402	/* create sg for the current buffer */
 403	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
 404		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
 405	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
 406		dev_err(dev, "dma_map_sg() config buffer failed\n");
 407		return -ENOMEM;
 408	}
 409	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 410		&sg_dma_address(&areq_ctx->ccm_adata_sg),
 411		sg_page(&areq_ctx->ccm_adata_sg),
 412		sg_virt(&areq_ctx->ccm_adata_sg),
 413		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
 414	/* prepare for case of MLLI */
 415	if (assoclen > 0) {
 416		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
 417				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
 418				0, false, NULL);
 419	}
 420	return 0;
 421}
 422
 423static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
 424			   u8 *curr_buff, u32 curr_buff_cnt,
 425			   struct buffer_array *sg_data)
 426{
 427	dev_dbg(dev, " handle curr buff %x set to   DLLI\n", curr_buff_cnt);
 428	/* create sg for the current buffer */
 429	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
 430	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
 431		dev_err(dev, "dma_map_sg() src buffer failed\n");
 432		return -ENOMEM;
 433	}
 434	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 435		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
 436		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
 437		areq_ctx->buff_sg->length);
 438	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
 439	areq_ctx->curr_sg = areq_ctx->buff_sg;
 440	areq_ctx->in_nents = 0;
 441	/* prepare for case of MLLI */
 442	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
 443			false, NULL);
 444	return 0;
 445}
 446
 447void cc_unmap_cipher_request(struct device *dev, void *ctx,
 448				unsigned int ivsize, struct scatterlist *src,
 449				struct scatterlist *dst)
 450{
 451	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 452
 453	if (req_ctx->gen_ctx.iv_dma_addr) {
 454		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 455			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
 456		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
 457				 ivsize,
 458				 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
 459				 DMA_TO_DEVICE);
 460	}
 461	/* Release pool */
 462	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
 463	    req_ctx->mlli_params.mlli_virt_addr) {
 464		dma_pool_free(req_ctx->mlli_params.curr_pool,
 465			      req_ctx->mlli_params.mlli_virt_addr,
 466			      req_ctx->mlli_params.mlli_dma_addr);
 467	}
 468
 469	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
 470	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 471
 472	if (src != dst) {
 473		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
 474		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
 475	}
 476}
 477
 478int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 479			  unsigned int ivsize, unsigned int nbytes,
 480			  void *info, struct scatterlist *src,
 481			  struct scatterlist *dst, gfp_t flags)
 482{
 483	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 484	struct mlli_params *mlli_params = &req_ctx->mlli_params;
 485	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 486	struct device *dev = drvdata_to_dev(drvdata);
 487	struct buffer_array sg_data;
 488	u32 dummy = 0;
 489	int rc = 0;
 490	u32 mapped_nents = 0;
 491
 492	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
 493	mlli_params->curr_pool = NULL;
 494	sg_data.num_of_buffers = 0;
 495
 496	/* Map IV buffer */
 497	if (ivsize) {
 498		dump_byte_array("iv", (u8 *)info, ivsize);
 499		req_ctx->gen_ctx.iv_dma_addr =
 500			dma_map_single(dev, (void *)info,
 501				       ivsize,
 502				       req_ctx->is_giv ? DMA_BIDIRECTIONAL :
 503				       DMA_TO_DEVICE);
 504		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
 505			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 506				ivsize, info);
 507			return -ENOMEM;
 508		}
 509		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 510			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
 511	} else {
 512		req_ctx->gen_ctx.iv_dma_addr = 0;
 513	}
 514
 515	/* Map the src SGL */
 516	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
 517		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
 518	if (rc) {
 519		rc = -ENOMEM;
 520		goto cipher_exit;
 521	}
 522	if (mapped_nents > 1)
 523		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 524
 525	if (src == dst) {
 526		/* Handle inplace operation */
 527		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 528			req_ctx->out_nents = 0;
 529			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 530					nbytes, 0, true,
 531					&req_ctx->in_mlli_nents);
 532		}
 533	} else {
 534		/* Map the dst sg */
 535		if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
 536			      &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
 537			      &dummy, &mapped_nents)) {
 538			rc = -ENOMEM;
 539			goto cipher_exit;
 540		}
 541		if (mapped_nents > 1)
 542			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 543
 544		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 545			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 546					nbytes, 0, true,
 547					&req_ctx->in_mlli_nents);
 548			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
 549					nbytes, 0, true,
 550					&req_ctx->out_mlli_nents);
 551		}
 552	}
 553
 554	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 555		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
 556		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 557		if (rc)
 558			goto cipher_exit;
 559	}
 560
 561	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
 562		cc_dma_buf_type(req_ctx->dma_buf_type));
 563
 564	return 0;
 565
 566cipher_exit:
 567	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
 568	return rc;
 569}
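/*
 * Example (illustrative sketch only): a cipher request is mapped before
 * its descriptors are queued and unmapped on completion; req_ctx, iv and
 * the surrounding flow are placeholders:
 *
 *	rc = cc_map_cipher_request(drvdata, req_ctx, ivsize, nbytes, iv,
 *				   src, dst, flags);
 *	if (rc)
 *		return rc;
 *	... queue HW descriptors ...
 *	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
 */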
 570
 571void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 572{
 573	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 574	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 575	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 576	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 577	u32 dummy;
 578	bool chained;
 579	u32 size_to_unmap = 0;
 580
 581	if (areq_ctx->mac_buf_dma_addr) {
 582		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
 583				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
 584	}
 585
 586	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
 587		if (areq_ctx->hkey_dma_addr) {
 588			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
 589					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
 590		}
 591
 592		if (areq_ctx->gcm_block_len_dma_addr) {
 593			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
 594					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 595		}
 596
 597		if (areq_ctx->gcm_iv_inc1_dma_addr) {
 598			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
 599					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 600		}
 601
 602		if (areq_ctx->gcm_iv_inc2_dma_addr) {
 603			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
 604					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 605		}
 606	}
 607
 608	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 609		if (areq_ctx->ccm_iv0_dma_addr) {
 610			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
 611					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 612		}
 613
 614		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
 615	}
 616	if (areq_ctx->gen_ctx.iv_dma_addr) {
 617		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 618				 hw_iv_size, DMA_BIDIRECTIONAL);
 
 619	}
 620
  621	/* In case a pool was set, a table was
  622	 * allocated and should be released
  623	 */
 624	if (areq_ctx->mlli_params.curr_pool) {
 625		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
 626			&areq_ctx->mlli_params.mlli_dma_addr,
 627			areq_ctx->mlli_params.mlli_virt_addr);
 628		dma_pool_free(areq_ctx->mlli_params.curr_pool,
 629			      areq_ctx->mlli_params.mlli_virt_addr,
 630			      areq_ctx->mlli_params.mlli_dma_addr);
 631	}
 632
 633	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
 634		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 635		req->assoclen, req->cryptlen);
 636	size_to_unmap = req->assoclen + req->cryptlen;
 637	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
 638		size_to_unmap += areq_ctx->req_authsize;
 639	if (areq_ctx->is_gcm4543)
 640		size_to_unmap += crypto_aead_ivsize(tfm);
 641
 642	dma_unmap_sg(dev, req->src,
 643		     cc_get_sgl_nents(dev, req->src, size_to_unmap,
 644				      &dummy, &chained),
 645		     DMA_BIDIRECTIONAL);
 646	if (req->src != req->dst) {
 647		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 648			sg_virt(req->dst));
 649		dma_unmap_sg(dev, req->dst,
 650			     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
 651					      &dummy, &chained),
 652			     DMA_BIDIRECTIONAL);
 653	}
 654	if (drvdata->coherent &&
 655	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
 656	    req->src == req->dst) {
  657		/* Copy the MAC back from its temporary location to deal with
  658		 * a possible data overwrite caused by a cache coherence
  659		 * problem.
  660		 */
 661		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
 662	}
 663}
 664
 665static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
 666				 unsigned int sgl_nents, unsigned int authsize,
 667				 u32 last_entry_data_size,
 668				 bool *is_icv_fragmented)
 669{
 670	unsigned int icv_max_size = 0;
 671	unsigned int icv_required_size = authsize > last_entry_data_size ?
 672					(authsize - last_entry_data_size) :
 673					authsize;
 674	unsigned int nents;
 675	unsigned int i;
 676
 677	if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
 678		*is_icv_fragmented = false;
 679		return 0;
 680	}
 681
 682	for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
 683		if (!sgl)
 684			break;
 685		sgl = sg_next(sgl);
 686	}
 687
 688	if (sgl)
 689		icv_max_size = sgl->length;
 690
 691	if (last_entry_data_size > authsize) {
 692		/* ICV attached to data in last entry (not fragmented!) */
 693		nents = 0;
 694		*is_icv_fragmented = false;
 695	} else if (last_entry_data_size == authsize) {
 696		/* ICV placed in whole last entry (not fragmented!) */
 697		nents = 1;
 698		*is_icv_fragmented = false;
 699	} else if (icv_max_size > icv_required_size) {
 700		nents = 1;
 701		*is_icv_fragmented = true;
 702	} else if (icv_max_size == icv_required_size) {
 703		nents = 2;
 704		*is_icv_fragmented = true;
 705	} else {
 706		dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
 707			MAX_ICV_NENTS_SUPPORTED);
 708		nents = -1; /*unsupported*/
 709	}
 710	dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
 711		(*is_icv_fragmented ? "true" : "false"), nents);
 712
 713	return nents;
 714}
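/*
 * Worked example (illustrative only): for a scatterlist with at least
 * MAX_ICV_NENTS_SUPPORTED entries, authsize == 16 and a last entry of
 * only 10 bytes, icv_required_size is 16 - 10 = 6, i.e. the first 6 ICV
 * bytes sit at the tail of the next-to-last entry. Assuming that entry is
 * larger than 6 bytes, the function returns 1 with *is_icv_fragmented set.
 * A last entry of exactly 16 bytes also returns 1 but with the ICV not
 * fragmented, and a larger last entry returns 0 (ICV attached to the
 * data).
 */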
 715
 716static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
 717			    struct aead_request *req,
 718			    struct buffer_array *sg_data,
 719			    bool is_last, bool do_chain)
 720{
 721	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 722	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
  723	struct device *dev = drvdata_to_dev(drvdata);
 724	int rc = 0;
 725
 726	if (!req->iv) {
  727		areq_ctx->gen_ctx.iv_dma_addr = 0;
 728		goto chain_iv_exit;
 729	}
 730
 731	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
 732						       hw_iv_size,
  733						       DMA_BIDIRECTIONAL);
 734	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 735		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
  736			hw_iv_size, req->iv);
 737		rc = -ENOMEM;
 738		goto chain_iv_exit;
 739	}
 740
 741	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 742		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
 743	// TODO: what about CTR?? ask Ron
 744	if (do_chain && areq_ctx->plaintext_authenticate_only) {
 745		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 746		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
 747		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
 748		/* Chain to given list */
 749		cc_add_buffer_entry(dev, sg_data,
 750				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
 751				    iv_size_to_authenc, is_last,
 752				    &areq_ctx->assoc.mlli_nents);
 753		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 754	}
 755
 756chain_iv_exit:
 757	return rc;
 758}
 759
 760static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 761			       struct aead_request *req,
 762			       struct buffer_array *sg_data,
 763			       bool is_last, bool do_chain)
 764{
 765	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 766	int rc = 0;
 767	u32 mapped_nents = 0;
 768	struct scatterlist *current_sg = req->src;
 769	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 770	unsigned int sg_index = 0;
 771	u32 size_of_assoc = req->assoclen;
 772	struct device *dev = drvdata_to_dev(drvdata);
 773
 774	if (areq_ctx->is_gcm4543)
 775		size_of_assoc += crypto_aead_ivsize(tfm);
 776
 777	if (!sg_data) {
 778		rc = -EINVAL;
 779		goto chain_assoc_exit;
 780	}
 781
 782	if (req->assoclen == 0) {
 783		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
 784		areq_ctx->assoc.nents = 0;
 785		areq_ctx->assoc.mlli_nents = 0;
 786		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
 787			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 788			areq_ctx->assoc.nents);
 789		goto chain_assoc_exit;
 790	}
 791
  792	//Iterate over the sgl to see how many entries hold associated data.
  793	//It is assumed that if we reach here, the sgl is already mapped.
  794	sg_index = current_sg->length;
  795	//Check whether the first scatterlist entry covers all the associated data.
 796	if (sg_index > size_of_assoc) {
 797		mapped_nents++;
 798	} else {
 799		while (sg_index <= size_of_assoc) {
 800			current_sg = sg_next(current_sg);
  801			/* If we have reached the end of the sgl, then this
  802			 * is unexpected
  803			 */
 804			if (!current_sg) {
 805				dev_err(dev, "reached end of sg list. unexpected\n");
 806				return -EINVAL;
 807			}
 808			sg_index += current_sg->length;
 809			mapped_nents++;
 810		}
 811	}
 812	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 813		dev_err(dev, "Too many fragments. current %d max %d\n",
 814			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 815		return -ENOMEM;
 816	}
 817	areq_ctx->assoc.nents = mapped_nents;
 818
 819	/* in CCM case we have additional entry for
 820	 * ccm header configurations
 821	 */
 822	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 823		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
  824			dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
 825				(areq_ctx->assoc.nents + 1),
 826				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 827			rc = -ENOMEM;
 828			goto chain_assoc_exit;
 829		}
 830	}
 831
 832	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
 833		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
 834	else
 835		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 836
 837	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 838		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
 839			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 840			areq_ctx->assoc.nents);
 841		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
 842				req->assoclen, 0, is_last,
 843				&areq_ctx->assoc.mlli_nents);
 844		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 845	}
 846
 847chain_assoc_exit:
 848	return rc;
 849}
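/*
 * Worked example (illustrative only): with req->assoclen == 24 and a
 * mapped source scatterlist whose first entry is 64 bytes long, the first
 * entry alone covers the associated data, so mapped_nents == 1 and, with
 * no CCM config header pending, the assoc buffer stays CC_DMA_BUF_DLLI.
 * If the first entry is shorter, the loop above walks further entries,
 * and more than one counted entry (or do_chain / a pending CCM header)
 * selects CC_DMA_BUF_MLLI instead.
 */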
 850
 851static void cc_prepare_aead_data_dlli(struct aead_request *req,
 852				      u32 *src_last_bytes, u32 *dst_last_bytes)
 853{
 854	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 855	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
  856	unsigned int authsize = areq_ctx->req_authsize;
 857
 858	areq_ctx->is_icv_fragmented = false;
 859	if (req->src == req->dst) {
 860		/*INPLACE*/
 861		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
 862			(*src_last_bytes - authsize);
 863		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
 864			(*src_last_bytes - authsize);
 865	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 866		/*NON-INPLACE and DECRYPT*/
 867		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
 868			(*src_last_bytes - authsize);
 869		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
 870			(*src_last_bytes - authsize);
 871	} else {
 872		/*NON-INPLACE and ENCRYPT*/
 873		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
 874			(*dst_last_bytes - authsize);
 875		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
 876			(*dst_last_bytes - authsize);
  877	}
 878}
 879
 880static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
 881				     struct aead_request *req,
 882				     struct buffer_array *sg_data,
 883				     u32 *src_last_bytes, u32 *dst_last_bytes,
 884				     bool is_last_table)
 885{
 886	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 887	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 888	unsigned int authsize = areq_ctx->req_authsize;
 889	int rc = 0, icv_nents;
 890	struct device *dev = drvdata_to_dev(drvdata);
 891	struct scatterlist *sg;
 892
 893	if (req->src == req->dst) {
 894		/*INPLACE*/
 895		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 896				areq_ctx->src_sgl, areq_ctx->cryptlen,
 897				areq_ctx->src_offset, is_last_table,
 898				&areq_ctx->src.mlli_nents);
 899
 900		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
 901						  areq_ctx->src.nents,
 902						  authsize, *src_last_bytes,
 903						  &areq_ctx->is_icv_fragmented);
 904		if (icv_nents < 0) {
 905			rc = -ENOTSUPP;
 906			goto prepare_data_mlli_exit;
 907		}
 908
 909		if (areq_ctx->is_icv_fragmented) {
 910			/* Backup happens only when ICV is fragmented, ICV
 911			 * verification is made by CPU compare in order to
 912			 * simplify MAC verification upon request completion
 913			 */
 914			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
  915				/* On coherent platforms (e.g. ACP) the ICV
  916				 * was already copied for any
  917				 * INPLACE-DECRYPT operation, hence
  918				 * we must skip this copy here.
  919				 */
 920				if (!drvdata->coherent)
 921					cc_copy_mac(dev, req, CC_SG_TO_BUF);
 922
 923				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 924			} else {
 925				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 926				areq_ctx->icv_dma_addr =
 927					areq_ctx->mac_buf_dma_addr;
 928			}
 929		} else { /* Contig. ICV */
 930			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  931			/* Should handle the case where the sg is not contiguous. */
 932			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 933				(*src_last_bytes - authsize);
 934			areq_ctx->icv_virt_addr = sg_virt(sg) +
 935				(*src_last_bytes - authsize);
 936		}
 937
 938	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 939		/*NON-INPLACE and DECRYPT*/
 940		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 941				areq_ctx->src_sgl, areq_ctx->cryptlen,
 942				areq_ctx->src_offset, is_last_table,
 943				&areq_ctx->src.mlli_nents);
 944		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 945				areq_ctx->dst_sgl, areq_ctx->cryptlen,
 946				areq_ctx->dst_offset, is_last_table,
 947				&areq_ctx->dst.mlli_nents);
 948
 949		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
 950						  areq_ctx->src.nents,
 951						  authsize, *src_last_bytes,
 952						  &areq_ctx->is_icv_fragmented);
 953		if (icv_nents < 0) {
 954			rc = -ENOTSUPP;
 955			goto prepare_data_mlli_exit;
 956		}
 957
 958		/* Backup happens only when ICV is fragmented, ICV
 959		 * verification is made by CPU compare in order to simplify
 960		 * MAC verification upon request completion
 961		 */
 962		if (areq_ctx->is_icv_fragmented) {
 963			cc_copy_mac(dev, req, CC_SG_TO_BUF);
 964			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 965
 966		} else { /* Contig. ICV */
 967			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  968			/* Should handle the case where the sg is not contiguous. */
 969			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 970				(*src_last_bytes - authsize);
 971			areq_ctx->icv_virt_addr = sg_virt(sg) +
 972				(*src_last_bytes - authsize);
 973		}
 974
 975	} else {
 976		/*NON-INPLACE and ENCRYPT*/
 977		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 978				areq_ctx->dst_sgl, areq_ctx->cryptlen,
 979				areq_ctx->dst_offset, is_last_table,
 980				&areq_ctx->dst.mlli_nents);
 981		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 982				areq_ctx->src_sgl, areq_ctx->cryptlen,
 983				areq_ctx->src_offset, is_last_table,
 984				&areq_ctx->src.mlli_nents);
 985
 986		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
 987						  areq_ctx->dst.nents,
 988						  authsize, *dst_last_bytes,
 989						  &areq_ctx->is_icv_fragmented);
 990		if (icv_nents < 0) {
 991			rc = -ENOTSUPP;
 992			goto prepare_data_mlli_exit;
 993		}
 994
 995		if (!areq_ctx->is_icv_fragmented) {
 996			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
 997			/* Contig. ICV */
 998			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 999				(*dst_last_bytes - authsize);
1000			areq_ctx->icv_virt_addr = sg_virt(sg) +
1001				(*dst_last_bytes - authsize);
1002		} else {
1003			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
1004			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
1005		}
1006	}
1007
1008prepare_data_mlli_exit:
1009	return rc;
1010}
1011
1012static int cc_aead_chain_data(struct cc_drvdata *drvdata,
1013			      struct aead_request *req,
1014			      struct buffer_array *sg_data,
1015			      bool is_last_table, bool do_chain)
1016{
1017	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1018	struct device *dev = drvdata_to_dev(drvdata);
1019	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
1020	unsigned int authsize = areq_ctx->req_authsize;
1021	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
1022	int rc = 0;
1023	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
1024	u32 offset = 0;
1025	/* non-inplace mode */
1026	unsigned int size_for_map = req->assoclen + req->cryptlen;
1027	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1028	u32 sg_index = 0;
1029	bool chained = false;
1030	bool is_gcm4543 = areq_ctx->is_gcm4543;
1031	u32 size_to_skip = req->assoclen;
1032
1033	if (is_gcm4543)
1034		size_to_skip += crypto_aead_ivsize(tfm);
1035
1036	offset = size_to_skip;
1037
1038	if (!sg_data)
1039		return -EINVAL;
1040
1041	areq_ctx->src_sgl = req->src;
1042	areq_ctx->dst_sgl = req->dst;
1043
1044	if (is_gcm4543)
1045		size_for_map += crypto_aead_ivsize(tfm);
1046
1047	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1048			authsize : 0;
1049	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
1050					    &src_last_bytes, &chained);
1051	sg_index = areq_ctx->src_sgl->length;
1052	//check where the data starts
 1053	while (sg_index <= size_to_skip) {
1054		offset -= areq_ctx->src_sgl->length;
1055		areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
 1056		//If we have reached the end of the sgl, this is unexpected
1057		if (!areq_ctx->src_sgl) {
1058			dev_err(dev, "reached end of sg list. unexpected\n");
1059			return -EINVAL;
1060		}
1061		sg_index += areq_ctx->src_sgl->length;
1062		src_mapped_nents--;
1063	}
1064	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
1065		dev_err(dev, "Too many fragments. current %d max %d\n",
1066			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1067		return -ENOMEM;
1068	}
1069
1070	areq_ctx->src.nents = src_mapped_nents;
1071
1072	areq_ctx->src_offset = offset;
1073
1074	if (req->src != req->dst) {
1075		size_for_map = req->assoclen + req->cryptlen;
1076		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1077				authsize : 0;
1078		if (is_gcm4543)
 1079			size_for_map += crypto_aead_ivsize(tfm);
1080
1081		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
1082			       &areq_ctx->dst.nents,
1083			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
1084			       &dst_mapped_nents);
1085		if (rc) {
1086			rc = -ENOMEM;
1087			goto chain_data_exit;
1088		}
1089	}
1090
1091	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
1092					    &dst_last_bytes, &chained);
1093	sg_index = areq_ctx->dst_sgl->length;
1094	offset = size_to_skip;
1095
1096	//check where the data starts
 1097	while (sg_index <= size_to_skip) {
1098		offset -= areq_ctx->dst_sgl->length;
1099		areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
 1100		//If we have reached the end of the sgl, this is unexpected
1101		if (!areq_ctx->dst_sgl) {
1102			dev_err(dev, "reached end of sg list. unexpected\n");
1103			return -EINVAL;
1104		}
1105		sg_index += areq_ctx->dst_sgl->length;
1106		dst_mapped_nents--;
1107	}
1108	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
1109		dev_err(dev, "Too many fragments. current %d max %d\n",
1110			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1111		return -ENOMEM;
1112	}
1113	areq_ctx->dst.nents = dst_mapped_nents;
1114	areq_ctx->dst_offset = offset;
1115	if (src_mapped_nents > 1 ||
1116	    dst_mapped_nents  > 1 ||
1117	    do_chain) {
1118		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
1119		rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
1120					       &src_last_bytes,
1121					       &dst_last_bytes, is_last_table);
1122	} else {
1123		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
1124		cc_prepare_aead_data_dlli(req, &src_last_bytes,
1125					  &dst_last_bytes);
1126	}
1127
1128chain_data_exit:
1129	return rc;
1130}
1131
1132static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
1133				      struct aead_request *req)
1134{
1135	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1136	u32 curr_mlli_size = 0;
1137
1138	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
1139		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
1140		curr_mlli_size = areq_ctx->assoc.mlli_nents *
1141						LLI_ENTRY_BYTE_SIZE;
1142	}
1143
1144	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
 1145		/* In-place case: dst nents are equal to src nents */
1146		if (req->src == req->dst) {
1147			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
1148			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
1149								curr_mlli_size;
1150			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
1151			if (!areq_ctx->is_single_pass)
1152				areq_ctx->assoc.mlli_nents +=
1153					areq_ctx->src.mlli_nents;
1154		} else {
1155			if (areq_ctx->gen_ctx.op_type ==
1156					DRV_CRYPTO_DIRECTION_DECRYPT) {
1157				areq_ctx->src.sram_addr =
1158						drvdata->mlli_sram_addr +
1159								curr_mlli_size;
1160				areq_ctx->dst.sram_addr =
1161						areq_ctx->src.sram_addr +
1162						areq_ctx->src.mlli_nents *
1163						LLI_ENTRY_BYTE_SIZE;
1164				if (!areq_ctx->is_single_pass)
1165					areq_ctx->assoc.mlli_nents +=
1166						areq_ctx->src.mlli_nents;
1167			} else {
1168				areq_ctx->dst.sram_addr =
1169						drvdata->mlli_sram_addr +
1170								curr_mlli_size;
1171				areq_ctx->src.sram_addr =
1172						areq_ctx->dst.sram_addr +
1173						areq_ctx->dst.mlli_nents *
1174						LLI_ENTRY_BYTE_SIZE;
1175				if (!areq_ctx->is_single_pass)
1176					areq_ctx->assoc.mlli_nents +=
1177						areq_ctx->dst.mlli_nents;
1178			}
1179		}
1180	}
1181}
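/*
 * Worked example (illustrative only): for a non-in-place decrypt with
 * both the assoc and data buffers using MLLI, assoc.mlli_nents == 3 and
 * src.mlli_nents == 4, the MLLI copies end up back to back in SRAM:
 * assoc at mlli_sram_addr, src at
 * mlli_sram_addr + 3 * LLI_ENTRY_BYTE_SIZE and dst right after it at
 * src.sram_addr + 4 * LLI_ENTRY_BYTE_SIZE. In the non-single-pass case
 * assoc.mlli_nents then grows by src.mlli_nents so that the assoc table
 * length also covers the chained data table.
 */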
1182
1183int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
1184{
1185	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1186	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1187	struct device *dev = drvdata_to_dev(drvdata);
1188	struct buffer_array sg_data;
1189	unsigned int authsize = areq_ctx->req_authsize;
1190	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1191	int rc = 0;
1192	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1193	bool is_gcm4543 = areq_ctx->is_gcm4543;
1194	dma_addr_t dma_addr;
1195	u32 mapped_nents = 0;
1196	u32 dummy = 0; /*used for the assoc data fragments */
1197	u32 size_to_map = 0;
1198	gfp_t flags = cc_gfp_flags(&req->base);
1199
1200	mlli_params->curr_pool = NULL;
1201	sg_data.num_of_buffers = 0;
1202
 1203	/* Copy the MAC to a temporary location to deal with a possible
 1204	 * data overwrite caused by a cache coherence problem.
 1205	 */
1206	if (drvdata->coherent &&
1207	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
1208	    req->src == req->dst)
1209		cc_copy_mac(dev, req, CC_SG_TO_BUF);
1210
 1211	/* Calculate the size for the cipher; remove the ICV in decrypt */
1212	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
1213				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1214				req->cryptlen :
1215				(req->cryptlen - authsize);
1216
1217	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
1218				  DMA_BIDIRECTIONAL);
1219	if (dma_mapping_error(dev, dma_addr)) {
1220		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
1221			MAX_MAC_SIZE, areq_ctx->mac_buf);
1222		rc = -ENOMEM;
1223		goto aead_map_failure;
1224	}
1225	areq_ctx->mac_buf_dma_addr = dma_addr;
1226
1227	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
1228		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1229
1230		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
1231					  DMA_TO_DEVICE);
1232
1233		if (dma_mapping_error(dev, dma_addr)) {
 1234			dev_err(dev, "Mapping ccm_iv0 %u B at va=%pK for DMA failed\n",
1235				AES_BLOCK_SIZE, addr);
1236			areq_ctx->ccm_iv0_dma_addr = 0;
1237			rc = -ENOMEM;
1238			goto aead_map_failure;
1239		}
1240		areq_ctx->ccm_iv0_dma_addr = dma_addr;
1241
1242		if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
1243					 &sg_data, req->assoclen)) {
1244			rc = -ENOMEM;
1245			goto aead_map_failure;
1246		}
1247	}
1248
1249	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1250		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
1251					  DMA_BIDIRECTIONAL);
1252		if (dma_mapping_error(dev, dma_addr)) {
1253			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
1254				AES_BLOCK_SIZE, areq_ctx->hkey);
1255			rc = -ENOMEM;
1256			goto aead_map_failure;
1257		}
1258		areq_ctx->hkey_dma_addr = dma_addr;
1259
1260		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
1261					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1262		if (dma_mapping_error(dev, dma_addr)) {
1263			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1264				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1265			rc = -ENOMEM;
1266			goto aead_map_failure;
1267		}
1268		areq_ctx->gcm_block_len_dma_addr = dma_addr;
1269
1270		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
1271					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1272
1273		if (dma_mapping_error(dev, dma_addr)) {
1274			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1275				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
1276			areq_ctx->gcm_iv_inc1_dma_addr = 0;
1277			rc = -ENOMEM;
1278			goto aead_map_failure;
1279		}
1280		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
1281
1282		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
1283					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1284
1285		if (dma_mapping_error(dev, dma_addr)) {
1286			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1287				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
1288			areq_ctx->gcm_iv_inc2_dma_addr = 0;
1289			rc = -ENOMEM;
1290			goto aead_map_failure;
1291		}
1292		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
1293	}
1294
1295	size_to_map = req->cryptlen + req->assoclen;
 1296	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
 1297		size_to_map += authsize;
1298
1299	if (is_gcm4543)
1300		size_to_map += crypto_aead_ivsize(tfm);
1301	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
1302		       &areq_ctx->src.nents,
1303		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
1304			LLI_MAX_NUM_OF_DATA_ENTRIES),
1305		       &dummy, &mapped_nents);
1306	if (rc) {
1307		rc = -ENOMEM;
1308		goto aead_map_failure;
1309	}
1310
1311	if (areq_ctx->is_single_pass) {
1312		/*
1313		 * Create MLLI table for:
1314		 *   (1) Assoc. data
1315		 *   (2) Src/Dst SGLs
 1316		 *   Note: IV is a contiguous buffer (not an SGL)
1317		 */
1318		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1319		if (rc)
1320			goto aead_map_failure;
1321		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
1322		if (rc)
1323			goto aead_map_failure;
1324		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
1325		if (rc)
1326			goto aead_map_failure;
1327	} else { /* DOUBLE-PASS flow */
1328		/*
1329		 * Prepare MLLI table(s) in this order:
1330		 *
1331		 * If ENCRYPT/DECRYPT (inplace):
1332		 *   (1) MLLI table for assoc
1333		 *   (2) IV entry (chained right after end of assoc)
1334		 *   (3) MLLI for src/dst (inplace operation)
1335		 *
1336		 * If ENCRYPT (non-inplace)
1337		 *   (1) MLLI table for assoc
1338		 *   (2) IV entry (chained right after end of assoc)
1339		 *   (3) MLLI for dst
1340		 *   (4) MLLI for src
1341		 *
1342		 * If DECRYPT (non-inplace)
1343		 *   (1) MLLI table for assoc
1344		 *   (2) IV entry (chained right after end of assoc)
1345		 *   (3) MLLI for src
1346		 *   (4) MLLI for dst
1347		 */
1348		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1349		if (rc)
1350			goto aead_map_failure;
1351		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
1352		if (rc)
1353			goto aead_map_failure;
1354		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
1355		if (rc)
1356			goto aead_map_failure;
1357	}
1358
 1359	/* MLLI support - start building the MLLI according to the above
 1360	 * results
 1361	 */
1362	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1363	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1364		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1365		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1366		if (rc)
1367			goto aead_map_failure;
1368
1369		cc_update_aead_mlli_nents(drvdata, req);
1370		dev_dbg(dev, "assoc params mn %d\n",
1371			areq_ctx->assoc.mlli_nents);
1372		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
1373		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
1374	}
1375	return 0;
1376
1377aead_map_failure:
1378	cc_unmap_aead_request(dev, req);
1379	return rc;
1380}
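/*
 * Example (illustrative sketch only): the AEAD path maps everything the
 * HW descriptors will touch in one call; cc_map_aead_request() unmaps by
 * itself on its internal error paths, so the caller only unmaps after a
 * successful mapping:
 *
 *	rc = cc_map_aead_request(drvdata, req);
 *	if (rc)
 *		return rc;
 *	... queue HW descriptors ...
 *	cc_unmap_aead_request(dev, req);
 */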
1381
1382int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
1383			      struct scatterlist *src, unsigned int nbytes,
1384			      bool do_update, gfp_t flags)
1385{
1386	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1387	struct device *dev = drvdata_to_dev(drvdata);
1388	u8 *curr_buff = cc_hash_buf(areq_ctx);
1389	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1390	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1391	struct buffer_array sg_data;
1392	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1393	u32 dummy = 0;
1394	u32 mapped_nents = 0;
1395
1396	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
1397		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1398	/* Init the type of the dma buffer */
1399	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1400	mlli_params->curr_pool = NULL;
1401	sg_data.num_of_buffers = 0;
1402	areq_ctx->in_nents = 0;
1403
1404	if (nbytes == 0 && *curr_buff_cnt == 0) {
1405		/* nothing to do */
1406		return 0;
1407	}
1408
 1409	/* TODO: copy data in case the buffer is enough for the operation */
1410	/* map the previous buffer */
1411	if (*curr_buff_cnt) {
1412		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1413				    &sg_data)) {
1414			return -ENOMEM;
1415		}
1416	}
1417
1418	if (src && nbytes > 0 && do_update) {
1419		if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
1420			      &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
 1421			      &dummy, &mapped_nents)) {
1422			goto unmap_curr_buff;
1423		}
1424		if (src && mapped_nents == 1 &&
1425		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1426			memcpy(areq_ctx->buff_sg, src,
1427			       sizeof(struct scatterlist));
1428			areq_ctx->buff_sg->length = nbytes;
1429			areq_ctx->curr_sg = areq_ctx->buff_sg;
1430			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1431		} else {
1432			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1433		}
1434	}
1435
 1436	/* Build MLLI */
1437	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1438		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1439		/* add the src data to the sg_data */
1440		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
1441				0, true, &areq_ctx->mlli_nents);
 1442		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
1443			goto fail_unmap_din;
1444	}
1445	/* change the buffer index for the unmap function */
1446	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1447	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
1448		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
1449	return 0;
1450
1451fail_unmap_din:
1452	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1453
1454unmap_curr_buff:
1455	if (*curr_buff_cnt)
1456		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1457
1458	return -ENOMEM;
1459}
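/*
 * Example (illustrative sketch only): a final/finup-style hash operation
 * maps whatever is pending - the carried-over buffer, the new src data or
 * both - and switches to MLLI only when more than one mapped entry
 * results; state is a placeholder for the ahash request context:
 *
 *	rc = cc_map_hash_request_final(drvdata, state, req->src,
 *				       req->nbytes, true, flags);
 *	if (rc)
 *		return rc;	// the error paths above already unmapped
 */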
1460
1461int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
1462			       struct scatterlist *src, unsigned int nbytes,
1463			       unsigned int block_size, gfp_t flags)
1464{
1465	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1466	struct device *dev = drvdata_to_dev(drvdata);
1467	u8 *curr_buff = cc_hash_buf(areq_ctx);
1468	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1469	u8 *next_buff = cc_next_buf(areq_ctx);
1470	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
1471	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1472	unsigned int update_data_len;
1473	u32 total_in_len = nbytes + *curr_buff_cnt;
1474	struct buffer_array sg_data;
1475	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 1476	unsigned int swap_index = 0;
1477	u32 dummy = 0;
1478	u32 mapped_nents = 0;
1479
1480	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
1481		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1482	/* Init the type of the dma buffer */
1483	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1484	mlli_params->curr_pool = NULL;
1485	areq_ctx->curr_sg = NULL;
1486	sg_data.num_of_buffers = 0;
1487	areq_ctx->in_nents = 0;
1488
1489	if (total_in_len < block_size) {
1490		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
1491			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
1492		areq_ctx->in_nents =
1493			cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
1494		sg_copy_to_buffer(src, areq_ctx->in_nents,
1495				  &curr_buff[*curr_buff_cnt], nbytes);
1496		*curr_buff_cnt += nbytes;
1497		return 1;
1498	}
1499
 1500	/* Calculate the residue size */
1501	*next_buff_cnt = total_in_len & (block_size - 1);
1502	/* update data len */
1503	update_data_len = total_in_len - *next_buff_cnt;
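	/*
	 * Worked example (illustrative only): with block_size == 64 (a power
	 * of two, as the hash block sizes here are), *curr_buff_cnt == 10 and
	 * nbytes == 100, total_in_len is 110, so *next_buff_cnt = 110 & 63 =
	 * 46 bytes are carried over to the next update while
	 * update_data_len = 110 - 46 = 64 bytes are hashed now.
	 */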
1504
1505	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
1506		*next_buff_cnt, update_data_len);
1507
1508	/* Copy the new residue to next buffer */
1509	if (*next_buff_cnt) {
1510		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
1511			next_buff, (update_data_len - *curr_buff_cnt),
1512			*next_buff_cnt);
1513		cc_copy_sg_portion(dev, next_buff, src,
1514				   (update_data_len - *curr_buff_cnt),
1515				   nbytes, CC_SG_TO_BUF);
1516		/* change the buffer index for next operation */
1517		swap_index = 1;
1518	}
1519
1520	if (*curr_buff_cnt) {
1521		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1522				    &sg_data)) {
1523			return -ENOMEM;
1524		}
1525		/* change the buffer index for next operation */
1526		swap_index = 1;
1527	}
1528
1529	if (update_data_len > *curr_buff_cnt) {
1530		if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
1531			      DMA_TO_DEVICE, &areq_ctx->in_nents,
1532			      LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
 1533			      &mapped_nents)) {
1534			goto unmap_curr_buff;
1535		}
1536		if (mapped_nents == 1 &&
1537		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1538			/* only one entry in the SG and no previous data */
1539			memcpy(areq_ctx->buff_sg, src,
1540			       sizeof(struct scatterlist));
1541			areq_ctx->buff_sg->length = update_data_len;
1542			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1543			areq_ctx->curr_sg = areq_ctx->buff_sg;
1544		} else {
1545			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1546		}
1547	}
1548
1549	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1550		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1551		/* add the src data to the sg_data */
1552		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
1553				(update_data_len - *curr_buff_cnt), 0, true,
1554				&areq_ctx->mlli_nents);
 1555		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
1556			goto fail_unmap_din;
1557	}
1558	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1559
1560	return 0;
1561
1562fail_unmap_din:
1563	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1564
1565unmap_curr_buff:
1566	if (*curr_buff_cnt)
1567		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1568
1569	return -ENOMEM;
1570}
1571
1572void cc_unmap_hash_request(struct device *dev, void *ctx,
1573			   struct scatterlist *src, bool do_revert)
1574{
1575	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1576	u32 *prev_len = cc_next_buf_cnt(areq_ctx);
1577
 1578	/* In case a pool was set, a table was
 1579	 * allocated and should be released
 1580	 */
1581	if (areq_ctx->mlli_params.curr_pool) {
1582		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
1583			&areq_ctx->mlli_params.mlli_dma_addr,
1584			areq_ctx->mlli_params.mlli_virt_addr);
1585		dma_pool_free(areq_ctx->mlli_params.curr_pool,
1586			      areq_ctx->mlli_params.mlli_virt_addr,
1587			      areq_ctx->mlli_params.mlli_dma_addr);
1588	}
1589
1590	if (src && areq_ctx->in_nents) {
1591		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
1592			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
1593		dma_unmap_sg(dev, src,
1594			     areq_ctx->in_nents, DMA_TO_DEVICE);
1595	}
1596
1597	if (*prev_len) {
1598		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
1599			sg_virt(areq_ctx->buff_sg),
1600			&sg_dma_address(areq_ctx->buff_sg),
1601			sg_dma_len(areq_ctx->buff_sg));
1602		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1603		if (!do_revert) {
1604			/* clean the previous data length for update
1605			 * operation
1606			 */
1607			*prev_len = 0;
1608		} else {
1609			areq_ctx->buff_index ^= 1;
1610		}
1611	}
1612}
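/*
 * Example (illustrative sketch only): a successful
 * cc_map_hash_request_final()/_update() is normally paired with
 * cc_unmap_hash_request(). do_revert == true rolls back the buffer-index
 * swap done at map time (error path), while do_revert == false clears the
 * previous-data length for the next update (normal completion):
 *
 *	cc_unmap_hash_request(dev, state, req->src, false);
 */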
1613
1614int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
1615{
1616	struct buff_mgr_handle *buff_mgr_handle;
1617	struct device *dev = drvdata_to_dev(drvdata);
1618
1619	buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
1620	if (!buff_mgr_handle)
1621		return -ENOMEM;
1622
1623	drvdata->buff_mgr_handle = buff_mgr_handle;
1624
1625	buff_mgr_handle->mlli_buffs_pool =
1626		dma_pool_create("dx_single_mlli_tables", dev,
1627				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1628				LLI_ENTRY_BYTE_SIZE,
1629				MLLI_TABLE_MIN_ALIGNMENT, 0);
1630
1631	if (!buff_mgr_handle->mlli_buffs_pool)
1632		goto error;
1633
1634	return 0;
1635
1636error:
1637	cc_buffer_mgr_fini(drvdata);
1638	return -ENOMEM;
1639}
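/*
 * Example (illustrative sketch only): the probe path allocates the MLLI
 * DMA pool once and tears it down with cc_buffer_mgr_fini() below;
 * cc_buffer_mgr_init() already calls the fini helper on its own error
 * path, so the caller only has to handle the return code:
 *
 *	rc = cc_buffer_mgr_init(drvdata);
 *	if (rc)
 *		goto err;	// hypothetical caller-side error label
 */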
1640
1641int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
1642{
1643	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
1644
1645	if (buff_mgr_handle) {
1646		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
1647		kfree(drvdata->buff_mgr_handle);
1648		drvdata->buff_mgr_handle = NULL;
1649	}
1650	return 0;
1651}