   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
   3
   4#include <crypto/internal/aead.h>
   5#include <crypto/authenc.h>
   6#include <crypto/scatterwalk.h>
   7#include <linux/dmapool.h>
   8#include <linux/dma-mapping.h>
   9
  10#include "cc_buffer_mgr.h"
  11#include "cc_lli_defs.h"
  12#include "cc_cipher.h"
  13#include "cc_hash.h"
  14#include "cc_aead.h"
  15
  16union buffer_array_entry {
  17	struct scatterlist *sgl;
  18	dma_addr_t buffer_dma;
  19};
  20
  21struct buffer_array {
  22	unsigned int num_of_buffers;
  23	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
  24	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
  25	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  26	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
  27	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
  28	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  29};
  30
  31static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
  32{
  33	switch (type) {
  34	case CC_DMA_BUF_NULL:
  35		return "BUF_NULL";
  36	case CC_DMA_BUF_DLLI:
  37		return "BUF_DLLI";
  38	case CC_DMA_BUF_MLLI:
  39		return "BUF_MLLI";
  40	default:
  41		return "BUF_INVALID";
  42	}
  43}
  44
  45/**
  46 * cc_copy_mac() - Copy MAC to temporary location
  47 *
  48 * @dev: device object
  49 * @req: aead request object
  50 * @dir: [IN] copy from/to sgl
  51 */
  52static void cc_copy_mac(struct device *dev, struct aead_request *req,
  53			enum cc_sg_cpy_direct dir)
  54{
  55	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
  56	u32 skip = req->assoclen + req->cryptlen;
  57
  58	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
  59			   (skip - areq_ctx->req_authsize), skip, dir);
  60}
  61
  62/**
  63 * cc_get_sgl_nents() - Get scatterlist number of entries.
  64 *
  65 * @dev: Device object
  66 * @sg_list: SG list
  67 * @nbytes: [IN] Total SGL data bytes.
  68 * @lbytes: [OUT] Returns the amount of bytes at the last entry
  69 *
  70 * Return:
  71 * Number of entries in the scatterlist
  72 */
  73static unsigned int cc_get_sgl_nents(struct device *dev,
  74				     struct scatterlist *sg_list,
  75				     unsigned int nbytes, u32 *lbytes)
  76{
  77	unsigned int nents = 0;
  78
  79	*lbytes = 0;
  80
  81	while (nbytes && sg_list) {
  82		nents++;
  83		/* get the number of bytes in the last entry */
  84		*lbytes = nbytes;
  85		nbytes -= (sg_list->length > nbytes) ?
  86				nbytes : sg_list->length;
  87		sg_list = sg_next(sg_list);
  88	}
  89
  90	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
  91	return nents;
  92}
  93
  94/**
  95 * cc_copy_sg_portion() - Copy scatter list data,
  96 * from to_skip to end, to dest and vice versa
  97 *
  98 * @dev: Device object
  99 * @dest: Buffer to copy to/from
 100 * @sg: SG list
 101 * @to_skip: Number of bytes to skip before copying
 102 * @end: Offset of last byte to copy
 103 * @direct: Transfer direction (true == from SG list to buffer, false == from
 104 *          buffer to SG list)
 105 */
 106void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 107			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
 108{
 109	u32 nents;
 110
 111	nents = sg_nents_for_len(sg, end);
 112	sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
 113		       (direct == CC_SG_TO_BUF));
 114}
 115
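/*
 * Render one contiguous DMA buffer into MLLI link-list entries, splitting
 * chunks larger than CC_MAX_MLLI_ENTRY_SIZE across several entries.
 * Fails with -ENOMEM if the total would exceed MAX_NUM_OF_TOTAL_MLLI_ENTRIES.
 */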
 116static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
 117				  u32 buff_size, u32 *curr_nents,
 118				  u32 **mlli_entry_pp)
 119{
 120	u32 *mlli_entry_p = *mlli_entry_pp;
 121	u32 new_nents;
 122
 123	/* Verify there is no memory overflow*/
 124	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
 125	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
 126		dev_err(dev, "Too many mlli entries. current %d max %d\n",
 127			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
 128		return -ENOMEM;
 129	}
 130
 131	/*handle buffer longer than 64 kbytes */
 132	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
 133		cc_lli_set_addr(mlli_entry_p, buff_dma);
 134		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
 135		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 136			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 137			mlli_entry_p[LLI_WORD1_OFFSET]);
 138		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
 139		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
 140		mlli_entry_p = mlli_entry_p + 2;
 141		(*curr_nents)++;
 142	}
 143	/*Last entry */
 144	cc_lli_set_addr(mlli_entry_p, buff_dma);
 145	cc_lli_set_size(mlli_entry_p, buff_size);
 146	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 147		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 148		mlli_entry_p[LLI_WORD1_OFFSET]);
 149	mlli_entry_p = mlli_entry_p + 2;
 150	*mlli_entry_pp = mlli_entry_p;
 151	(*curr_nents)++;
 152	return 0;
 153}
 154
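/*
 * Walk a DMA-mapped scatterlist and render each mapped entry (starting at
 * sgl_offset into the first one) into MLLI entries.
 */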
 155static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
 156				u32 sgl_data_len, u32 sgl_offset,
 157				u32 *curr_nents, u32 **mlli_entry_pp)
 158{
 159	struct scatterlist *curr_sgl = sgl;
 160	u32 *mlli_entry_p = *mlli_entry_pp;
 161	s32 rc = 0;
 162
 163	for ( ; (curr_sgl && sgl_data_len);
 164	      curr_sgl = sg_next(curr_sgl)) {
 165		u32 entry_data_len =
 166			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
 167				sg_dma_len(curr_sgl) - sgl_offset :
 168				sgl_data_len;
 169		sgl_data_len -= entry_data_len;
 170		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
 171					    sgl_offset, entry_data_len,
 172					    curr_nents, &mlli_entry_p);
 173		if (rc)
 174			return rc;
 175
 176		sgl_offset = 0;
 177	}
 178	*mlli_entry_pp = mlli_entry_p;
 179	return 0;
 180}
 181
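/*
 * Allocate an MLLI table from the DMA pool and populate it from all the
 * buffers queued in sg_data, updating the per-buffer mlli_nents counters
 * that feed the descriptor length fields.
 */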
 182static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
 183			    struct mlli_params *mlli_params, gfp_t flags)
 184{
 185	u32 *mlli_p;
 186	u32 total_nents = 0, prev_total_nents = 0;
 187	int rc = 0, i;
 188
 189	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
 190
 191	/* Allocate memory from the pointed pool */
 192	mlli_params->mlli_virt_addr =
 193		dma_pool_alloc(mlli_params->curr_pool, flags,
 194			       &mlli_params->mlli_dma_addr);
 195	if (!mlli_params->mlli_virt_addr) {
 196		dev_err(dev, "dma_pool_alloc() failed\n");
 197		rc = -ENOMEM;
 198		goto build_mlli_exit;
 199	}
 200	/* Point to start of MLLI */
 201	mlli_p = mlli_params->mlli_virt_addr;
  202	/* go over all SGs and link them into one MLLI table */
 203	for (i = 0; i < sg_data->num_of_buffers; i++) {
 204		union buffer_array_entry *entry = &sg_data->entry[i];
 205		u32 tot_len = sg_data->total_data_len[i];
 206		u32 offset = sg_data->offset[i];
 207
 208		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
 209					  &total_nents, &mlli_p);
 210		if (rc)
 211			return rc;
 212
 213		/* set last bit in the current table */
 214		if (sg_data->mlli_nents[i]) {
 215			/*Calculate the current MLLI table length for the
 216			 *length field in the descriptor
 217			 */
 218			*sg_data->mlli_nents[i] +=
 219				(total_nents - prev_total_nents);
 220			prev_total_nents = total_nents;
 221		}
 222	}
 223
 224	/* Set MLLI size for the bypass operation */
 225	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
 226
 227	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
 228		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
 229		mlli_params->mlli_len);
 230
 231build_mlli_exit:
 232	return rc;
 233}
 234
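/*
 * Queue a scatterlist (with its length, offset and mlli_nents counter) in
 * the buffer_array for later rendering by cc_generate_mlli().
 */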
 235static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 236			    unsigned int nents, struct scatterlist *sgl,
 237			    unsigned int data_len, unsigned int data_offset,
 238			    bool is_last_table, u32 *mlli_nents)
 239{
 240	unsigned int index = sgl_data->num_of_buffers;
 241
 242	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
 243		index, nents, sgl, data_len, is_last_table);
 244	sgl_data->nents[index] = nents;
 245	sgl_data->entry[index].sgl = sgl;
 246	sgl_data->offset[index] = data_offset;
 247	sgl_data->total_data_len[index] = data_len;
 248	sgl_data->is_last[index] = is_last_table;
 249	sgl_data->mlli_nents[index] = mlli_nents;
 250	if (sgl_data->mlli_nents[index])
 251		*sgl_data->mlli_nents[index] = 0;
 252	sgl_data->num_of_buffers++;
 253}
 254
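/*
 * DMA-map a scatterlist holding nbytes of data. Returns the number of
 * entries spanned (*nents), the number actually mapped (*mapped_nents) and
 * the byte count of the last entry (*lbytes); fails if the list exceeds
 * max_sg_nents entries.
 */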
 255static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 256		     unsigned int nbytes, int direction, u32 *nents,
 257		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 258{
 259	int ret = 0;
 260
 261	if (!nbytes) {
 262		*mapped_nents = 0;
 263		*lbytes = 0;
 264		*nents = 0;
 265		return 0;
 266	}
 267
 268	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
 269	if (*nents > max_sg_nents) {
 270		*nents = 0;
 271		dev_err(dev, "Too many fragments. current %d max %d\n",
 272			*nents, max_sg_nents);
 273		return -ENOMEM;
 274	}
 275
 276	ret = dma_map_sg(dev, sg, *nents, direction);
 277	if (!ret) {
 278		*nents = 0;
 279		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
 280		return -ENOMEM;
 281	}
 282
 283	*mapped_nents = ret;
 284
 285	return 0;
 286}
 287
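/*
 * Map the CCM configuration data block as a single-entry SG and, when
 * associated data is present, queue it for MLLI generation.
 */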
 288static int
 289cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
 290		     u8 *config_data, struct buffer_array *sg_data,
 291		     unsigned int assoclen)
 292{
 293	dev_dbg(dev, " handle additional data config set to DLLI\n");
 294	/* create sg for the current buffer */
 295	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
 296		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
 297	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
 298		dev_err(dev, "dma_map_sg() config buffer failed\n");
 299		return -ENOMEM;
 300	}
 301	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 302		&sg_dma_address(&areq_ctx->ccm_adata_sg),
 303		sg_page(&areq_ctx->ccm_adata_sg),
 304		sg_virt(&areq_ctx->ccm_adata_sg),
 305		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
 306	/* prepare for case of MLLI */
 307	if (assoclen > 0) {
 308		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
 309				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
 310				0, false, NULL);
 311	}
 312	return 0;
 313}
 314
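/*
 * Map the hash staging buffer (data accumulated from previous updates) as a
 * single-entry SG and queue it in case an MLLI table is needed.
 */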
 315static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
 316			   u8 *curr_buff, u32 curr_buff_cnt,
 317			   struct buffer_array *sg_data)
 318{
 319	dev_dbg(dev, " handle curr buff %x set to   DLLI\n", curr_buff_cnt);
 320	/* create sg for the current buffer */
 321	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
 322	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
 323		dev_err(dev, "dma_map_sg() src buffer failed\n");
 324		return -ENOMEM;
 325	}
 326	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 327		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
 328		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
 329		areq_ctx->buff_sg->length);
 330	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
 331	areq_ctx->curr_sg = areq_ctx->buff_sg;
 332	areq_ctx->in_nents = 0;
 333	/* prepare for case of MLLI */
 334	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
 335			false, NULL);
 336	return 0;
 337}
 338
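/*
 * Release all DMA resources taken by cc_map_cipher_request(): the IV
 * mapping, the MLLI table (if one was allocated) and the src/dst SGLs.
 */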
 339void cc_unmap_cipher_request(struct device *dev, void *ctx,
 340				unsigned int ivsize, struct scatterlist *src,
 341				struct scatterlist *dst)
 342{
 343	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 344
 345	if (req_ctx->gen_ctx.iv_dma_addr) {
 346		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 347			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
 348		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
 349				 ivsize, DMA_BIDIRECTIONAL);
 350	}
 351	/* Release pool */
 352	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
 353	    req_ctx->mlli_params.mlli_virt_addr) {
 354		dma_pool_free(req_ctx->mlli_params.curr_pool,
 355			      req_ctx->mlli_params.mlli_virt_addr,
 356			      req_ctx->mlli_params.mlli_dma_addr);
 357	}
 358
 359	if (src != dst) {
 360		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
 361		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
 362		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
 363		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 364	} else {
 365		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
 366		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 367	}
 368}
 369
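/*
 * DMA-map a cipher request: the IV (if any) and the src/dst scatterlists,
 * building an MLLI table whenever more than one entry is involved.
 * On failure, everything already mapped is unmapped again.
 */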
 370int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 371			  unsigned int ivsize, unsigned int nbytes,
 372			  void *info, struct scatterlist *src,
 373			  struct scatterlist *dst, gfp_t flags)
 374{
 375	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 376	struct mlli_params *mlli_params = &req_ctx->mlli_params;
 377	struct device *dev = drvdata_to_dev(drvdata);
 378	struct buffer_array sg_data;
 379	u32 dummy = 0;
 380	int rc = 0;
 381	u32 mapped_nents = 0;
 382	int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 383
 384	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
 385	mlli_params->curr_pool = NULL;
 386	sg_data.num_of_buffers = 0;
 387
 388	/* Map IV buffer */
 389	if (ivsize) {
 390		dump_byte_array("iv", info, ivsize);
 391		req_ctx->gen_ctx.iv_dma_addr =
 392			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
 393		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
 394			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 395				ivsize, info);
 396			return -ENOMEM;
 397		}
 398		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 399			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
 400	} else {
 401		req_ctx->gen_ctx.iv_dma_addr = 0;
 402	}
 403
 404	/* Map the src SGL */
 405	rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
 406		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
 407	if (rc)
 408		goto cipher_exit;
 409	if (mapped_nents > 1)
 410		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 411
 412	if (src == dst) {
 413		/* Handle inplace operation */
 414		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 415			req_ctx->out_nents = 0;
 416			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 417					nbytes, 0, true,
 418					&req_ctx->in_mlli_nents);
 419		}
 420	} else {
 421		/* Map the dst sg */
 422		rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
 423			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
 424			       &dummy, &mapped_nents);
 425		if (rc)
 426			goto cipher_exit;
 427		if (mapped_nents > 1)
 428			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 429
 430		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 431			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 432					nbytes, 0, true,
 433					&req_ctx->in_mlli_nents);
 434			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
 435					nbytes, 0, true,
 436					&req_ctx->out_mlli_nents);
 437		}
 438	}
 439
 440	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 441		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 442		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 443		if (rc)
 444			goto cipher_exit;
 445	}
 446
 447	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
 448		cc_dma_buf_type(req_ctx->dma_buf_type));
 449
 450	return 0;
 451
 452cipher_exit:
 453	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
 454	return rc;
 455}
 456
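/*
 * Release all DMA mappings taken by cc_map_aead_request(): MAC buffer,
 * CCM/GCM helper blocks, IV, MLLI pool buffer and the src/dst SGLs, and
 * restore the MAC from its backup for the coherent in-place decrypt case.
 */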
 457void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 458{
 459	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
 460	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 461	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 462	int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 463
 464	if (areq_ctx->mac_buf_dma_addr) {
 465		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
 466				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
 467	}
 468
 469	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
 470		if (areq_ctx->hkey_dma_addr) {
 471			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
 472					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
 473		}
 474
 475		if (areq_ctx->gcm_block_len_dma_addr) {
 476			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
 477					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 478		}
 479
 480		if (areq_ctx->gcm_iv_inc1_dma_addr) {
 481			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
 482					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 483		}
 484
 485		if (areq_ctx->gcm_iv_inc2_dma_addr) {
 486			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
 487					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 488		}
 489	}
 490
 491	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 492		if (areq_ctx->ccm_iv0_dma_addr) {
 493			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
 494					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 495		}
 496
 497		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
 498	}
 499	if (areq_ctx->gen_ctx.iv_dma_addr) {
 500		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 501				 hw_iv_size, DMA_BIDIRECTIONAL);
 502		kfree_sensitive(areq_ctx->gen_ctx.iv);
 503	}
 504
 505	/* Release pool */
 506	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
 507	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
 508	    (areq_ctx->mlli_params.mlli_virt_addr)) {
 509		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
 510			&areq_ctx->mlli_params.mlli_dma_addr,
 511			areq_ctx->mlli_params.mlli_virt_addr);
 512		dma_pool_free(areq_ctx->mlli_params.curr_pool,
 513			      areq_ctx->mlli_params.mlli_virt_addr,
 514			      areq_ctx->mlli_params.mlli_dma_addr);
 515	}
 516
 517	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
 518		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 519		areq_ctx->assoclen, req->cryptlen);
 520
 521	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
 522	if (req->src != req->dst) {
 523		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 524			sg_virt(req->dst));
 525		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
 526	}
 527	if (drvdata->coherent &&
 528	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
 529	    req->src == req->dst) {
  530		/* copy back the mac from its temporary location to deal with a
  531		 * possible data memory overwrite caused by a cache coherence
  532		 * problem.
  533		 */
 534		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
 535	}
 536}
 537
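/*
 * The ICV is considered fragmented when the list has more than one entry
 * and the last entry holds fewer bytes than the ICV itself.
 */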
 538static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
 539			   u32 last_entry_data_size)
 540{
 541	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
 542}
 543
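/*
 * Copy the request IV into a DMA-safe buffer and map it; a missing IV just
 * clears the generator context.
 */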
 544static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
 545			    struct aead_request *req,
 546			    struct buffer_array *sg_data,
 547			    bool is_last, bool do_chain)
 548{
 549	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
 550	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 551	struct device *dev = drvdata_to_dev(drvdata);
 552	gfp_t flags = cc_gfp_flags(&req->base);
 553	int rc = 0;
 554
 555	if (!req->iv) {
 556		areq_ctx->gen_ctx.iv_dma_addr = 0;
 557		areq_ctx->gen_ctx.iv = NULL;
 558		goto chain_iv_exit;
 559	}
 560
 561	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
 562	if (!areq_ctx->gen_ctx.iv)
 563		return -ENOMEM;
 564
 565	areq_ctx->gen_ctx.iv_dma_addr =
 566		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
 567			       DMA_BIDIRECTIONAL);
 568	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 569		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 570			hw_iv_size, req->iv);
 571		kfree_sensitive(areq_ctx->gen_ctx.iv);
 572		areq_ctx->gen_ctx.iv = NULL;
 573		rc = -ENOMEM;
 574		goto chain_iv_exit;
 575	}
 576
 577	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 578		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
 579
 580chain_iv_exit:
 581	return rc;
 582}
 583
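/*
 * Account for the associated data: count the SG entries it spans, pick DLLI
 * or MLLI and, in the MLLI case, queue the entries in sg_data.
 */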
 584static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 585			       struct aead_request *req,
 586			       struct buffer_array *sg_data,
 587			       bool is_last, bool do_chain)
 588{
 589	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
 590	int rc = 0;
 591	int mapped_nents = 0;
 592	struct device *dev = drvdata_to_dev(drvdata);
 593
 594	if (!sg_data) {
 595		rc = -EINVAL;
 596		goto chain_assoc_exit;
 597	}
 598
 599	if (areq_ctx->assoclen == 0) {
 600		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
 601		areq_ctx->assoc.nents = 0;
 602		areq_ctx->assoc.mlli_nents = 0;
 603		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
 604			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 605			areq_ctx->assoc.nents);
 606		goto chain_assoc_exit;
 607	}
 608
 609	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
 610	if (mapped_nents < 0)
 611		return mapped_nents;
 612
 613	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 614		dev_err(dev, "Too many fragments. current %d max %d\n",
 615			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 616		return -ENOMEM;
 617	}
 618	areq_ctx->assoc.nents = mapped_nents;
 619
  620	/* in the CCM case we have an additional entry for the
  621	 * ccm header configuration
  622	 */
 623	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 624		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 625			dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
 626				(areq_ctx->assoc.nents + 1),
 627				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 628			rc = -ENOMEM;
 629			goto chain_assoc_exit;
 630		}
 631	}
 632
 633	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
 634		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
 635	else
 636		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 637
 638	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 639		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
 640			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 641			areq_ctx->assoc.nents);
 642		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
 643				areq_ctx->assoclen, 0, is_last,
 644				&areq_ctx->assoc.mlli_nents);
 645		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 646	}
 647
 648chain_assoc_exit:
 649	return rc;
 650}
 651
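/* DLLI data path: compute the ICV address within the single data SG entry. */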
 652static void cc_prepare_aead_data_dlli(struct aead_request *req,
 653				      u32 *src_last_bytes, u32 *dst_last_bytes)
 654{
 655	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
 656	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 657	unsigned int authsize = areq_ctx->req_authsize;
 658	struct scatterlist *sg;
 659	ssize_t offset;
 660
 661	areq_ctx->is_icv_fragmented = false;
 662
 663	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 664		sg = areq_ctx->src_sgl;
 665		offset = *src_last_bytes - authsize;
 666	} else {
 667		sg = areq_ctx->dst_sgl;
 668		offset = *dst_last_bytes - authsize;
 669	}
 670
 671	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
 672	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
 673}
 674
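/*
 * MLLI data path: queue the src/dst data for the MLLI table and locate the
 * ICV; a fragmented ICV is backed up to a contiguous buffer so it can be
 * handled by CPU copy/compare.
 */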
 675static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
 676				      struct aead_request *req,
 677				      struct buffer_array *sg_data,
 678				      u32 *src_last_bytes, u32 *dst_last_bytes,
 679				      bool is_last_table)
 680{
 681	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
 682	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 683	unsigned int authsize = areq_ctx->req_authsize;
 684	struct device *dev = drvdata_to_dev(drvdata);
 685	struct scatterlist *sg;
 686
 687	if (req->src == req->dst) {
 688		/*INPLACE*/
 689		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 690				areq_ctx->src_sgl, areq_ctx->cryptlen,
 691				areq_ctx->src_offset, is_last_table,
 692				&areq_ctx->src.mlli_nents);
 693
 694		areq_ctx->is_icv_fragmented =
 695			cc_is_icv_frag(areq_ctx->src.nents, authsize,
 696				       *src_last_bytes);
 697
 698		if (areq_ctx->is_icv_fragmented) {
 699			/* Backup happens only when ICV is fragmented, ICV
 700			 * verification is made by CPU compare in order to
 701			 * simplify MAC verification upon request completion
 702			 */
 703			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
  704				/* On coherent platforms (e.g. ACP) the
  705				 * ICV has already been copied for any
  706				 * INPLACE-DECRYPT operation, so this
  707				 * copy is skipped here.
  708				 */
 709				if (!drvdata->coherent)
 710					cc_copy_mac(dev, req, CC_SG_TO_BUF);
 711
 712				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 713			} else {
 714				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 715				areq_ctx->icv_dma_addr =
 716					areq_ctx->mac_buf_dma_addr;
 717			}
 718		} else { /* Contig. ICV */
 719			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  720			/* Should handle the case where the sg is not contiguous */
 721			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 722				(*src_last_bytes - authsize);
 723			areq_ctx->icv_virt_addr = sg_virt(sg) +
 724				(*src_last_bytes - authsize);
 725		}
 726
 727	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 728		/*NON-INPLACE and DECRYPT*/
 729		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 730				areq_ctx->src_sgl, areq_ctx->cryptlen,
 731				areq_ctx->src_offset, is_last_table,
 732				&areq_ctx->src.mlli_nents);
 733		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 734				areq_ctx->dst_sgl, areq_ctx->cryptlen,
 735				areq_ctx->dst_offset, is_last_table,
 736				&areq_ctx->dst.mlli_nents);
 737
 738		areq_ctx->is_icv_fragmented =
 739			cc_is_icv_frag(areq_ctx->src.nents, authsize,
 740				       *src_last_bytes);
 741		/* Backup happens only when ICV is fragmented, ICV
 742
 743		 * verification is made by CPU compare in order to simplify
 744		 * MAC verification upon request completion
 745		 */
 746		if (areq_ctx->is_icv_fragmented) {
 747			cc_copy_mac(dev, req, CC_SG_TO_BUF);
 748			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 749
 750		} else { /* Contig. ICV */
 751			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  752			/* Should handle the case where the sg is not contiguous */
 753			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 754				(*src_last_bytes - authsize);
 755			areq_ctx->icv_virt_addr = sg_virt(sg) +
 756				(*src_last_bytes - authsize);
 757		}
 758
 759	} else {
 760		/*NON-INPLACE and ENCRYPT*/
 761		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 762				areq_ctx->dst_sgl, areq_ctx->cryptlen,
 763				areq_ctx->dst_offset, is_last_table,
 764				&areq_ctx->dst.mlli_nents);
 765		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 766				areq_ctx->src_sgl, areq_ctx->cryptlen,
 767				areq_ctx->src_offset, is_last_table,
 768				&areq_ctx->src.mlli_nents);
 769
 770		areq_ctx->is_icv_fragmented =
 771			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
 772				       *dst_last_bytes);
 773
 774		if (!areq_ctx->is_icv_fragmented) {
 775			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
 776			/* Contig. ICV */
 777			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 778				(*dst_last_bytes - authsize);
 779			areq_ctx->icv_virt_addr = sg_virt(sg) +
 780				(*dst_last_bytes - authsize);
 781		} else {
 782			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
 783			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 784		}
 785	}
 786}
 787
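/*
 * Chain the data portion of an AEAD request: skip past the associated data
 * in src/dst, validate the entry counts and prepare either the DLLI or the
 * MLLI layout.
 */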
 788static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 789			      struct aead_request *req,
 790			      struct buffer_array *sg_data,
 791			      bool is_last_table, bool do_chain)
 792{
 793	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
 794	struct device *dev = drvdata_to_dev(drvdata);
 795	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 796	unsigned int authsize = areq_ctx->req_authsize;
 797	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
 798	int rc = 0;
 799	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
 800	u32 offset = 0;
 801	/* non-inplace mode */
 802	unsigned int size_for_map = req->assoclen + req->cryptlen;
 803	u32 sg_index = 0;
 804	u32 size_to_skip = req->assoclen;
 805	struct scatterlist *sgl;
 806
 807	offset = size_to_skip;
 808
 809	if (!sg_data)
 810		return -EINVAL;
 811
 812	areq_ctx->src_sgl = req->src;
 813	areq_ctx->dst_sgl = req->dst;
 814
 815	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 816			authsize : 0;
 817	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
 818					    &src_last_bytes);
 819	sg_index = areq_ctx->src_sgl->length;
 820	//check where the data starts
 821	while (src_mapped_nents && (sg_index <= size_to_skip)) {
 822		src_mapped_nents--;
 823		offset -= areq_ctx->src_sgl->length;
 824		sgl = sg_next(areq_ctx->src_sgl);
 825		if (!sgl)
 826			break;
 827		areq_ctx->src_sgl = sgl;
 828		sg_index += areq_ctx->src_sgl->length;
 829	}
 830	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 831		dev_err(dev, "Too many fragments. current %d max %d\n",
 832			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 833		return -ENOMEM;
 834	}
 835
 836	areq_ctx->src.nents = src_mapped_nents;
 837
 838	areq_ctx->src_offset = offset;
 839
 840	if (req->src != req->dst) {
 841		size_for_map = req->assoclen + req->cryptlen;
 842
 843		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
 844			size_for_map += authsize;
 845		else
 846			size_for_map -= authsize;
 847
 848		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
 849			       &areq_ctx->dst.mapped_nents,
 850			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 851			       &dst_mapped_nents);
 852		if (rc)
 853			goto chain_data_exit;
 854	}
 855
 856	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
 857					    &dst_last_bytes);
 858	sg_index = areq_ctx->dst_sgl->length;
 859	offset = size_to_skip;
 860
 861	//check where the data starts
 862	while (dst_mapped_nents && sg_index <= size_to_skip) {
 863		dst_mapped_nents--;
 864		offset -= areq_ctx->dst_sgl->length;
 865		sgl = sg_next(areq_ctx->dst_sgl);
 866		if (!sgl)
 867			break;
 868		areq_ctx->dst_sgl = sgl;
 869		sg_index += areq_ctx->dst_sgl->length;
 870	}
 871	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 872		dev_err(dev, "Too many fragments. current %d max %d\n",
 873			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 874		return -ENOMEM;
 875	}
 876	areq_ctx->dst.nents = dst_mapped_nents;
 877	areq_ctx->dst_offset = offset;
 878	if (src_mapped_nents > 1 ||
 879	    dst_mapped_nents  > 1 ||
 880	    do_chain) {
 881		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
 882		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
 883					  &src_last_bytes, &dst_last_bytes,
 884					  is_last_table);
 885	} else {
 886		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
 887		cc_prepare_aead_data_dlli(req, &src_last_bytes,
 888					  &dst_last_bytes);
 889	}
 890
 891chain_data_exit:
 892	return rc;
 893}
 894
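/*
 * Lay out the MLLI tables in SRAM (assoc first, then src/dst depending on
 * direction) and, for the non-single-pass flow, fold the data nents into
 * the assoc table length.
 */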
 895static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
 896				      struct aead_request *req)
 897{
 898	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
 899	u32 curr_mlli_size = 0;
 900
 901	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 902		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
 903		curr_mlli_size = areq_ctx->assoc.mlli_nents *
 904						LLI_ENTRY_BYTE_SIZE;
 905	}
 906
 907	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
 908		/*Inplace case dst nents equal to src nents*/
 909		if (req->src == req->dst) {
 910			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
 911			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
 912								curr_mlli_size;
 913			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
 914			if (!areq_ctx->is_single_pass)
 915				areq_ctx->assoc.mlli_nents +=
 916					areq_ctx->src.mlli_nents;
 917		} else {
 918			if (areq_ctx->gen_ctx.op_type ==
 919					DRV_CRYPTO_DIRECTION_DECRYPT) {
 920				areq_ctx->src.sram_addr =
 921						drvdata->mlli_sram_addr +
 922								curr_mlli_size;
 923				areq_ctx->dst.sram_addr =
 924						areq_ctx->src.sram_addr +
 925						areq_ctx->src.mlli_nents *
 926						LLI_ENTRY_BYTE_SIZE;
 927				if (!areq_ctx->is_single_pass)
 928					areq_ctx->assoc.mlli_nents +=
 929						areq_ctx->src.mlli_nents;
 930			} else {
 931				areq_ctx->dst.sram_addr =
 932						drvdata->mlli_sram_addr +
 933								curr_mlli_size;
 934				areq_ctx->src.sram_addr =
 935						areq_ctx->dst.sram_addr +
 936						areq_ctx->dst.mlli_nents *
 937						LLI_ENTRY_BYTE_SIZE;
 938				if (!areq_ctx->is_single_pass)
 939					areq_ctx->assoc.mlli_nents +=
 940						areq_ctx->dst.mlli_nents;
 941			}
 942		}
 943	}
 944}
 945
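/*
 * Map every DMA resource of an AEAD request: MAC buffer, CCM/GCM helper
 * blocks, IV, associated data and the src/dst data, then generate the MLLI
 * table(s) if any part is fragmented.
 */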
 946int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 947{
 948	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
 949	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 950	struct device *dev = drvdata_to_dev(drvdata);
 951	struct buffer_array sg_data;
 952	unsigned int authsize = areq_ctx->req_authsize;
 953	int rc = 0;
 954	dma_addr_t dma_addr;
 955	u32 mapped_nents = 0;
 956	u32 dummy = 0; /*used for the assoc data fragments */
 957	u32 size_to_map;
 958	gfp_t flags = cc_gfp_flags(&req->base);
 959
 960	mlli_params->curr_pool = NULL;
 961	sg_data.num_of_buffers = 0;
 962
  963	/* copy the mac to a temporary location to deal with a possible
  964	 * data memory overwrite caused by a cache coherence problem.
  965	 */
 966	if (drvdata->coherent &&
 967	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
 968	    req->src == req->dst)
 969		cc_copy_mac(dev, req, CC_SG_TO_BUF);
 970
  971	/* calculate the size for the cipher; the ICV is removed in decrypt */
 972	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
 973				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 974				req->cryptlen :
 975				(req->cryptlen - authsize);
 976
 977	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
 978				  DMA_BIDIRECTIONAL);
 979	if (dma_mapping_error(dev, dma_addr)) {
 980		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
 981			MAX_MAC_SIZE, areq_ctx->mac_buf);
 982		rc = -ENOMEM;
 983		goto aead_map_failure;
 984	}
 985	areq_ctx->mac_buf_dma_addr = dma_addr;
 986
 987	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 988		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
 989
 990		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
 991					  DMA_TO_DEVICE);
 992
 993		if (dma_mapping_error(dev, dma_addr)) {
 994			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
 995				AES_BLOCK_SIZE, addr);
 996			areq_ctx->ccm_iv0_dma_addr = 0;
 997			rc = -ENOMEM;
 998			goto aead_map_failure;
 999		}
1000		areq_ctx->ccm_iv0_dma_addr = dma_addr;
1001
1002		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
1003					  &sg_data, areq_ctx->assoclen);
1004		if (rc)
1005			goto aead_map_failure;
1006	}
1007
1008	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1009		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
1010					  DMA_BIDIRECTIONAL);
1011		if (dma_mapping_error(dev, dma_addr)) {
1012			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
1013				AES_BLOCK_SIZE, areq_ctx->hkey);
1014			rc = -ENOMEM;
1015			goto aead_map_failure;
1016		}
1017		areq_ctx->hkey_dma_addr = dma_addr;
1018
1019		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
1020					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1021		if (dma_mapping_error(dev, dma_addr)) {
1022			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1023				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1024			rc = -ENOMEM;
1025			goto aead_map_failure;
1026		}
1027		areq_ctx->gcm_block_len_dma_addr = dma_addr;
1028
1029		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
1030					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1031
1032		if (dma_mapping_error(dev, dma_addr)) {
1033			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1034				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
1035			areq_ctx->gcm_iv_inc1_dma_addr = 0;
1036			rc = -ENOMEM;
1037			goto aead_map_failure;
1038		}
1039		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
1040
1041		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
1042					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1043
1044		if (dma_mapping_error(dev, dma_addr)) {
1045			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1046				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
1047			areq_ctx->gcm_iv_inc2_dma_addr = 0;
1048			rc = -ENOMEM;
1049			goto aead_map_failure;
1050		}
1051		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
1052	}
1053
1054	size_to_map = req->cryptlen + req->assoclen;
1055	/* If we do in-place encryption, we also need the auth tag */
1056	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
1057	   (req->src == req->dst)) {
1058		size_to_map += authsize;
1059	}
1060
1061	rc = cc_map_sg(dev, req->src, size_to_map,
1062		       (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
1063		       &areq_ctx->src.mapped_nents,
1064		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
1065			LLI_MAX_NUM_OF_DATA_ENTRIES),
1066		       &dummy, &mapped_nents);
1067	if (rc)
1068		goto aead_map_failure;
1069
1070	if (areq_ctx->is_single_pass) {
1071		/*
1072		 * Create MLLI table for:
1073		 *   (1) Assoc. data
1074		 *   (2) Src/Dst SGLs
1075		 *   Note: IV is contg. buffer (not an SGL)
1076		 */
1077		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1078		if (rc)
1079			goto aead_map_failure;
1080		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
1081		if (rc)
1082			goto aead_map_failure;
1083		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
1084		if (rc)
1085			goto aead_map_failure;
1086	} else { /* DOUBLE-PASS flow */
1087		/*
1088		 * Prepare MLLI table(s) in this order:
1089		 *
1090		 * If ENCRYPT/DECRYPT (inplace):
1091		 *   (1) MLLI table for assoc
1092		 *   (2) IV entry (chained right after end of assoc)
1093		 *   (3) MLLI for src/dst (inplace operation)
1094		 *
1095		 * If ENCRYPT (non-inplace)
1096		 *   (1) MLLI table for assoc
1097		 *   (2) IV entry (chained right after end of assoc)
1098		 *   (3) MLLI for dst
1099		 *   (4) MLLI for src
1100		 *
1101		 * If DECRYPT (non-inplace)
1102		 *   (1) MLLI table for assoc
1103		 *   (2) IV entry (chained right after end of assoc)
1104		 *   (3) MLLI for src
1105		 *   (4) MLLI for dst
1106		 */
1107		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1108		if (rc)
1109			goto aead_map_failure;
1110		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
1111		if (rc)
1112			goto aead_map_failure;
1113		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
1114		if (rc)
1115			goto aead_map_failure;
1116	}
1117
 1118	/* MLLI support - start building the MLLI according to the above
 1119	 * results
 1120	 */
1121	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1122	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1123		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1124		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1125		if (rc)
1126			goto aead_map_failure;
1127
1128		cc_update_aead_mlli_nents(drvdata, req);
1129		dev_dbg(dev, "assoc params mn %d\n",
1130			areq_ctx->assoc.mlli_nents);
1131		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
1132		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
1133	}
1134	return 0;
1135
1136aead_map_failure:
1137	cc_unmap_aead_request(dev, req);
1138	return rc;
1139}
1140
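/*
 * Map the data for a final/finup hash operation: the staging buffer (if it
 * holds data) plus the remaining source data, generating an MLLI table when
 * more than one entry is involved.
 */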
1141int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
1142			      struct scatterlist *src, unsigned int nbytes,
1143			      bool do_update, gfp_t flags)
1144{
1145	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1146	struct device *dev = drvdata_to_dev(drvdata);
1147	u8 *curr_buff = cc_hash_buf(areq_ctx);
1148	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1149	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1150	struct buffer_array sg_data;
1151	int rc = 0;
1152	u32 dummy = 0;
1153	u32 mapped_nents = 0;
1154
1155	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
1156		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1157	/* Init the type of the dma buffer */
1158	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1159	mlli_params->curr_pool = NULL;
1160	sg_data.num_of_buffers = 0;
1161	areq_ctx->in_nents = 0;
1162
1163	if (nbytes == 0 && *curr_buff_cnt == 0) {
1164		/* nothing to do */
1165		return 0;
1166	}
1167
1168	/* map the previous buffer */
1169	if (*curr_buff_cnt) {
1170		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1171				     &sg_data);
1172		if (rc)
1173			return rc;
1174	}
1175
1176	if (src && nbytes > 0 && do_update) {
1177		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
1178			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
1179			       &dummy, &mapped_nents);
1180		if (rc)
1181			goto unmap_curr_buff;
1182		if (src && mapped_nents == 1 &&
1183		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1184			memcpy(areq_ctx->buff_sg, src,
1185			       sizeof(struct scatterlist));
1186			areq_ctx->buff_sg->length = nbytes;
1187			areq_ctx->curr_sg = areq_ctx->buff_sg;
1188			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1189		} else {
1190			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1191		}
1192	}
1193
1194	/*build mlli */
1195	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1196		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1197		/* add the src data to the sg_data */
1198		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
1199				0, true, &areq_ctx->mlli_nents);
1200		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1201		if (rc)
1202			goto fail_unmap_din;
1203	}
1204	/* change the buffer index for the unmap function */
1205	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1206	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
1207		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
1208	return 0;
1209
1210fail_unmap_din:
1211	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1212
1213unmap_curr_buff:
1214	if (*curr_buff_cnt)
1215		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1216
1217	return rc;
1218}
1219
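/*
 * Map the data for a hash update: stash the sub-block residue for the next
 * update, then map the staging buffer and the block-aligned part of the new
 * data. Returns 1 when everything fit in the staging buffer and no HW pass
 * is needed.
 */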
1220int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
1221			       struct scatterlist *src, unsigned int nbytes,
1222			       unsigned int block_size, gfp_t flags)
1223{
1224	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1225	struct device *dev = drvdata_to_dev(drvdata);
1226	u8 *curr_buff = cc_hash_buf(areq_ctx);
1227	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1228	u8 *next_buff = cc_next_buf(areq_ctx);
1229	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
1230	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1231	unsigned int update_data_len;
1232	u32 total_in_len = nbytes + *curr_buff_cnt;
1233	struct buffer_array sg_data;
1234	unsigned int swap_index = 0;
1235	int rc = 0;
1236	u32 dummy = 0;
1237	u32 mapped_nents = 0;
1238
1239	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
1240		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1241	/* Init the type of the dma buffer */
1242	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1243	mlli_params->curr_pool = NULL;
1244	areq_ctx->curr_sg = NULL;
1245	sg_data.num_of_buffers = 0;
1246	areq_ctx->in_nents = 0;
1247
1248	if (total_in_len < block_size) {
1249		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
1250			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
1251		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
1252		sg_copy_to_buffer(src, areq_ctx->in_nents,
1253				  &curr_buff[*curr_buff_cnt], nbytes);
1254		*curr_buff_cnt += nbytes;
1255		return 1;
1256	}
1257
1258	/* Calculate the residue size*/
1259	*next_buff_cnt = total_in_len & (block_size - 1);
1260	/* update data len */
1261	update_data_len = total_in_len - *next_buff_cnt;
1262
1263	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
1264		*next_buff_cnt, update_data_len);
1265
1266	/* Copy the new residue to next buffer */
1267	if (*next_buff_cnt) {
1268		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
1269			next_buff, (update_data_len - *curr_buff_cnt),
1270			*next_buff_cnt);
1271		cc_copy_sg_portion(dev, next_buff, src,
1272				   (update_data_len - *curr_buff_cnt),
1273				   nbytes, CC_SG_TO_BUF);
1274		/* change the buffer index for next operation */
1275		swap_index = 1;
1276	}
1277
1278	if (*curr_buff_cnt) {
1279		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1280				     &sg_data);
1281		if (rc)
1282			return rc;
1283		/* change the buffer index for next operation */
1284		swap_index = 1;
1285	}
1286
1287	if (update_data_len > *curr_buff_cnt) {
1288		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
1289			       DMA_TO_DEVICE, &areq_ctx->in_nents,
1290			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
1291			       &mapped_nents);
1292		if (rc)
1293			goto unmap_curr_buff;
1294		if (mapped_nents == 1 &&
1295		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1296			/* only one entry in the SG and no previous data */
1297			memcpy(areq_ctx->buff_sg, src,
1298			       sizeof(struct scatterlist));
1299			areq_ctx->buff_sg->length = update_data_len;
1300			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1301			areq_ctx->curr_sg = areq_ctx->buff_sg;
1302		} else {
1303			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1304		}
1305	}
1306
1307	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1308		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1309		/* add the src data to the sg_data */
1310		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
1311				(update_data_len - *curr_buff_cnt), 0, true,
1312				&areq_ctx->mlli_nents);
1313		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1314		if (rc)
1315			goto fail_unmap_din;
1316	}
1317	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1318
1319	return 0;
1320
1321fail_unmap_din:
1322	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1323
1324unmap_curr_buff:
1325	if (*curr_buff_cnt)
1326		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1327
1328	return rc;
1329}
1330
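/*
 * Release the MLLI table and the source/staging buffer mappings taken by
 * the hash map functions; do_revert restores the buffer index instead of
 * clearing the staged length.
 */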
1331void cc_unmap_hash_request(struct device *dev, void *ctx,
1332			   struct scatterlist *src, bool do_revert)
1333{
1334	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1335	u32 *prev_len = cc_next_buf_cnt(areq_ctx);
1336
1337	/*In case a pool was set, a table was
1338	 *allocated and should be released
1339	 */
1340	if (areq_ctx->mlli_params.curr_pool) {
1341		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
1342			&areq_ctx->mlli_params.mlli_dma_addr,
1343			areq_ctx->mlli_params.mlli_virt_addr);
1344		dma_pool_free(areq_ctx->mlli_params.curr_pool,
1345			      areq_ctx->mlli_params.mlli_virt_addr,
1346			      areq_ctx->mlli_params.mlli_dma_addr);
1347	}
1348
1349	if (src && areq_ctx->in_nents) {
1350		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
1351			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
1352		dma_unmap_sg(dev, src,
1353			     areq_ctx->in_nents, DMA_TO_DEVICE);
1354	}
1355
1356	if (*prev_len) {
1357		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
1358			sg_virt(areq_ctx->buff_sg),
1359			&sg_dma_address(areq_ctx->buff_sg),
1360			sg_dma_len(areq_ctx->buff_sg));
1361		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1362		if (!do_revert) {
1363			/* clean the previous data length for update
1364			 * operation
1365			 */
1366			*prev_len = 0;
1367		} else {
1368			areq_ctx->buff_index ^= 1;
1369		}
1370	}
1371}
1372
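/* Create the DMA pool used for MLLI tables. */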
1373int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
1374{
1375	struct device *dev = drvdata_to_dev(drvdata);
1376
1377	drvdata->mlli_buffs_pool =
1378		dma_pool_create("dx_single_mlli_tables", dev,
1379				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1380				LLI_ENTRY_BYTE_SIZE,
1381				MLLI_TABLE_MIN_ALIGNMENT, 0);
1382
1383	if (!drvdata->mlli_buffs_pool)
1384		return -ENOMEM;
1385
1386	return 0;
1387}
1388
1389int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
1390{
1391	dma_pool_destroy(drvdata->mlli_buffs_pool);
1392	return 0;
1393}
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
   3
   4#include <crypto/internal/aead.h>
   5#include <crypto/authenc.h>
   6#include <crypto/scatterwalk.h>
   7#include <linux/dmapool.h>
   8#include <linux/dma-mapping.h>
   9
  10#include "cc_buffer_mgr.h"
  11#include "cc_lli_defs.h"
  12#include "cc_cipher.h"
  13#include "cc_hash.h"
  14#include "cc_aead.h"
  15
  16enum dma_buffer_type {
  17	DMA_NULL_TYPE = -1,
  18	DMA_SGL_TYPE = 1,
  19	DMA_BUFF_TYPE = 2,
  20};
  21
  22struct buff_mgr_handle {
  23	struct dma_pool *mlli_buffs_pool;
  24};
  25
  26union buffer_array_entry {
  27	struct scatterlist *sgl;
  28	dma_addr_t buffer_dma;
  29};
  30
  31struct buffer_array {
  32	unsigned int num_of_buffers;
  33	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
  34	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
  35	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  36	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
  37	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
  38	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
  39	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  40};
  41
  42static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
  43{
  44	switch (type) {
  45	case CC_DMA_BUF_NULL:
  46		return "BUF_NULL";
  47	case CC_DMA_BUF_DLLI:
  48		return "BUF_DLLI";
  49	case CC_DMA_BUF_MLLI:
  50		return "BUF_MLLI";
  51	default:
  52		return "BUF_INVALID";
  53	}
  54}
  55
  56/**
  57 * cc_copy_mac() - Copy MAC to temporary location
  58 *
  59 * @dev: device object
  60 * @req: aead request object
  61 * @dir: [IN] copy from/to sgl
  62 */
  63static void cc_copy_mac(struct device *dev, struct aead_request *req,
  64			enum cc_sg_cpy_direct dir)
  65{
  66	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  67	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  68	u32 skip = req->assoclen + req->cryptlen;
  69
  70	if (areq_ctx->is_gcm4543)
  71		skip += crypto_aead_ivsize(tfm);
  72
  73	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
  74			   (skip - areq_ctx->req_authsize), skip, dir);
  75}
  76
  77/**
  78 * cc_get_sgl_nents() - Get scatterlist number of entries.
  79 *
 
  80 * @sg_list: SG list
  81 * @nbytes: [IN] Total SGL data bytes.
  82 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 
 
 
  83 */
  84static unsigned int cc_get_sgl_nents(struct device *dev,
  85				     struct scatterlist *sg_list,
  86				     unsigned int nbytes, u32 *lbytes,
  87				     bool *is_chained)
  88{
  89	unsigned int nents = 0;
  90
 
 
  91	while (nbytes && sg_list) {
  92		if (sg_list->length) {
  93			nents++;
  94			/* get the number of bytes in the last entry */
  95			*lbytes = nbytes;
  96			nbytes -= (sg_list->length > nbytes) ?
  97					nbytes : sg_list->length;
  98			sg_list = sg_next(sg_list);
  99		} else {
 100			sg_list = (struct scatterlist *)sg_page(sg_list);
 101			if (is_chained)
 102				*is_chained = true;
 103		}
 104	}
 
 105	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
 106	return nents;
 107}
 108
 109/**
 110 * cc_zero_sgl() - Zero scatter scatter list data.
 111 *
 112 * @sgl:
 113 */
 114void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
 115{
 116	struct scatterlist *current_sg = sgl;
 117	int sg_index = 0;
 118
 119	while (sg_index <= data_len) {
 120		if (!current_sg) {
 121			/* reached the end of the sgl --> just return back */
 122			return;
 123		}
 124		memset(sg_virt(current_sg), 0, current_sg->length);
 125		sg_index += current_sg->length;
 126		current_sg = sg_next(current_sg);
 127	}
 128}
 129
 130/**
 131 * cc_copy_sg_portion() - Copy scatter list data,
 132 * from to_skip to end, to dest and vice versa
 133 *
 134 * @dest:
 135 * @sg:
 136 * @to_skip:
 137 * @end:
 138 * @direct:
 
 
 139 */
 140void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 141			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
 142{
 143	u32 nents, lbytes;
 144
 145	nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
 146	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
 147		       (direct == CC_SG_TO_BUF));
 148}
 149
 150static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
 151				  u32 buff_size, u32 *curr_nents,
 152				  u32 **mlli_entry_pp)
 153{
 154	u32 *mlli_entry_p = *mlli_entry_pp;
 155	u32 new_nents;
 156
 157	/* Verify there is no memory overflow*/
 158	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
 159	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
 
 
 160		return -ENOMEM;
 
 161
 162	/*handle buffer longer than 64 kbytes */
 163	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
 164		cc_lli_set_addr(mlli_entry_p, buff_dma);
 165		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
 166		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 167			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 168			mlli_entry_p[LLI_WORD1_OFFSET]);
 169		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
 170		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
 171		mlli_entry_p = mlli_entry_p + 2;
 172		(*curr_nents)++;
 173	}
 174	/*Last entry */
 175	cc_lli_set_addr(mlli_entry_p, buff_dma);
 176	cc_lli_set_size(mlli_entry_p, buff_size);
 177	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 178		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 179		mlli_entry_p[LLI_WORD1_OFFSET]);
 180	mlli_entry_p = mlli_entry_p + 2;
 181	*mlli_entry_pp = mlli_entry_p;
 182	(*curr_nents)++;
 183	return 0;
 184}
 185
 186static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
 187				u32 sgl_data_len, u32 sgl_offset,
 188				u32 *curr_nents, u32 **mlli_entry_pp)
 189{
 190	struct scatterlist *curr_sgl = sgl;
 191	u32 *mlli_entry_p = *mlli_entry_pp;
 192	s32 rc = 0;
 193
 194	for ( ; (curr_sgl && sgl_data_len);
 195	      curr_sgl = sg_next(curr_sgl)) {
 196		u32 entry_data_len =
 197			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
 198				sg_dma_len(curr_sgl) - sgl_offset :
 199				sgl_data_len;
 200		sgl_data_len -= entry_data_len;
 201		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
 202					    sgl_offset, entry_data_len,
 203					    curr_nents, &mlli_entry_p);
 204		if (rc)
 205			return rc;
 206
 207		sgl_offset = 0;
 208	}
 209	*mlli_entry_pp = mlli_entry_p;
 210	return 0;
 211}
 212
 213static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
 214			    struct mlli_params *mlli_params, gfp_t flags)
 215{
 216	u32 *mlli_p;
 217	u32 total_nents = 0, prev_total_nents = 0;
 218	int rc = 0, i;
 219
 220	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
 221
 222	/* Allocate memory from the pointed pool */
 223	mlli_params->mlli_virt_addr =
 224		dma_pool_alloc(mlli_params->curr_pool, flags,
 225			       &mlli_params->mlli_dma_addr);
 226	if (!mlli_params->mlli_virt_addr) {
 227		dev_err(dev, "dma_pool_alloc() failed\n");
 228		rc = -ENOMEM;
 229		goto build_mlli_exit;
 230	}
 231	/* Point to start of MLLI */
 232	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
 233	/* go over all SG's and link it to one MLLI table */
 234	for (i = 0; i < sg_data->num_of_buffers; i++) {
 235		union buffer_array_entry *entry = &sg_data->entry[i];
 236		u32 tot_len = sg_data->total_data_len[i];
 237		u32 offset = sg_data->offset[i];
 238
 239		if (sg_data->type[i] == DMA_SGL_TYPE)
 240			rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
 241						  offset, &total_nents,
 242						  &mlli_p);
 243		else /*DMA_BUFF_TYPE*/
 244			rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
 245						    tot_len, &total_nents,
 246						    &mlli_p);
 247		if (rc)
 248			return rc;
 249
 250		/* set last bit in the current table */
 251		if (sg_data->mlli_nents[i]) {
 252			/*Calculate the current MLLI table length for the
 253			 *length field in the descriptor
 254			 */
 255			*sg_data->mlli_nents[i] +=
 256				(total_nents - prev_total_nents);
 257			prev_total_nents = total_nents;
 258		}
 259	}
 260
 261	/* Set MLLI size for the bypass operation */
 262	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
 263
 264	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
 265		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
 266		mlli_params->mlli_len);
 267
 268build_mlli_exit:
 269	return rc;
 270}
 271
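/**
 * cc_add_buffer_entry() - Add a contiguous DMA buffer to the buffer array
 *
 * @dev: Device object
 * @sgl_data: Buffer array to append to
 * @buffer_dma: DMA address of the buffer
 * @buffer_len: Buffer length in bytes
 * @is_last_entry: True if this is the last entry of the MLLI table
 * @mlli_nents: [OUT] Optional counter of MLLI entries for this buffer
 */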
 272static void cc_add_buffer_entry(struct device *dev,
 273				struct buffer_array *sgl_data,
 274				dma_addr_t buffer_dma, unsigned int buffer_len,
 275				bool is_last_entry, u32 *mlli_nents)
 276{
 277	unsigned int index = sgl_data->num_of_buffers;
 278
 279	dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
 280		index, &buffer_dma, buffer_len, is_last_entry);
 281	sgl_data->nents[index] = 1;
 282	sgl_data->entry[index].buffer_dma = buffer_dma;
 283	sgl_data->offset[index] = 0;
 284	sgl_data->total_data_len[index] = buffer_len;
 285	sgl_data->type[index] = DMA_BUFF_TYPE;
 286	sgl_data->is_last[index] = is_last_entry;
 287	sgl_data->mlli_nents[index] = mlli_nents;
 288	if (sgl_data->mlli_nents[index])
 289		*sgl_data->mlli_nents[index] = 0;
 290	sgl_data->num_of_buffers++;
 291}
 292
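/**
 * cc_add_sg_entry() - Add a scatterlist to the buffer array
 *
 * @dev: Device object
 * @sgl_data: Buffer array to append to
 * @nents: Number of entries in @sgl
 * @sgl: Scatterlist to add
 * @data_len: Number of data bytes in @sgl
 * @data_offset: Byte offset into @sgl where the data starts
 * @is_last_table: True if this is the last table of the MLLI
 * @mlli_nents: [OUT] Optional counter of MLLI entries for this SG list
 */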
 293static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 294			    unsigned int nents, struct scatterlist *sgl,
 295			    unsigned int data_len, unsigned int data_offset,
 296			    bool is_last_table, u32 *mlli_nents)
 297{
 298	unsigned int index = sgl_data->num_of_buffers;
 299
 300	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
 301		index, nents, sgl, data_len, is_last_table);
 302	sgl_data->nents[index] = nents;
 303	sgl_data->entry[index].sgl = sgl;
 304	sgl_data->offset[index] = data_offset;
 305	sgl_data->total_data_len[index] = data_len;
 306	sgl_data->type[index] = DMA_SGL_TYPE;
 307	sgl_data->is_last[index] = is_last_table;
 308	sgl_data->mlli_nents[index] = mlli_nents;
 309	if (sgl_data->mlli_nents[index])
 310		*sgl_data->mlli_nents[index] = 0;
 311	sgl_data->num_of_buffers++;
 312}
 313
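/**
 * cc_dma_map_sg() - DMA-map a (possibly chained) scatterlist entry by entry
 *
 * @dev: Device object
 * @sg: Scatterlist to map
 * @nents: Number of entries to map
 * @direction: DMA direction
 *
 * Return: Number of mapped entries, or 0 on failure (already-mapped
 * entries are unmapped before returning)
 */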
 314static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
 315			 enum dma_data_direction direction)
 316{
 317	u32 i, j;
 318	struct scatterlist *l_sg = sg;
 319
 320	for (i = 0; i < nents; i++) {
 321		if (!l_sg)
 322			break;
 323		if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
 324			dev_err(dev, "dma_map_page() sg buffer failed\n");
 325			goto err;
 326		}
 327		l_sg = sg_next(l_sg);
 328	}
 329	return nents;
 330
 331err:
 332	/* Restore mapped parts */
 333	for (j = 0; j < i; j++) {
 334		if (!sg)
 335			break;
 336		dma_unmap_sg(dev, sg, 1, direction);
 337		sg = sg_next(sg);
 338	}
 339	return 0;
 340}
 341
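/**
 * cc_map_sg() - DMA-map a scatterlist and report its geometry
 *
 * @dev: Device object
 * @sg: Scatterlist to map
 * @nbytes: Number of data bytes in @sg
 * @direction: DMA direction
 * @nents: [OUT] Number of SG entries holding the data
 * @max_sg_nents: Maximum number of fragments allowed
 * @lbytes: [OUT] Number of bytes in the last entry
 * @mapped_nents: [OUT] Number of entries actually mapped
 *
 * Return: 0 on success, -ENOMEM on mapping failure or too many fragments
 */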
 342static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 343		     unsigned int nbytes, int direction, u32 *nents,
 344		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 345{
 346	bool is_chained = false;
 347
 348	if (sg_is_last(sg)) {
 349		/* One entry only case -set to DLLI */
 350		if (dma_map_sg(dev, sg, 1, direction) != 1) {
 351			dev_err(dev, "dma_map_sg() single buffer failed\n");
 352			return -ENOMEM;
 353		}
 354		dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 355			&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
 356			sg->offset, sg->length);
 357		*lbytes = nbytes;
 358		*nents = 1;
 359		*mapped_nents = 1;
 360	} else {  /*sg_is_last*/
 361		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
 362					  &is_chained);
 363		if (*nents > max_sg_nents) {
  364			dev_err(dev, "Too many fragments. current %d max %d\n",
  365				*nents, max_sg_nents);
  366			*nents = 0;
 367			return -ENOMEM;
 368		}
 369		if (!is_chained) {
  370			/* In case of an MMU, the number of mapped nents
  371			 * might differ from the original sgl nents
  372			 */
 373			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
 374			if (*mapped_nents == 0) {
 375				*nents = 0;
 376				dev_err(dev, "dma_map_sg() sg buffer failed\n");
 377				return -ENOMEM;
 378			}
 379		} else {
  380			/* In this case the driver maps entry by entry, so it
  381			 * must have the same nents before and after mapping
  382			 */
 383			*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
 384						      direction);
 385			if (*mapped_nents != *nents) {
 386				*nents = *mapped_nents;
 387				dev_err(dev, "dma_map_sg() sg buffer failed\n");
 388				return -ENOMEM;
 389			}
 390		}
 391	}
 392
 393	return 0;
 394}
 395
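/**
 * cc_set_aead_conf_buf() - Map the CCM configuration (A0) block for DMA
 *
 * @dev: Device object
 * @areq_ctx: AEAD request context
 * @config_data: CCM configuration data (A0 block + CCM header)
 * @sg_data: Buffer array to chain the config block into (MLLI case)
 * @assoclen: Associated data length
 *
 * Return: 0 on success, -ENOMEM on mapping failure
 */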
 396static int
 397cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
 398		     u8 *config_data, struct buffer_array *sg_data,
 399		     unsigned int assoclen)
 400{
 401	dev_dbg(dev, " handle additional data config set to DLLI\n");
 402	/* create sg for the current buffer */
 403	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
 404		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
 405	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
 406		dev_err(dev, "dma_map_sg() config buffer failed\n");
 407		return -ENOMEM;
 408	}
 409	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 410		&sg_dma_address(&areq_ctx->ccm_adata_sg),
 411		sg_page(&areq_ctx->ccm_adata_sg),
 412		sg_virt(&areq_ctx->ccm_adata_sg),
 413		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
 414	/* prepare for case of MLLI */
 415	if (assoclen > 0) {
 416		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
 417				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
 418				0, false, NULL);
 419	}
 420	return 0;
 421}
 422
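/**
 * cc_set_hash_buf() - Map the previously buffered hash data for DMA
 *
 * @dev: Device object
 * @areq_ctx: ahash request context
 * @curr_buff: Buffer holding the pending data
 * @curr_buff_cnt: Number of pending bytes in @curr_buff
 * @sg_data: Buffer array to chain the buffer into (MLLI case)
 *
 * Return: 0 on success, -ENOMEM on mapping failure
 */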
 423static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
 424			   u8 *curr_buff, u32 curr_buff_cnt,
 425			   struct buffer_array *sg_data)
 426{
 427	dev_dbg(dev, " handle curr buff %x set to   DLLI\n", curr_buff_cnt);
 428	/* create sg for the current buffer */
 429	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
 430	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
 431		dev_err(dev, "dma_map_sg() src buffer failed\n");
 432		return -ENOMEM;
 433	}
 434	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 435		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
 436		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
 437		areq_ctx->buff_sg->length);
 438	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
 439	areq_ctx->curr_sg = areq_ctx->buff_sg;
 440	areq_ctx->in_nents = 0;
 441	/* prepare for case of MLLI */
 442	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
 443			false, NULL);
 444	return 0;
 445}
 446
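/**
 * cc_unmap_cipher_request() - Release the DMA resources of a cipher request
 *
 * @dev: Device object
 * @ctx: Cipher request context
 * @ivsize: IV size in bytes
 * @src: Source scatterlist
 * @dst: Destination scatterlist
 */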
 447void cc_unmap_cipher_request(struct device *dev, void *ctx,
 448				unsigned int ivsize, struct scatterlist *src,
 449				struct scatterlist *dst)
 450{
 451	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 452
 453	if (req_ctx->gen_ctx.iv_dma_addr) {
 454		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 455			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
 456		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
 457				 ivsize,
 458				 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
 459				 DMA_TO_DEVICE);
 460	}
 461	/* Release pool */
 462	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
 463	    req_ctx->mlli_params.mlli_virt_addr) {
 464		dma_pool_free(req_ctx->mlli_params.curr_pool,
 465			      req_ctx->mlli_params.mlli_virt_addr,
 466			      req_ctx->mlli_params.mlli_dma_addr);
 467	}
 468
 469	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
 470	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 471
 472	if (src != dst) {
 473		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
 474		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
 475	}
 476}
 477
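/**
 * cc_map_cipher_request() - Map the buffers of a cipher request for DMA
 *
 * @drvdata: Driver private data
 * @ctx: Cipher request context
 * @ivsize: IV size in bytes
 * @nbytes: Data size in bytes
 * @info: IV buffer
 * @src: Source scatterlist
 * @dst: Destination scatterlist
 * @flags: Allocation flags
 *
 * Return: 0 on success, -ENOMEM on mapping/allocation failure
 */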
 478int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 479			  unsigned int ivsize, unsigned int nbytes,
 480			  void *info, struct scatterlist *src,
 481			  struct scatterlist *dst, gfp_t flags)
 482{
 483	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 484	struct mlli_params *mlli_params = &req_ctx->mlli_params;
 485	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 486	struct device *dev = drvdata_to_dev(drvdata);
 487	struct buffer_array sg_data;
 488	u32 dummy = 0;
 489	int rc = 0;
 490	u32 mapped_nents = 0;
 491
 492	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
 493	mlli_params->curr_pool = NULL;
 494	sg_data.num_of_buffers = 0;
 495
 496	/* Map IV buffer */
 497	if (ivsize) {
 498		dump_byte_array("iv", (u8 *)info, ivsize);
 499		req_ctx->gen_ctx.iv_dma_addr =
 500			dma_map_single(dev, (void *)info,
 501				       ivsize,
 502				       req_ctx->is_giv ? DMA_BIDIRECTIONAL :
 503				       DMA_TO_DEVICE);
 504		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
 505			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 506				ivsize, info);
 507			return -ENOMEM;
 508		}
 509		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 510			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
 511	} else {
 512		req_ctx->gen_ctx.iv_dma_addr = 0;
 513	}
 514
 515	/* Map the src SGL */
 516	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
 517		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
 518	if (rc) {
 519		rc = -ENOMEM;
 520		goto cipher_exit;
 521	}
 522	if (mapped_nents > 1)
 523		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 524
 525	if (src == dst) {
 526		/* Handle inplace operation */
 527		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 528			req_ctx->out_nents = 0;
 529			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 530					nbytes, 0, true,
 531					&req_ctx->in_mlli_nents);
 532		}
 533	} else {
 534		/* Map the dst sg */
 535		if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
 536			      &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
 537			      &dummy, &mapped_nents)) {
 538			rc = -ENOMEM;
 539			goto cipher_exit;
 540		}
 541		if (mapped_nents > 1)
 542			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 543
 544		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 545			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 546					nbytes, 0, true,
 547					&req_ctx->in_mlli_nents);
 548			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
 549					nbytes, 0, true,
 550					&req_ctx->out_mlli_nents);
 551		}
 552	}
 553
 554	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 555		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
 556		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 557		if (rc)
 558			goto cipher_exit;
 559	}
 560
 561	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
 562		cc_dma_buf_type(req_ctx->dma_buf_type));
 563
 564	return 0;
 565
 566cipher_exit:
 567	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
 568	return rc;
 569}
 570
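/**
 * cc_unmap_aead_request() - Release the DMA resources of an AEAD request
 *
 * @dev: Device object
 * @req: AEAD request object
 */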
 571void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 572{
 573	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 574	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 575	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 576	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 577	u32 dummy;
 578	bool chained;
 579	u32 size_to_unmap = 0;
 580
 581	if (areq_ctx->mac_buf_dma_addr) {
 582		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
 583				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
 584	}
 585
 586	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
 587		if (areq_ctx->hkey_dma_addr) {
 588			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
 589					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
 590		}
 591
 592		if (areq_ctx->gcm_block_len_dma_addr) {
 593			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
 594					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 595		}
 596
 597		if (areq_ctx->gcm_iv_inc1_dma_addr) {
 598			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
 599					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 600		}
 601
 602		if (areq_ctx->gcm_iv_inc2_dma_addr) {
 603			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
 604					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 605		}
 606	}
 607
 608	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 609		if (areq_ctx->ccm_iv0_dma_addr) {
 610			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
 611					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 612		}
 613
 614		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
 615	}
 616	if (areq_ctx->gen_ctx.iv_dma_addr) {
 617		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 618				 hw_iv_size, DMA_BIDIRECTIONAL);
 619	}
 620
 621	/*In case a pool was set, a table was
 622	 *allocated and should be released
 623	 */
 624	if (areq_ctx->mlli_params.curr_pool) {
 625		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
 626			&areq_ctx->mlli_params.mlli_dma_addr,
 627			areq_ctx->mlli_params.mlli_virt_addr);
 628		dma_pool_free(areq_ctx->mlli_params.curr_pool,
 629			      areq_ctx->mlli_params.mlli_virt_addr,
 630			      areq_ctx->mlli_params.mlli_dma_addr);
 631	}
 632
 633	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
 634		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 635		req->assoclen, req->cryptlen);
 636	size_to_unmap = req->assoclen + req->cryptlen;
 637	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
 638		size_to_unmap += areq_ctx->req_authsize;
 639	if (areq_ctx->is_gcm4543)
 640		size_to_unmap += crypto_aead_ivsize(tfm);
 641
 642	dma_unmap_sg(dev, req->src,
 643		     cc_get_sgl_nents(dev, req->src, size_to_unmap,
 644				      &dummy, &chained),
 645		     DMA_BIDIRECTIONAL);
 646	if (req->src != req->dst) {
 647		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 648			sg_virt(req->dst));
 649		dma_unmap_sg(dev, req->dst,
 650			     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
 651					      &dummy, &chained),
 652			     DMA_BIDIRECTIONAL);
 653	}
 654	if (drvdata->coherent &&
 655	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
 656	    req->src == req->dst) {
  657		/* copy the mac back from the temporary location to deal with
  658		 * possible data memory overwriting caused by a cache coherence
  659		 * problem.
 660		 */
 661		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
 662	}
 663}
 664
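/**
 * cc_get_aead_icv_nents() - Determine how many SG entries hold the ICV
 *
 * @dev: Device object
 * @sgl: Scatterlist holding the data and ICV
 * @sgl_nents: Number of entries in @sgl
 * @authsize: ICV (MAC) size in bytes
 * @last_entry_data_size: Number of data bytes in the last SG entry
 * @is_icv_fragmented: [OUT] True if the ICV spans more than one entry
 *
 * Return: Number of SG entries occupied by the ICV (0 if the ICV is attached
 * to the data in the last entry), or a negative value if the ICV spans more
 * fragments than supported
 */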
 665static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
 666				 unsigned int sgl_nents, unsigned int authsize,
 667				 u32 last_entry_data_size,
 668				 bool *is_icv_fragmented)
 669{
 670	unsigned int icv_max_size = 0;
 671	unsigned int icv_required_size = authsize > last_entry_data_size ?
 672					(authsize - last_entry_data_size) :
 673					authsize;
  674	int nents;
 675	unsigned int i;
 676
 677	if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
 678		*is_icv_fragmented = false;
 679		return 0;
 680	}
 681
 682	for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
 683		if (!sgl)
 684			break;
 685		sgl = sg_next(sgl);
 686	}
 687
 688	if (sgl)
 689		icv_max_size = sgl->length;
 690
 691	if (last_entry_data_size > authsize) {
 692		/* ICV attached to data in last entry (not fragmented!) */
 693		nents = 0;
 694		*is_icv_fragmented = false;
 695	} else if (last_entry_data_size == authsize) {
 696		/* ICV placed in whole last entry (not fragmented!) */
 697		nents = 1;
 698		*is_icv_fragmented = false;
 699	} else if (icv_max_size > icv_required_size) {
 700		nents = 1;
 701		*is_icv_fragmented = true;
 702	} else if (icv_max_size == icv_required_size) {
 703		nents = 2;
 704		*is_icv_fragmented = true;
 705	} else {
 706		dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
 707			MAX_ICV_NENTS_SUPPORTED);
 708		nents = -1; /*unsupported*/
 709	}
 710	dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
 711		(*is_icv_fragmented ? "true" : "false"), nents);
 712
 713	return nents;
 714}
 715
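/**
 * cc_aead_chain_iv() - Map the request IV and optionally chain it to the MLLI
 *
 * @drvdata: Driver private data
 * @req: AEAD request object
 * @sg_data: Buffer array being built for this request
 * @is_last: True if this is the last entry of the current MLLI table
 * @do_chain: True to chain the IV into the MLLI being built
 *
 * Return: 0 on success, -ENOMEM on mapping failure
 */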
 716static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
 717			    struct aead_request *req,
 718			    struct buffer_array *sg_data,
 719			    bool is_last, bool do_chain)
 720{
 721	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 722	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 723	struct device *dev = drvdata_to_dev(drvdata);
 724	int rc = 0;
 725
 726	if (!req->iv) {
 727		areq_ctx->gen_ctx.iv_dma_addr = 0;
 728		goto chain_iv_exit;
 729	}
 730
 731	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
 732						       hw_iv_size,
 733						       DMA_BIDIRECTIONAL);
 734	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 735		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 736			hw_iv_size, req->iv);
 737		rc = -ENOMEM;
 738		goto chain_iv_exit;
 739	}
 740
 741	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 742		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
 743	// TODO: what about CTR?? ask Ron
 744	if (do_chain && areq_ctx->plaintext_authenticate_only) {
 745		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 746		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
 747		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
 748		/* Chain to given list */
 749		cc_add_buffer_entry(dev, sg_data,
 750				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
 751				    iv_size_to_authenc, is_last,
 752				    &areq_ctx->assoc.mlli_nents);
 753		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 754	}
 755
 756chain_iv_exit:
 757	return rc;
 758}
 759
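/**
 * cc_aead_chain_assoc() - Add the associated data to the buffer array
 *
 * @drvdata: Driver private data
 * @req: AEAD request object
 * @sg_data: Buffer array being built for this request
 * @is_last: True if this is the last table of the MLLI
 * @do_chain: True to force chaining even for a single entry
 *
 * Return: 0 on success, -EINVAL or -ENOMEM on error
 */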
 760static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 761			       struct aead_request *req,
 762			       struct buffer_array *sg_data,
 763			       bool is_last, bool do_chain)
 764{
 765	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 766	int rc = 0;
 767	u32 mapped_nents = 0;
 768	struct scatterlist *current_sg = req->src;
 769	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 770	unsigned int sg_index = 0;
 771	u32 size_of_assoc = req->assoclen;
 772	struct device *dev = drvdata_to_dev(drvdata);
 773
 774	if (areq_ctx->is_gcm4543)
 775		size_of_assoc += crypto_aead_ivsize(tfm);
 776
 777	if (!sg_data) {
 778		rc = -EINVAL;
 779		goto chain_assoc_exit;
 780	}
 781
 782	if (req->assoclen == 0) {
 783		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
 784		areq_ctx->assoc.nents = 0;
 785		areq_ctx->assoc.mlli_nents = 0;
 786		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
 787			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 788			areq_ctx->assoc.nents);
 789		goto chain_assoc_exit;
 790	}
 791
 792	//iterate over the sgl to see how many entries are for associated data
  793	//it is assumed that if we reach here, the sgl is already mapped
 794	sg_index = current_sg->length;
 795	//the first entry in the scatter list contains all the associated data
 796	if (sg_index > size_of_assoc) {
 797		mapped_nents++;
 798	} else {
 799		while (sg_index <= size_of_assoc) {
 800			current_sg = sg_next(current_sg);
  801			/* if we have reached the end of the sgl, this is
  802			 * unexpected
  803			 */
 804			if (!current_sg) {
 805				dev_err(dev, "reached end of sg list. unexpected\n");
 806				return -EINVAL;
 807			}
 808			sg_index += current_sg->length;
 809			mapped_nents++;
 810		}
 811	}
 812	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 813		dev_err(dev, "Too many fragments. current %d max %d\n",
 814			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 815		return -ENOMEM;
 816	}
 817	areq_ctx->assoc.nents = mapped_nents;
 818
 819	/* in CCM case we have additional entry for
 820	 * ccm header configurations
 821	 */
 822	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 823		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 824			dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
 825				(areq_ctx->assoc.nents + 1),
 826				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 827			rc = -ENOMEM;
 828			goto chain_assoc_exit;
 829		}
 830	}
 831
 832	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
 833		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
 834	else
 835		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 836
 837	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 838		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
 839			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 840			areq_ctx->assoc.nents);
 841		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
 842				req->assoclen, 0, is_last,
 843				&areq_ctx->assoc.mlli_nents);
 844		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 845	}
 846
 847chain_assoc_exit:
 848	return rc;
 849}
 850
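/**
 * cc_prepare_aead_data_dlli() - Set the ICV location for the DLLI case
 *
 * @req: AEAD request object
 * @src_last_bytes: [IN] Number of bytes in the last source SG entry
 * @dst_last_bytes: [IN] Number of bytes in the last destination SG entry
 */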
 851static void cc_prepare_aead_data_dlli(struct aead_request *req,
 852				      u32 *src_last_bytes, u32 *dst_last_bytes)
 853{
 854	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 855	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 856	unsigned int authsize = areq_ctx->req_authsize;
 857
 858	areq_ctx->is_icv_fragmented = false;
 859	if (req->src == req->dst) {
 860		/*INPLACE*/
 861		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
 862			(*src_last_bytes - authsize);
 863		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
 864			(*src_last_bytes - authsize);
 865	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 866		/*NON-INPLACE and DECRYPT*/
 867		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
 868			(*src_last_bytes - authsize);
 869		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
 870			(*src_last_bytes - authsize);
 871	} else {
 872		/*NON-INPLACE and ENCRYPT*/
 873		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
 874			(*dst_last_bytes - authsize);
 875		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
 876			(*dst_last_bytes - authsize);
 877	}
 878}
 879
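/**
 * cc_prepare_aead_data_mlli() - Chain src/dst data into the MLLI and locate
 * the ICV
 *
 * @drvdata: Driver private data
 * @req: AEAD request object
 * @sg_data: Buffer array being built for this request
 * @src_last_bytes: [IN] Number of bytes in the last source SG entry
 * @dst_last_bytes: [IN] Number of bytes in the last destination SG entry
 * @is_last_table: True if this is the last table of the MLLI
 *
 * Return: 0 on success, -ENOTSUPP if the ICV layout is not supported
 */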
 880static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
 881				     struct aead_request *req,
 882				     struct buffer_array *sg_data,
 883				     u32 *src_last_bytes, u32 *dst_last_bytes,
 884				     bool is_last_table)
 885{
 886	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 887	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 888	unsigned int authsize = areq_ctx->req_authsize;
 889	int rc = 0, icv_nents;
 890	struct device *dev = drvdata_to_dev(drvdata);
 891	struct scatterlist *sg;
 892
 893	if (req->src == req->dst) {
 894		/*INPLACE*/
 895		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 896				areq_ctx->src_sgl, areq_ctx->cryptlen,
 897				areq_ctx->src_offset, is_last_table,
 898				&areq_ctx->src.mlli_nents);
 899
 900		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
 901						  areq_ctx->src.nents,
 902						  authsize, *src_last_bytes,
 903						  &areq_ctx->is_icv_fragmented);
 904		if (icv_nents < 0) {
 905			rc = -ENOTSUPP;
 906			goto prepare_data_mlli_exit;
 907		}
 908
 909		if (areq_ctx->is_icv_fragmented) {
  910			/* Backup happens only when the ICV is fragmented; ICV
  911			 * verification is then done by a CPU compare in order
  912			 * to simplify MAC verification upon request completion
  913			 */
  914			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
  915				/* On coherent platforms (e.g. ACP) the
  916				 * ICV has already been copied for any
  917				 * in-place decrypt operation, hence
  918				 * we must skip this copy here.
  919				 */
 920				if (!drvdata->coherent)
 921					cc_copy_mac(dev, req, CC_SG_TO_BUF);
 922
 923				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 924			} else {
 925				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 926				areq_ctx->icv_dma_addr =
 927					areq_ctx->mac_buf_dma_addr;
 928			}
 929		} else { /* Contig. ICV */
 930			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  931			/* Should handle the case where the SG is not contiguous. */
 932			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 933				(*src_last_bytes - authsize);
 934			areq_ctx->icv_virt_addr = sg_virt(sg) +
 935				(*src_last_bytes - authsize);
 936		}
 937
 938	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 939		/*NON-INPLACE and DECRYPT*/
 940		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 941				areq_ctx->src_sgl, areq_ctx->cryptlen,
 942				areq_ctx->src_offset, is_last_table,
 943				&areq_ctx->src.mlli_nents);
 944		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 945				areq_ctx->dst_sgl, areq_ctx->cryptlen,
 946				areq_ctx->dst_offset, is_last_table,
 947				&areq_ctx->dst.mlli_nents);
 948
 949		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
 950						  areq_ctx->src.nents,
 951						  authsize, *src_last_bytes,
 952						  &areq_ctx->is_icv_fragmented);
 953		if (icv_nents < 0) {
 954			rc = -ENOTSUPP;
 955			goto prepare_data_mlli_exit;
 956		}
 957
  958		/* Backup happens only when the ICV is fragmented; ICV
  959		 * verification is then done by a CPU compare in order to
  960		 * simplify MAC verification upon request completion
  961		 */
 962		if (areq_ctx->is_icv_fragmented) {
 963			cc_copy_mac(dev, req, CC_SG_TO_BUF);
 964			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 965
 966		} else { /* Contig. ICV */
 967			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  968			/* Should handle the case where the SG is not contiguous. */
 969			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 970				(*src_last_bytes - authsize);
 971			areq_ctx->icv_virt_addr = sg_virt(sg) +
 972				(*src_last_bytes - authsize);
 973		}
 974
 975	} else {
 976		/*NON-INPLACE and ENCRYPT*/
 977		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 978				areq_ctx->dst_sgl, areq_ctx->cryptlen,
 979				areq_ctx->dst_offset, is_last_table,
 980				&areq_ctx->dst.mlli_nents);
 981		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 982				areq_ctx->src_sgl, areq_ctx->cryptlen,
 983				areq_ctx->src_offset, is_last_table,
 984				&areq_ctx->src.mlli_nents);
 985
 986		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
 987						  areq_ctx->dst.nents,
 988						  authsize, *dst_last_bytes,
 989						  &areq_ctx->is_icv_fragmented);
 990		if (icv_nents < 0) {
 991			rc = -ENOTSUPP;
 992			goto prepare_data_mlli_exit;
 993		}
 994
 995		if (!areq_ctx->is_icv_fragmented) {
 996			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
 997			/* Contig. ICV */
 998			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 999				(*dst_last_bytes - authsize);
1000			areq_ctx->icv_virt_addr = sg_virt(sg) +
1001				(*dst_last_bytes - authsize);
1002		} else {
1003			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
1004			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
1005		}
1006	}
1007
1008prepare_data_mlli_exit:
1009	return rc;
1010}
1011
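/**
 * cc_aead_chain_data() - Map and chain the cipher data of an AEAD request
 *
 * @drvdata: Driver private data
 * @req: AEAD request object
 * @sg_data: Buffer array being built for this request
 * @is_last_table: True if this is the last table of the MLLI
 * @do_chain: True to force the MLLI (double-pass) flow
 *
 * Return: 0 on success, -EINVAL or -ENOMEM on error
 */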
1012static int cc_aead_chain_data(struct cc_drvdata *drvdata,
1013			      struct aead_request *req,
1014			      struct buffer_array *sg_data,
1015			      bool is_last_table, bool do_chain)
1016{
1017	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1018	struct device *dev = drvdata_to_dev(drvdata);
1019	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
1020	unsigned int authsize = areq_ctx->req_authsize;
1021	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
1022	int rc = 0;
1023	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
1024	u32 offset = 0;
1025	/* non-inplace mode */
1026	unsigned int size_for_map = req->assoclen + req->cryptlen;
1027	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1028	u32 sg_index = 0;
1029	bool chained = false;
1030	bool is_gcm4543 = areq_ctx->is_gcm4543;
1031	u32 size_to_skip = req->assoclen;
1032
1033	if (is_gcm4543)
1034		size_to_skip += crypto_aead_ivsize(tfm);
1035
1036	offset = size_to_skip;
1037
1038	if (!sg_data)
1039		return -EINVAL;
1040
1041	areq_ctx->src_sgl = req->src;
1042	areq_ctx->dst_sgl = req->dst;
1043
1044	if (is_gcm4543)
1045		size_for_map += crypto_aead_ivsize(tfm);
1046
1047	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1048			authsize : 0;
1049	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
1050					    &src_last_bytes, &chained);
1051	sg_index = areq_ctx->src_sgl->length;
1052	//check where the data starts
1053	while (sg_index <= size_to_skip) {
1054		offset -= areq_ctx->src_sgl->length;
1055		areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
 1056		//if we have reached the end of the sgl, this is unexpected
1057		if (!areq_ctx->src_sgl) {
1058			dev_err(dev, "reached end of sg list. unexpected\n");
1059			return -EINVAL;
1060		}
1061		sg_index += areq_ctx->src_sgl->length;
1062		src_mapped_nents--;
1063	}
1064	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
1065		dev_err(dev, "Too many fragments. current %d max %d\n",
1066			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1067		return -ENOMEM;
1068	}
1069
1070	areq_ctx->src.nents = src_mapped_nents;
1071
1072	areq_ctx->src_offset = offset;
1073
1074	if (req->src != req->dst) {
1075		size_for_map = req->assoclen + req->cryptlen;
1076		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1077				authsize : 0;
1078		if (is_gcm4543)
1079			size_for_map += crypto_aead_ivsize(tfm);
1080
1081		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
1082			       &areq_ctx->dst.nents,
1083			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
1084			       &dst_mapped_nents);
1085		if (rc) {
1086			rc = -ENOMEM;
1087			goto chain_data_exit;
1088		}
1089	}
1090
1091	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
1092					    &dst_last_bytes, &chained);
1093	sg_index = areq_ctx->dst_sgl->length;
1094	offset = size_to_skip;
1095
1096	//check where the data starts
1097	while (sg_index <= size_to_skip) {
1098		offset -= areq_ctx->dst_sgl->length;
1099		areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
 1100		//if we have reached the end of the sgl, this is unexpected
1101		if (!areq_ctx->dst_sgl) {
1102			dev_err(dev, "reached end of sg list. unexpected\n");
1103			return -EINVAL;
1104		}
1105		sg_index += areq_ctx->dst_sgl->length;
1106		dst_mapped_nents--;
1107	}
1108	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
1109		dev_err(dev, "Too many fragments. current %d max %d\n",
1110			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1111		return -ENOMEM;
1112	}
1113	areq_ctx->dst.nents = dst_mapped_nents;
1114	areq_ctx->dst_offset = offset;
1115	if (src_mapped_nents > 1 ||
1116	    dst_mapped_nents  > 1 ||
1117	    do_chain) {
1118		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
1119		rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
1120					       &src_last_bytes,
1121					       &dst_last_bytes, is_last_table);
1122	} else {
1123		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
1124		cc_prepare_aead_data_dlli(req, &src_last_bytes,
1125					  &dst_last_bytes);
1126	}
1127
1128chain_data_exit:
1129	return rc;
1130}
1131
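/**
 * cc_update_aead_mlli_nents() - Compute the SRAM addresses of the assoc,
 * src and dst MLLI tables and fix up the MLLI entry counts
 *
 * @drvdata: Driver private data
 * @req: AEAD request object
 */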
1132static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
1133				      struct aead_request *req)
1134{
1135	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1136	u32 curr_mlli_size = 0;
1137
1138	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
1139		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
1140		curr_mlli_size = areq_ctx->assoc.mlli_nents *
1141						LLI_ENTRY_BYTE_SIZE;
1142	}
1143
1144	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1145		/*Inplace case dst nents equal to src nents*/
1146		if (req->src == req->dst) {
1147			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
1148			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
1149								curr_mlli_size;
1150			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
1151			if (!areq_ctx->is_single_pass)
1152				areq_ctx->assoc.mlli_nents +=
1153					areq_ctx->src.mlli_nents;
1154		} else {
1155			if (areq_ctx->gen_ctx.op_type ==
1156					DRV_CRYPTO_DIRECTION_DECRYPT) {
1157				areq_ctx->src.sram_addr =
1158						drvdata->mlli_sram_addr +
1159								curr_mlli_size;
1160				areq_ctx->dst.sram_addr =
1161						areq_ctx->src.sram_addr +
1162						areq_ctx->src.mlli_nents *
1163						LLI_ENTRY_BYTE_SIZE;
1164				if (!areq_ctx->is_single_pass)
1165					areq_ctx->assoc.mlli_nents +=
1166						areq_ctx->src.mlli_nents;
1167			} else {
1168				areq_ctx->dst.sram_addr =
1169						drvdata->mlli_sram_addr +
1170								curr_mlli_size;
1171				areq_ctx->src.sram_addr =
1172						areq_ctx->dst.sram_addr +
1173						areq_ctx->dst.mlli_nents *
1174						LLI_ENTRY_BYTE_SIZE;
1175				if (!areq_ctx->is_single_pass)
1176					areq_ctx->assoc.mlli_nents +=
1177						areq_ctx->dst.mlli_nents;
1178			}
1179		}
1180	}
1181}
1182
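/**
 * cc_map_aead_request() - Map all the buffers of an AEAD request for DMA
 *
 * @drvdata: Driver private data
 * @req: AEAD request object
 *
 * Maps the MAC buffer, CCM/GCM configuration blocks, IV, associated data
 * and cipher data, and builds the MLLI table(s) when needed.
 *
 * Return: 0 on success, -ENOMEM on mapping/allocation failure
 */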
1183int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
1184{
1185	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1186	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1187	struct device *dev = drvdata_to_dev(drvdata);
1188	struct buffer_array sg_data;
1189	unsigned int authsize = areq_ctx->req_authsize;
1190	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1191	int rc = 0;
1192	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1193	bool is_gcm4543 = areq_ctx->is_gcm4543;
1194	dma_addr_t dma_addr;
1195	u32 mapped_nents = 0;
1196	u32 dummy = 0; /*used for the assoc data fragments */
1197	u32 size_to_map = 0;
1198	gfp_t flags = cc_gfp_flags(&req->base);
1199
1200	mlli_params->curr_pool = NULL;
1201	sg_data.num_of_buffers = 0;
1202
 1203	/* copy the mac to a temporary location to deal with possible
 1204	 * data memory overwriting caused by a cache coherence problem.
1205	 */
1206	if (drvdata->coherent &&
1207	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
1208	    req->src == req->dst)
1209		cc_copy_mac(dev, req, CC_SG_TO_BUF);
1210
 1211	/* calculate the size for the cipher; remove the ICV in decrypt */
1212	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
1213				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1214				req->cryptlen :
1215				(req->cryptlen - authsize);
1216
1217	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
1218				  DMA_BIDIRECTIONAL);
1219	if (dma_mapping_error(dev, dma_addr)) {
1220		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
1221			MAX_MAC_SIZE, areq_ctx->mac_buf);
1222		rc = -ENOMEM;
1223		goto aead_map_failure;
1224	}
1225	areq_ctx->mac_buf_dma_addr = dma_addr;
1226
1227	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
1228		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1229
1230		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
1231					  DMA_TO_DEVICE);
1232
1233		if (dma_mapping_error(dev, dma_addr)) {
1234			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
1235				AES_BLOCK_SIZE, addr);
1236			areq_ctx->ccm_iv0_dma_addr = 0;
1237			rc = -ENOMEM;
1238			goto aead_map_failure;
1239		}
1240		areq_ctx->ccm_iv0_dma_addr = dma_addr;
1241
1242		if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
1243					 &sg_data, req->assoclen)) {
1244			rc = -ENOMEM;
1245			goto aead_map_failure;
1246		}
1247	}
1248
1249	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1250		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
1251					  DMA_BIDIRECTIONAL);
1252		if (dma_mapping_error(dev, dma_addr)) {
1253			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
1254				AES_BLOCK_SIZE, areq_ctx->hkey);
1255			rc = -ENOMEM;
1256			goto aead_map_failure;
1257		}
1258		areq_ctx->hkey_dma_addr = dma_addr;
1259
1260		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
1261					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1262		if (dma_mapping_error(dev, dma_addr)) {
1263			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1264				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1265			rc = -ENOMEM;
1266			goto aead_map_failure;
1267		}
1268		areq_ctx->gcm_block_len_dma_addr = dma_addr;
1269
1270		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
1271					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1272
1273		if (dma_mapping_error(dev, dma_addr)) {
1274			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1275				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
1276			areq_ctx->gcm_iv_inc1_dma_addr = 0;
1277			rc = -ENOMEM;
1278			goto aead_map_failure;
1279		}
1280		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
1281
1282		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
1283					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
1284
1285		if (dma_mapping_error(dev, dma_addr)) {
1286			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1287				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
1288			areq_ctx->gcm_iv_inc2_dma_addr = 0;
1289			rc = -ENOMEM;
1290			goto aead_map_failure;
1291		}
1292		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
1293	}
1294
1295	size_to_map = req->cryptlen + req->assoclen;
1296	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
1297		size_to_map += authsize;
1298
1299	if (is_gcm4543)
1300		size_to_map += crypto_aead_ivsize(tfm);
1301	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
1302		       &areq_ctx->src.nents,
1303		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
1304			LLI_MAX_NUM_OF_DATA_ENTRIES),
1305		       &dummy, &mapped_nents);
1306	if (rc) {
1307		rc = -ENOMEM;
1308		goto aead_map_failure;
1309	}
1310
1311	if (areq_ctx->is_single_pass) {
1312		/*
1313		 * Create MLLI table for:
1314		 *   (1) Assoc. data
1315		 *   (2) Src/Dst SGLs
1316		 *   Note: IV is contg. buffer (not an SGL)
1317		 */
1318		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1319		if (rc)
1320			goto aead_map_failure;
1321		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
1322		if (rc)
1323			goto aead_map_failure;
1324		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
1325		if (rc)
1326			goto aead_map_failure;
1327	} else { /* DOUBLE-PASS flow */
1328		/*
1329		 * Prepare MLLI table(s) in this order:
1330		 *
1331		 * If ENCRYPT/DECRYPT (inplace):
1332		 *   (1) MLLI table for assoc
1333		 *   (2) IV entry (chained right after end of assoc)
1334		 *   (3) MLLI for src/dst (inplace operation)
1335		 *
1336		 * If ENCRYPT (non-inplace)
1337		 *   (1) MLLI table for assoc
1338		 *   (2) IV entry (chained right after end of assoc)
1339		 *   (3) MLLI for dst
1340		 *   (4) MLLI for src
1341		 *
1342		 * If DECRYPT (non-inplace)
1343		 *   (1) MLLI table for assoc
1344		 *   (2) IV entry (chained right after end of assoc)
1345		 *   (3) MLLI for src
1346		 *   (4) MLLI for dst
1347		 */
1348		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1349		if (rc)
1350			goto aead_map_failure;
1351		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
1352		if (rc)
1353			goto aead_map_failure;
1354		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
1355		if (rc)
1356			goto aead_map_failure;
1357	}
1358
 1359	/* MLLI support - start building the MLLI according to the above
 1360	 * results
 1361	 */
1362	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1363	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1364		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1365		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1366		if (rc)
1367			goto aead_map_failure;
1368
1369		cc_update_aead_mlli_nents(drvdata, req);
1370		dev_dbg(dev, "assoc params mn %d\n",
1371			areq_ctx->assoc.mlli_nents);
1372		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
1373		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
1374	}
1375	return 0;
1376
1377aead_map_failure:
1378	cc_unmap_aead_request(dev, req);
1379	return rc;
1380}
1381
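/**
 * cc_map_hash_request_final() - Map the data of a hash final/finup/digest
 *
 * @drvdata: Driver private data
 * @ctx: ahash request context
 * @src: Source scatterlist
 * @nbytes: Number of data bytes in @src
 * @do_update: True if pending update data should be hashed as well
 * @flags: Allocation flags
 *
 * Return: 0 on success, -ENOMEM on mapping/allocation failure
 */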
1382int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
1383			      struct scatterlist *src, unsigned int nbytes,
1384			      bool do_update, gfp_t flags)
1385{
1386	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1387	struct device *dev = drvdata_to_dev(drvdata);
1388	u8 *curr_buff = cc_hash_buf(areq_ctx);
1389	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1390	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1391	struct buffer_array sg_data;
1392	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1393	u32 dummy = 0;
1394	u32 mapped_nents = 0;
1395
1396	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
1397		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1398	/* Init the type of the dma buffer */
1399	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1400	mlli_params->curr_pool = NULL;
1401	sg_data.num_of_buffers = 0;
1402	areq_ctx->in_nents = 0;
1403
1404	if (nbytes == 0 && *curr_buff_cnt == 0) {
1405		/* nothing to do */
1406		return 0;
1407	}
1408
 1409	/* TODO: copy data in case the buffer is big enough for the operation */
1410	/* map the previous buffer */
1411	if (*curr_buff_cnt) {
1412		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1413				    &sg_data)) {
1414			return -ENOMEM;
1415		}
1416	}
1417
1418	if (src && nbytes > 0 && do_update) {
1419		if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
1420			      &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
1421			      &dummy, &mapped_nents)) {
1422			goto unmap_curr_buff;
1423		}
1424		if (src && mapped_nents == 1 &&
1425		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1426			memcpy(areq_ctx->buff_sg, src,
1427			       sizeof(struct scatterlist));
1428			areq_ctx->buff_sg->length = nbytes;
1429			areq_ctx->curr_sg = areq_ctx->buff_sg;
1430			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1431		} else {
1432			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1433		}
1434	}
1435
1436	/*build mlli */
1437	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1438		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1439		/* add the src data to the sg_data */
1440		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
1441				0, true, &areq_ctx->mlli_nents);
1442		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
1443			goto fail_unmap_din;
1444	}
1445	/* change the buffer index for the unmap function */
1446	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1447	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
1448		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
1449	return 0;
1450
1451fail_unmap_din:
1452	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1453
1454unmap_curr_buff:
1455	if (*curr_buff_cnt)
1456		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1457
1458	return -ENOMEM;
1459}
1460
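/**
 * cc_map_hash_request_update() - Map the data of a hash update operation
 *
 * @drvdata: Driver private data
 * @ctx: ahash request context
 * @src: Source scatterlist
 * @nbytes: Number of data bytes in @src
 * @block_size: Hash block size in bytes
 * @flags: Allocation flags
 *
 * Data that does not fill a whole block is buffered for the next update.
 *
 * Return: 0 on success, 1 if all the data was buffered, -ENOMEM on failure
 */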
1461int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
1462			       struct scatterlist *src, unsigned int nbytes,
1463			       unsigned int block_size, gfp_t flags)
1464{
1465	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1466	struct device *dev = drvdata_to_dev(drvdata);
1467	u8 *curr_buff = cc_hash_buf(areq_ctx);
1468	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1469	u8 *next_buff = cc_next_buf(areq_ctx);
1470	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
1471	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1472	unsigned int update_data_len;
1473	u32 total_in_len = nbytes + *curr_buff_cnt;
1474	struct buffer_array sg_data;
1475	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1476	unsigned int swap_index = 0;
1477	u32 dummy = 0;
1478	u32 mapped_nents = 0;
1479
1480	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
1481		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1482	/* Init the type of the dma buffer */
1483	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1484	mlli_params->curr_pool = NULL;
1485	areq_ctx->curr_sg = NULL;
1486	sg_data.num_of_buffers = 0;
1487	areq_ctx->in_nents = 0;
1488
1489	if (total_in_len < block_size) {
1490		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
1491			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
1492		areq_ctx->in_nents =
1493			cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
1494		sg_copy_to_buffer(src, areq_ctx->in_nents,
1495				  &curr_buff[*curr_buff_cnt], nbytes);
1496		*curr_buff_cnt += nbytes;
1497		return 1;
1498	}
1499
1500	/* Calculate the residue size*/
1501	*next_buff_cnt = total_in_len & (block_size - 1);
1502	/* update data len */
1503	update_data_len = total_in_len - *next_buff_cnt;
1504
1505	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
1506		*next_buff_cnt, update_data_len);
1507
1508	/* Copy the new residue to next buffer */
1509	if (*next_buff_cnt) {
1510		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
1511			next_buff, (update_data_len - *curr_buff_cnt),
1512			*next_buff_cnt);
1513		cc_copy_sg_portion(dev, next_buff, src,
1514				   (update_data_len - *curr_buff_cnt),
1515				   nbytes, CC_SG_TO_BUF);
1516		/* change the buffer index for next operation */
1517		swap_index = 1;
1518	}
1519
1520	if (*curr_buff_cnt) {
1521		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1522				    &sg_data)) {
1523			return -ENOMEM;
1524		}
1525		/* change the buffer index for next operation */
1526		swap_index = 1;
1527	}
1528
1529	if (update_data_len > *curr_buff_cnt) {
1530		if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
1531			      DMA_TO_DEVICE, &areq_ctx->in_nents,
1532			      LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
1533			      &mapped_nents)) {
1534			goto unmap_curr_buff;
1535		}
1536		if (mapped_nents == 1 &&
1537		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1538			/* only one entry in the SG and no previous data */
1539			memcpy(areq_ctx->buff_sg, src,
1540			       sizeof(struct scatterlist));
1541			areq_ctx->buff_sg->length = update_data_len;
1542			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1543			areq_ctx->curr_sg = areq_ctx->buff_sg;
1544		} else {
1545			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1546		}
1547	}
1548
1549	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1550		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1551		/* add the src data to the sg_data */
1552		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
1553				(update_data_len - *curr_buff_cnt), 0, true,
1554				&areq_ctx->mlli_nents);
1555		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
1556			goto fail_unmap_din;
1557	}
1558	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1559
1560	return 0;
1561
1562fail_unmap_din:
1563	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1564
1565unmap_curr_buff:
1566	if (*curr_buff_cnt)
1567		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1568
1569	return -ENOMEM;
1570}
1571
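/**
 * cc_unmap_hash_request() - Release the DMA resources of a hash request
 *
 * @dev: Device object
 * @ctx: ahash request context
 * @src: Source scatterlist
 * @do_revert: True to restore the previous buffer index (error path)
 */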
1572void cc_unmap_hash_request(struct device *dev, void *ctx,
1573			   struct scatterlist *src, bool do_revert)
1574{
1575	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1576	u32 *prev_len = cc_next_buf_cnt(areq_ctx);
1577
1578	/*In case a pool was set, a table was
1579	 *allocated and should be released
1580	 */
1581	if (areq_ctx->mlli_params.curr_pool) {
1582		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
1583			&areq_ctx->mlli_params.mlli_dma_addr,
1584			areq_ctx->mlli_params.mlli_virt_addr);
1585		dma_pool_free(areq_ctx->mlli_params.curr_pool,
1586			      areq_ctx->mlli_params.mlli_virt_addr,
1587			      areq_ctx->mlli_params.mlli_dma_addr);
1588	}
1589
1590	if (src && areq_ctx->in_nents) {
1591		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
1592			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
1593		dma_unmap_sg(dev, src,
1594			     areq_ctx->in_nents, DMA_TO_DEVICE);
1595	}
1596
1597	if (*prev_len) {
1598		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
1599			sg_virt(areq_ctx->buff_sg),
1600			&sg_dma_address(areq_ctx->buff_sg),
1601			sg_dma_len(areq_ctx->buff_sg));
1602		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1603		if (!do_revert) {
1604			/* clean the previous data length for update
1605			 * operation
1606			 */
1607			*prev_len = 0;
1608		} else {
1609			areq_ctx->buff_index ^= 1;
1610		}
1611	}
1612}
1613
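/**
 * cc_buffer_mgr_init() - Allocate the buffer manager handle and MLLI DMA pool
 *
 * @drvdata: Driver private data
 *
 * Return: 0 on success, -ENOMEM on allocation failure
 */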
1614int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
1615{
1616	struct buff_mgr_handle *buff_mgr_handle;
1617	struct device *dev = drvdata_to_dev(drvdata);
1618
1619	buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
1620	if (!buff_mgr_handle)
1621		return -ENOMEM;
1622
1623	drvdata->buff_mgr_handle = buff_mgr_handle;
1624
1625	buff_mgr_handle->mlli_buffs_pool =
1626		dma_pool_create("dx_single_mlli_tables", dev,
1627				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1628				LLI_ENTRY_BYTE_SIZE,
1629				MLLI_TABLE_MIN_ALIGNMENT, 0);
1630
1631	if (!buff_mgr_handle->mlli_buffs_pool)
1632		goto error;
1633
1634	return 0;
1635
1636error:
1637	cc_buffer_mgr_fini(drvdata);
1638	return -ENOMEM;
1639}
1640
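/**
 * cc_buffer_mgr_fini() - Destroy the MLLI DMA pool and free the handle
 *
 * @drvdata: Driver private data
 *
 * Return: 0
 */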
1641int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
1642{
1643	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
1644
1645	if (buff_mgr_handle) {
1646		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
1647		kfree(drvdata->buff_mgr_handle);
1648		drvdata->buff_mgr_handle = NULL;
1649	}
1650	return 0;
1651}