   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * caam - Freescale FSL CAAM support for crypto API
   4 *
   5 * Copyright 2008-2011 Freescale Semiconductor, Inc.
   6 * Copyright 2016-2019, 2023 NXP
   7 *
   8 * Based on talitos crypto API driver.
   9 *
  10 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
  11 *
  12 * ---------------                     ---------------
  13 * | JobDesc #1  |-------------------->|  ShareDesc  |
  14 * | *(packet 1) |                     |   (PDB)     |
  15 * ---------------      |------------->|  (hashKey)  |
  16 *       .              |              | (cipherKey) |
  17 *       .              |    |-------->| (operation) |
  18 * ---------------      |    |         ---------------
  19 * | JobDesc #2  |------|    |
  20 * | *(packet 2) |           |
  21 * ---------------           |
  22 *       .                   |
  23 *       .                   |
  24 * ---------------           |
  25 * | JobDesc #3  |------------
  26 * | *(packet 3) |
  27 * ---------------
  28 *
  29 * The SharedDesc never changes for a connection unless rekeyed, but
  30 * each packet will likely be in a different place. So all we need
  31 * to know to process the packet is where the input is, where the
  32 * output goes, and what context we want to process with. Context is
  33 * in the SharedDesc, packet references in the JobDesc.
  34 *
  35 * So, a job desc looks like:
  36 *
  37 * ---------------------
  38 * | Header            |
  39 * | ShareDesc Pointer |
  40 * | SEQ_OUT_PTR       |
  41 * | (output buffer)   |
  42 * | (output length)   |
  43 * | SEQ_IN_PTR        |
  44 * | (input buffer)    |
  45 * | (input length)    |
  46 * ---------------------
  47 */
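/*
 * Illustrative sketch only (not part of the driver): using the
 * desc_constr.h helpers seen throughout this file, a job descriptor of
 * the above shape is assembled roughly as
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *
 * See init_aead_job() and init_skcipher_job() below for the real code.
 */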
  48
  49#include "compat.h"
  50
  51#include "regs.h"
  52#include "intern.h"
  53#include "desc_constr.h"
  54#include "jr.h"
  55#include "error.h"
  56#include "sg_sw_sec4.h"
  57#include "key_gen.h"
  58#include "caamalg_desc.h"
  59#include <asm/unaligned.h>
  60#include <crypto/internal/aead.h>
  61#include <crypto/internal/engine.h>
  62#include <crypto/internal/skcipher.h>
  63#include <crypto/xts.h>
  64#include <linux/dma-mapping.h>
  65#include <linux/device.h>
  66#include <linux/err.h>
  67#include <linux/module.h>
  68#include <linux/kernel.h>
  69#include <linux/slab.h>
  70#include <linux/string.h>
  71
  72/*
  73 * crypto alg
  74 */
  75#define CAAM_CRA_PRIORITY		3000
   76/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce, max split key size */
  77#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
  78					 CTR_RFC3686_NONCE_SIZE + \
  79					 SHA512_DIGEST_SIZE * 2)
  80
  81#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
  82#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
  83					 CAAM_CMD_SZ * 4)
  84#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
  85					 CAAM_CMD_SZ * 5)
  86
  87#define CHACHAPOLY_DESC_JOB_IO_LEN	(AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
  88
  89#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN)
  90#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
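/*
 * DESC_MAX_USED_BYTES is the room left for a cached shared descriptor
 * once the largest job descriptor is accounted for; DESC_MAX_USED_LEN
 * is the same limit in 32-bit CAAM command words (CAAM_CMD_SZ bytes
 * each) and sizes the sh_desc_enc/sh_desc_dec buffers in struct
 * caam_ctx below.
 */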
  91
  92struct caam_alg_entry {
  93	int class1_alg_type;
  94	int class2_alg_type;
  95	bool rfc3686;
  96	bool geniv;
  97	bool nodkp;
  98};
  99
 100struct caam_aead_alg {
 101	struct aead_engine_alg aead;
 102	struct caam_alg_entry caam;
 103	bool registered;
 104};
 105
 106struct caam_skcipher_alg {
 107	struct skcipher_engine_alg skcipher;
 108	struct caam_alg_entry caam;
 109	bool registered;
 110};
 111
 112/*
 113 * per-session context
 114 */
 115struct caam_ctx {
 116	u32 sh_desc_enc[DESC_MAX_USED_LEN];
 117	u32 sh_desc_dec[DESC_MAX_USED_LEN];
 118	u8 key[CAAM_MAX_KEY_SIZE];
 119	dma_addr_t sh_desc_enc_dma;
 120	dma_addr_t sh_desc_dec_dma;
 121	dma_addr_t key_dma;
 122	enum dma_data_direction dir;
 123	struct device *jrdev;
 124	struct alginfo adata;
 125	struct alginfo cdata;
 126	unsigned int authsize;
 127	bool xts_key_fallback;
 128	struct crypto_skcipher *fallback;
 129};
 130
 131struct caam_skcipher_req_ctx {
 132	struct skcipher_edesc *edesc;
 133	struct skcipher_request fallback_req;
 134};
 135
 136struct caam_aead_req_ctx {
 137	struct aead_edesc *edesc;
 138};
 139
 140static int aead_null_set_sh_desc(struct crypto_aead *aead)
 141{
 142	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 143	struct device *jrdev = ctx->jrdev;
 144	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
 145	u32 *desc;
 146	int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
 147			ctx->adata.keylen_pad;
 148
 149	/*
 150	 * Job Descriptor and Shared Descriptors
 151	 * must all fit into the 64-word Descriptor h/w Buffer
 152	 */
 153	if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
 154		ctx->adata.key_inline = true;
 155		ctx->adata.key_virt = ctx->key;
 156	} else {
 157		ctx->adata.key_inline = false;
 158		ctx->adata.key_dma = ctx->key_dma;
 159	}
 160
 161	/* aead_encrypt shared descriptor */
 162	desc = ctx->sh_desc_enc;
 163	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
 164				    ctrlpriv->era);
 165	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
 166				   desc_bytes(desc), ctx->dir);
 167
 168	/*
 169	 * Job Descriptor and Shared Descriptors
 170	 * must all fit into the 64-word Descriptor h/w Buffer
 171	 */
 172	if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
 173		ctx->adata.key_inline = true;
 174		ctx->adata.key_virt = ctx->key;
 175	} else {
 176		ctx->adata.key_inline = false;
 177		ctx->adata.key_dma = ctx->key_dma;
 178	}
 179
 180	/* aead_decrypt shared descriptor */
 181	desc = ctx->sh_desc_dec;
 182	cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
 183				    ctrlpriv->era);
 184	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
 185				   desc_bytes(desc), ctx->dir);
 186
 187	return 0;
 188}
 189
 190static int aead_set_sh_desc(struct crypto_aead *aead)
 191{
 192	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
 193						 struct caam_aead_alg,
 194						 aead.base);
 195	unsigned int ivsize = crypto_aead_ivsize(aead);
 196	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 197	struct device *jrdev = ctx->jrdev;
 198	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
 199	u32 ctx1_iv_off = 0;
 200	u32 *desc, *nonce = NULL;
 201	u32 inl_mask;
 202	unsigned int data_len[2];
 203	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
 204			       OP_ALG_AAI_CTR_MOD128);
 205	const bool is_rfc3686 = alg->caam.rfc3686;
 206
 207	if (!ctx->authsize)
 208		return 0;
 209
 210	/* NULL encryption / decryption */
 211	if (!ctx->cdata.keylen)
 212		return aead_null_set_sh_desc(aead);
 213
 214	/*
 215	 * AES-CTR needs to load IV in CONTEXT1 reg
  216	 * at an offset of 128 bits (16 bytes)
 217	 * CONTEXT1[255:128] = IV
 218	 */
 219	if (ctr_mode)
 220		ctx1_iv_off = 16;
 221
 222	/*
 223	 * RFC3686 specific:
 224	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
 225	 */
 226	if (is_rfc3686) {
 227		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
 228		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
 229				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
 230	}
 231
 232	/*
 233	 * In case |user key| > |derived key|, using DKP<imm,imm>
 234	 * would result in invalid opcodes (last bytes of user key) in
 235	 * the resulting descriptor. Use DKP<ptr,imm> instead => both
 236	 * virtual and dma key addresses are needed.
 237	 */
 238	ctx->adata.key_virt = ctx->key;
 239	ctx->adata.key_dma = ctx->key_dma;
 240
 241	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
 242	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
 243
 244	data_len[0] = ctx->adata.keylen_pad;
 245	data_len[1] = ctx->cdata.keylen;
 246
 247	if (alg->caam.geniv)
 248		goto skip_enc;
 249
 250	/*
 251	 * Job Descriptor and Shared Descriptors
 252	 * must all fit into the 64-word Descriptor h/w Buffer
 253	 */
 254	if (desc_inline_query(DESC_AEAD_ENC_LEN +
 255			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
 256			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
 257			      ARRAY_SIZE(data_len)) < 0)
 258		return -EINVAL;
 259
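	/*
	 * Bit i of inl_mask reports whether data_len[i] fits inline:
	 * bit 0 is the (split) auth key, bit 1 the cipher key.
	 */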
 260	ctx->adata.key_inline = !!(inl_mask & 1);
 261	ctx->cdata.key_inline = !!(inl_mask & 2);
 262
 263	/* aead_encrypt shared descriptor */
 264	desc = ctx->sh_desc_enc;
 265	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
 266			       ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
 267			       false, ctrlpriv->era);
 268	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
 269				   desc_bytes(desc), ctx->dir);
 270
 271skip_enc:
 272	/*
 273	 * Job Descriptor and Shared Descriptors
 274	 * must all fit into the 64-word Descriptor h/w Buffer
 275	 */
 276	if (desc_inline_query(DESC_AEAD_DEC_LEN +
 277			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
 278			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
 279			      ARRAY_SIZE(data_len)) < 0)
 280		return -EINVAL;
 281
 282	ctx->adata.key_inline = !!(inl_mask & 1);
 283	ctx->cdata.key_inline = !!(inl_mask & 2);
 284
 285	/* aead_decrypt shared descriptor */
 286	desc = ctx->sh_desc_dec;
 287	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
 288			       ctx->authsize, alg->caam.geniv, is_rfc3686,
 289			       nonce, ctx1_iv_off, false, ctrlpriv->era);
 290	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
 291				   desc_bytes(desc), ctx->dir);
 292
 293	if (!alg->caam.geniv)
 294		goto skip_givenc;
 295
 296	/*
 297	 * Job Descriptor and Shared Descriptors
 298	 * must all fit into the 64-word Descriptor h/w Buffer
 299	 */
 300	if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
 301			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
 302			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
 303			      ARRAY_SIZE(data_len)) < 0)
 304		return -EINVAL;
 305
 306	ctx->adata.key_inline = !!(inl_mask & 1);
 307	ctx->cdata.key_inline = !!(inl_mask & 2);
 308
 309	/* aead_givencrypt shared descriptor */
 310	desc = ctx->sh_desc_enc;
 311	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
 312				  ctx->authsize, is_rfc3686, nonce,
 313				  ctx1_iv_off, false, ctrlpriv->era);
 314	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
 315				   desc_bytes(desc), ctx->dir);
 316
 317skip_givenc:
 318	return 0;
 319}
 320
 321static int aead_setauthsize(struct crypto_aead *authenc,
 322				    unsigned int authsize)
 323{
 324	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
 325
 326	ctx->authsize = authsize;
 327	aead_set_sh_desc(authenc);
 328
 329	return 0;
 330}
 331
 332static int gcm_set_sh_desc(struct crypto_aead *aead)
 333{
 334	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 335	struct device *jrdev = ctx->jrdev;
 336	unsigned int ivsize = crypto_aead_ivsize(aead);
 337	u32 *desc;
 338	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
 339			ctx->cdata.keylen;
 340
 341	if (!ctx->cdata.keylen || !ctx->authsize)
 342		return 0;
 343
 344	/*
 345	 * AES GCM encrypt shared descriptor
 346	 * Job Descriptor and Shared Descriptor
 347	 * must fit into the 64-word Descriptor h/w Buffer
 348	 */
 349	if (rem_bytes >= DESC_GCM_ENC_LEN) {
 350		ctx->cdata.key_inline = true;
 351		ctx->cdata.key_virt = ctx->key;
 352	} else {
 353		ctx->cdata.key_inline = false;
 354		ctx->cdata.key_dma = ctx->key_dma;
 355	}
 356
 357	desc = ctx->sh_desc_enc;
 358	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
 359	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
 360				   desc_bytes(desc), ctx->dir);
 361
 362	/*
 363	 * Job Descriptor and Shared Descriptors
 364	 * must all fit into the 64-word Descriptor h/w Buffer
 365	 */
 366	if (rem_bytes >= DESC_GCM_DEC_LEN) {
 367		ctx->cdata.key_inline = true;
 368		ctx->cdata.key_virt = ctx->key;
 369	} else {
 370		ctx->cdata.key_inline = false;
 371		ctx->cdata.key_dma = ctx->key_dma;
 372	}
 373
 374	desc = ctx->sh_desc_dec;
 375	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
 376	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
 377				   desc_bytes(desc), ctx->dir);
 378
 379	return 0;
 380}
 381
 382static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
 383{
 384	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
 385	int err;
 386
 387	err = crypto_gcm_check_authsize(authsize);
 388	if (err)
 389		return err;
 390
 391	ctx->authsize = authsize;
 392	gcm_set_sh_desc(authenc);
 393
 394	return 0;
 395}
 396
 397static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 398{
 399	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 400	struct device *jrdev = ctx->jrdev;
 401	unsigned int ivsize = crypto_aead_ivsize(aead);
 402	u32 *desc;
 403	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
 404			ctx->cdata.keylen;
 405
 406	if (!ctx->cdata.keylen || !ctx->authsize)
 407		return 0;
 408
 409	/*
 410	 * RFC4106 encrypt shared descriptor
 411	 * Job Descriptor and Shared Descriptor
 412	 * must fit into the 64-word Descriptor h/w Buffer
 413	 */
 414	if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
 415		ctx->cdata.key_inline = true;
 416		ctx->cdata.key_virt = ctx->key;
 417	} else {
 418		ctx->cdata.key_inline = false;
 419		ctx->cdata.key_dma = ctx->key_dma;
 420	}
 421
 422	desc = ctx->sh_desc_enc;
 423	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
 424				  false);
 425	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
 426				   desc_bytes(desc), ctx->dir);
 427
 428	/*
 429	 * Job Descriptor and Shared Descriptors
 430	 * must all fit into the 64-word Descriptor h/w Buffer
 431	 */
 432	if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
 433		ctx->cdata.key_inline = true;
 434		ctx->cdata.key_virt = ctx->key;
 435	} else {
 436		ctx->cdata.key_inline = false;
 437		ctx->cdata.key_dma = ctx->key_dma;
 438	}
 439
 440	desc = ctx->sh_desc_dec;
 441	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
 442				  false);
 443	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
 444				   desc_bytes(desc), ctx->dir);
 445
 446	return 0;
 447}
 448
 449static int rfc4106_setauthsize(struct crypto_aead *authenc,
 450			       unsigned int authsize)
 451{
 452	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
 453	int err;
 454
 455	err = crypto_rfc4106_check_authsize(authsize);
 456	if (err)
 457		return err;
 458
 459	ctx->authsize = authsize;
 460	rfc4106_set_sh_desc(authenc);
 461
 462	return 0;
 463}
 464
 465static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 466{
 467	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 468	struct device *jrdev = ctx->jrdev;
 469	unsigned int ivsize = crypto_aead_ivsize(aead);
 470	u32 *desc;
 471	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
 472			ctx->cdata.keylen;
 473
 474	if (!ctx->cdata.keylen || !ctx->authsize)
 475		return 0;
 476
 477	/*
 478	 * RFC4543 encrypt shared descriptor
 479	 * Job Descriptor and Shared Descriptor
 480	 * must fit into the 64-word Descriptor h/w Buffer
 481	 */
 482	if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
 483		ctx->cdata.key_inline = true;
 484		ctx->cdata.key_virt = ctx->key;
 485	} else {
 486		ctx->cdata.key_inline = false;
 487		ctx->cdata.key_dma = ctx->key_dma;
 488	}
 489
 490	desc = ctx->sh_desc_enc;
 491	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
 492				  false);
 493	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
 494				   desc_bytes(desc), ctx->dir);
 495
 496	/*
 497	 * Job Descriptor and Shared Descriptors
 498	 * must all fit into the 64-word Descriptor h/w Buffer
 499	 */
 500	if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
 501		ctx->cdata.key_inline = true;
 502		ctx->cdata.key_virt = ctx->key;
 503	} else {
 504		ctx->cdata.key_inline = false;
 505		ctx->cdata.key_dma = ctx->key_dma;
 506	}
 507
 508	desc = ctx->sh_desc_dec;
 509	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
 510				  false);
 511	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
 512				   desc_bytes(desc), ctx->dir);
 513
 514	return 0;
 515}
 516
 517static int rfc4543_setauthsize(struct crypto_aead *authenc,
 518			       unsigned int authsize)
 519{
 520	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
 521
 522	if (authsize != 16)
 523		return -EINVAL;
 524
 525	ctx->authsize = authsize;
 526	rfc4543_set_sh_desc(authenc);
 527
 528	return 0;
 529}
 530
 531static int chachapoly_set_sh_desc(struct crypto_aead *aead)
 532{
 533	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 534	struct device *jrdev = ctx->jrdev;
 535	unsigned int ivsize = crypto_aead_ivsize(aead);
 536	u32 *desc;
 537
 538	if (!ctx->cdata.keylen || !ctx->authsize)
 539		return 0;
 540
 541	desc = ctx->sh_desc_enc;
 542	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
 543			       ctx->authsize, true, false);
 544	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
 545				   desc_bytes(desc), ctx->dir);
 546
 547	desc = ctx->sh_desc_dec;
 548	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
 549			       ctx->authsize, false, false);
 550	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
 551				   desc_bytes(desc), ctx->dir);
 552
 553	return 0;
 554}
 555
 556static int chachapoly_setauthsize(struct crypto_aead *aead,
 557				  unsigned int authsize)
 558{
 559	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 560
 561	if (authsize != POLY1305_DIGEST_SIZE)
 562		return -EINVAL;
 563
 564	ctx->authsize = authsize;
 565	return chachapoly_set_sh_desc(aead);
 566}
 567
 568static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
 569			     unsigned int keylen)
 570{
 571	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 572	unsigned int ivsize = crypto_aead_ivsize(aead);
 573	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
 574
 575	if (keylen != CHACHA_KEY_SIZE + saltlen)
 576		return -EINVAL;
 577
 578	memcpy(ctx->key, key, keylen);
 579	ctx->cdata.key_virt = ctx->key;
 580	ctx->cdata.keylen = keylen - saltlen;
 581
 582	return chachapoly_set_sh_desc(aead);
 583}
 584
 585static int aead_setkey(struct crypto_aead *aead,
 586			       const u8 *key, unsigned int keylen)
 587{
 588	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 589	struct device *jrdev = ctx->jrdev;
 590	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
 591	struct crypto_authenc_keys keys;
 592	int ret = 0;
 593
 594	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 595		goto badkey;
 596
 597	dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
 598	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
 599	       keys.authkeylen);
 600	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
 601			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 602
 603	/*
 604	 * If DKP is supported, use it in the shared descriptor to generate
 605	 * the split key.
 606	 */
 607	if (ctrlpriv->era >= 6) {
 608		ctx->adata.keylen = keys.authkeylen;
 609		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
 610						      OP_ALG_ALGSEL_MASK);
 611
 612		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
 613			goto badkey;
 614
 615		memcpy(ctx->key, keys.authkey, keys.authkeylen);
 616		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
 617		       keys.enckeylen);
 618		dma_sync_single_for_device(jrdev, ctx->key_dma,
 619					   ctx->adata.keylen_pad +
 620					   keys.enckeylen, ctx->dir);
 621		goto skip_split_key;
 622	}
 623
 624	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
 625			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
 626			    keys.enckeylen);
  627	if (ret)
  628		goto badkey;
 630
  631	/* append encryption key after the auth split key */
 632	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
 633	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
 634				   keys.enckeylen, ctx->dir);
 635
 636	print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
 637			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
 638			     ctx->adata.keylen_pad + keys.enckeylen, 1);
 639
 640skip_split_key:
 641	ctx->cdata.keylen = keys.enckeylen;
 642	memzero_explicit(&keys, sizeof(keys));
 643	return aead_set_sh_desc(aead);
 644badkey:
 645	memzero_explicit(&keys, sizeof(keys));
 646	return -EINVAL;
 647}
 648
 649static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
 650			    unsigned int keylen)
 651{
 652	struct crypto_authenc_keys keys;
 653	int err;
 654
 655	err = crypto_authenc_extractkeys(&keys, key, keylen);
 656	if (unlikely(err))
 657		return err;
 658
 659	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
 660	      aead_setkey(aead, key, keylen);
 661
 662	memzero_explicit(&keys, sizeof(keys));
 663	return err;
 664}
 665
 666static int gcm_setkey(struct crypto_aead *aead,
 667		      const u8 *key, unsigned int keylen)
 668{
 669	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 670	struct device *jrdev = ctx->jrdev;
 671	int err;
 672
 673	err = aes_check_keylen(keylen);
 674	if (err)
 675		return err;
 676
 677	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
 678			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 679
 680	memcpy(ctx->key, key, keylen);
 681	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
 682	ctx->cdata.keylen = keylen;
 683
 684	return gcm_set_sh_desc(aead);
 685}
 686
 687static int rfc4106_setkey(struct crypto_aead *aead,
 688			  const u8 *key, unsigned int keylen)
 689{
 690	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 691	struct device *jrdev = ctx->jrdev;
 692	int err;
 693
 694	err = aes_check_keylen(keylen - 4);
 695	if (err)
 696		return err;
 697
 698	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
 699			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 700
 701	memcpy(ctx->key, key, keylen);
 702
 703	/*
 704	 * The last four bytes of the key material are used as the salt value
 705	 * in the nonce. Update the AES key length.
 706	 */
 707	ctx->cdata.keylen = keylen - 4;
 708	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
 709				   ctx->dir);
 710	return rfc4106_set_sh_desc(aead);
 711}
 712
 713static int rfc4543_setkey(struct crypto_aead *aead,
 714			  const u8 *key, unsigned int keylen)
 715{
 716	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 717	struct device *jrdev = ctx->jrdev;
 718	int err;
 719
 720	err = aes_check_keylen(keylen - 4);
 721	if (err)
 722		return err;
 723
 724	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
 725			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 726
 727	memcpy(ctx->key, key, keylen);
 728
 729	/*
 730	 * The last four bytes of the key material are used as the salt value
 731	 * in the nonce. Update the AES key length.
 732	 */
 733	ctx->cdata.keylen = keylen - 4;
 734	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
 735				   ctx->dir);
 736	return rfc4543_set_sh_desc(aead);
 737}
 738
 739static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 740			   unsigned int keylen, const u32 ctx1_iv_off)
 741{
 742	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
 743	struct caam_skcipher_alg *alg =
 744		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
 745			     skcipher.base);
 746	struct device *jrdev = ctx->jrdev;
 747	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 748	u32 *desc;
 749	const bool is_rfc3686 = alg->caam.rfc3686;
 750
 751	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
 752			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 753
 754	ctx->cdata.keylen = keylen;
 755	ctx->cdata.key_virt = key;
 756	ctx->cdata.key_inline = true;
 757
 758	/* skcipher_encrypt shared descriptor */
 759	desc = ctx->sh_desc_enc;
 760	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
 761				   ctx1_iv_off);
 762	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
 763				   desc_bytes(desc), ctx->dir);
 764
 765	/* skcipher_decrypt shared descriptor */
 766	desc = ctx->sh_desc_dec;
 767	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
 768				   ctx1_iv_off);
 769	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
 770				   desc_bytes(desc), ctx->dir);
 771
 772	return 0;
 773}
 774
 775static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
 776			       const u8 *key, unsigned int keylen)
 777{
 778	int err;
 779
 780	err = aes_check_keylen(keylen);
 781	if (err)
 782		return err;
 783
 784	return skcipher_setkey(skcipher, key, keylen, 0);
 785}
 786
 787static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
 788				   const u8 *key, unsigned int keylen)
 789{
 790	u32 ctx1_iv_off;
 791	int err;
 792
 793	/*
 794	 * RFC3686 specific:
 795	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
 796	 *	| *key = {KEY, NONCE}
 797	 */
 798	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
 799	keylen -= CTR_RFC3686_NONCE_SIZE;
 800
 801	err = aes_check_keylen(keylen);
 802	if (err)
 803		return err;
 804
 805	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
 806}
 807
 808static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
 809			       const u8 *key, unsigned int keylen)
 810{
 811	u32 ctx1_iv_off;
 812	int err;
 813
 814	/*
 815	 * AES-CTR needs to load IV in CONTEXT1 reg
  816	 * at an offset of 128 bits (16 bytes)
 817	 * CONTEXT1[255:128] = IV
 818	 */
 819	ctx1_iv_off = 16;
 820
 821	err = aes_check_keylen(keylen);
 822	if (err)
 823		return err;
 824
 825	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
 826}
 827
 828static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
 829			       const u8 *key, unsigned int keylen)
 830{
 831	return verify_skcipher_des_key(skcipher, key) ?:
 832	       skcipher_setkey(skcipher, key, keylen, 0);
 833}
 834
 835static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
 836				const u8 *key, unsigned int keylen)
 837{
 838	return verify_skcipher_des3_key(skcipher, key) ?:
 839	       skcipher_setkey(skcipher, key, keylen, 0);
 840}
 841
 842static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 843			       unsigned int keylen)
 844{
 845	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
 846	struct device *jrdev = ctx->jrdev;
 847	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
 848	u32 *desc;
 849	int err;
 850
 851	err = xts_verify_key(skcipher, key, keylen);
 852	if (err) {
 853		dev_dbg(jrdev, "key size mismatch\n");
 854		return err;
 855	}
 856
 857	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
 858		ctx->xts_key_fallback = true;
 859
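	/*
	 * Set up the s/w fallback tfm where the CAAM cannot handle the
	 * request itself: key sizes other than two AES-128 or two AES-256
	 * keys (xts_key_fallback above), and, on Era <= 8 parts, requests
	 * with a non-zero upper half of the tweak (see skcipher_crypt()).
	 */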
 860	if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
 861		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
 862		if (err)
 863			return err;
 864	}
 865
 866	ctx->cdata.keylen = keylen;
 867	ctx->cdata.key_virt = key;
 868	ctx->cdata.key_inline = true;
 869
 870	/* xts_skcipher_encrypt shared descriptor */
 871	desc = ctx->sh_desc_enc;
 872	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
 873	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
 874				   desc_bytes(desc), ctx->dir);
 875
 876	/* xts_skcipher_decrypt shared descriptor */
 877	desc = ctx->sh_desc_dec;
 878	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
 879	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
 880				   desc_bytes(desc), ctx->dir);
 881
 882	return 0;
 883}
 884
 885/*
 886 * aead_edesc - s/w-extended aead descriptor
 887 * @src_nents: number of segments in input s/w scatterlist
 888 * @dst_nents: number of segments in output s/w scatterlist
 889 * @mapped_src_nents: number of segments in input h/w link table
 890 * @mapped_dst_nents: number of segments in output h/w link table
 891 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 892 * @bklog: stored to determine if the request needs backlog
 893 * @sec4_sg_dma: bus physical mapped address of h/w link table
 894 * @sec4_sg: pointer to h/w link table
 895 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 896 */
 897struct aead_edesc {
 898	int src_nents;
 899	int dst_nents;
 900	int mapped_src_nents;
 901	int mapped_dst_nents;
 902	int sec4_sg_bytes;
 903	bool bklog;
 904	dma_addr_t sec4_sg_dma;
 905	struct sec4_sg_entry *sec4_sg;
 906	u32 hw_desc[];
 907};
 908
 909/*
 910 * skcipher_edesc - s/w-extended skcipher descriptor
 911 * @src_nents: number of segments in input s/w scatterlist
 912 * @dst_nents: number of segments in output s/w scatterlist
 913 * @mapped_src_nents: number of segments in input h/w link table
 914 * @mapped_dst_nents: number of segments in output h/w link table
 915 * @iv_dma: dma address of iv for checking continuity and link table
 916 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 917 * @bklog: stored to determine if the request needs backlog
 918 * @sec4_sg_dma: bus physical mapped address of h/w link table
 919 * @sec4_sg: pointer to h/w link table
 920 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 921 *	     and IV
 922 */
 923struct skcipher_edesc {
 924	int src_nents;
 925	int dst_nents;
 926	int mapped_src_nents;
 927	int mapped_dst_nents;
 928	dma_addr_t iv_dma;
 929	int sec4_sg_bytes;
 930	bool bklog;
 931	dma_addr_t sec4_sg_dma;
 932	struct sec4_sg_entry *sec4_sg;
 933	u32 hw_desc[];
 934};
 935
 936static void caam_unmap(struct device *dev, struct scatterlist *src,
 937		       struct scatterlist *dst, int src_nents,
 938		       int dst_nents,
 939		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
 940		       int sec4_sg_bytes)
 941{
 942	if (dst != src) {
 943		if (src_nents)
 944			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
 945		if (dst_nents)
 946			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
 947	} else {
 948		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
 949	}
 950
 951	if (iv_dma)
 952		dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
 953	if (sec4_sg_bytes)
 954		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
 955				 DMA_TO_DEVICE);
 956}
 957
 958static void aead_unmap(struct device *dev,
 959		       struct aead_edesc *edesc,
 960		       struct aead_request *req)
 961{
 962	caam_unmap(dev, req->src, req->dst,
 963		   edesc->src_nents, edesc->dst_nents, 0, 0,
 964		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 965}
 966
 967static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
 968			   struct skcipher_request *req)
 969{
 970	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 971	int ivsize = crypto_skcipher_ivsize(skcipher);
 972
 973	caam_unmap(dev, req->src, req->dst,
 974		   edesc->src_nents, edesc->dst_nents,
 975		   edesc->iv_dma, ivsize,
 976		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 977}
 978
 979static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
 980			    void *context)
 981{
 982	struct aead_request *req = context;
 983	struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
 984	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
 985	struct aead_edesc *edesc;
 986	int ecode = 0;
 987	bool has_bklog;
 988
 989	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 990
 991	edesc = rctx->edesc;
 992	has_bklog = edesc->bklog;
 993
 994	if (err)
 995		ecode = caam_jr_strstatus(jrdev, err);
 996
 997	aead_unmap(jrdev, edesc, req);
 998
 999	kfree(edesc);
1000
 1001	/*
 1002	 * If the request carries no backlog flag, its completion is handled
 1003	 * by CAAM itself, not by the crypto engine.
 1004	 */
1005	if (!has_bklog)
1006		aead_request_complete(req, ecode);
1007	else
1008		crypto_finalize_aead_request(jrp->engine, req, ecode);
1009}
1010
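/*
 * skcipher_edesc_iv - return the IV buffer inside an skcipher_edesc
 *
 * The IV is stored right after the h/w link table, aligned to the DMA
 * cache alignment so it can be DMA-mapped on its own (see the
 * aligned_size computation in skcipher_edesc_alloc()).
 */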
1011static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc)
1012{
1013
1014	return PTR_ALIGN((u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
1015			 dma_get_cache_alignment());
1016}
1017
1018static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
1019				void *context)
1020{
1021	struct skcipher_request *req = context;
1022	struct skcipher_edesc *edesc;
1023	struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
1024	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1025	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
1026	int ivsize = crypto_skcipher_ivsize(skcipher);
1027	int ecode = 0;
1028	bool has_bklog;
1029
1030	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1031
1032	edesc = rctx->edesc;
1033	has_bklog = edesc->bklog;
1034	if (err)
1035		ecode = caam_jr_strstatus(jrdev, err);
1036
1037	skcipher_unmap(jrdev, edesc, req);
1038
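	/*
	 * skcipher_unmap() has just DMA-unmapped the IV buffer, so the CPU
	 * owns it again and may safely read back the IV the CAAM updated.
	 */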
1039	/*
1040	 * The crypto API expects us to set the IV (req->iv) to the last
1041	 * ciphertext block (CBC mode) or last counter (CTR mode).
1042	 * This is used e.g. by the CTS mode.
1043	 */
1044	if (ivsize && !ecode) {
1045		memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);
1046
1047		print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1048				     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1049				     ivsize, 1);
1050	}
1051
1052	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1053		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1054		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1055
1056	kfree(edesc);
1057
 1058	/*
 1059	 * If the request carries no backlog flag, its completion is handled
 1060	 * by CAAM itself, not by the crypto engine.
 1061	 */
1062	if (!has_bklog)
1063		skcipher_request_complete(req, ecode);
1064	else
1065		crypto_finalize_skcipher_request(jrp->engine, req, ecode);
1066}
1067
1068/*
1069 * Fill in aead job descriptor
1070 */
1071static void init_aead_job(struct aead_request *req,
1072			  struct aead_edesc *edesc,
1073			  bool all_contig, bool encrypt)
1074{
1075	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1076	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1077	int authsize = ctx->authsize;
1078	u32 *desc = edesc->hw_desc;
1079	u32 out_options, in_options;
1080	dma_addr_t dst_dma, src_dma;
1081	int len, sec4_sg_index = 0;
1082	dma_addr_t ptr;
1083	u32 *sh_desc;
1084
1085	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
1086	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
1087
1088	len = desc_len(sh_desc);
1089	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1090
1091	if (all_contig) {
1092		src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) :
1093						    0;
1094		in_options = 0;
1095	} else {
1096		src_dma = edesc->sec4_sg_dma;
1097		sec4_sg_index += edesc->mapped_src_nents;
1098		in_options = LDST_SGF;
1099	}
1100
1101	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
1102			  in_options);
1103
1104	dst_dma = src_dma;
1105	out_options = in_options;
1106
1107	if (unlikely(req->src != req->dst)) {
1108		if (!edesc->mapped_dst_nents) {
1109			dst_dma = 0;
1110			out_options = 0;
1111		} else if (edesc->mapped_dst_nents == 1) {
1112			dst_dma = sg_dma_address(req->dst);
1113			out_options = 0;
1114		} else {
1115			dst_dma = edesc->sec4_sg_dma +
1116				  sec4_sg_index *
1117				  sizeof(struct sec4_sg_entry);
1118			out_options = LDST_SGF;
1119		}
1120	}
1121
1122	if (encrypt)
1123		append_seq_out_ptr(desc, dst_dma,
1124				   req->assoclen + req->cryptlen + authsize,
1125				   out_options);
1126	else
1127		append_seq_out_ptr(desc, dst_dma,
1128				   req->assoclen + req->cryptlen - authsize,
1129				   out_options);
1130}
1131
1132static void init_gcm_job(struct aead_request *req,
1133			 struct aead_edesc *edesc,
1134			 bool all_contig, bool encrypt)
1135{
1136	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1137	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1138	unsigned int ivsize = crypto_aead_ivsize(aead);
1139	u32 *desc = edesc->hw_desc;
1140	bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
1141	unsigned int last;
1142
1143	init_aead_job(req, edesc, all_contig, encrypt);
1144	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1145
1146	/* BUG This should not be specific to generic GCM. */
1147	last = 0;
1148	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
1149		last = FIFOLD_TYPE_LAST1;
1150
1151	/* Read GCM IV */
1152	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1153			 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last);
1154	/* Append Salt */
1155	if (!generic_gcm)
1156		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
1157	/* Append IV */
1158	append_data(desc, req->iv, ivsize);
1159	/* End of blank commands */
1160}
1161
1162static void init_chachapoly_job(struct aead_request *req,
1163				struct aead_edesc *edesc, bool all_contig,
1164				bool encrypt)
1165{
1166	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1167	unsigned int ivsize = crypto_aead_ivsize(aead);
1168	unsigned int assoclen = req->assoclen;
1169	u32 *desc = edesc->hw_desc;
1170	u32 ctx_iv_off = 4;
1171
1172	init_aead_job(req, edesc, all_contig, encrypt);
1173
1174	if (ivsize != CHACHAPOLY_IV_SIZE) {
1175		/* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
1176		ctx_iv_off += 4;
1177
1178		/*
 1179		 * The associated data already includes the IV, but we need
 1180		 * to skip it when we authenticate or encrypt...
1181		 */
1182		assoclen -= ivsize;
1183	}
1184
1185	append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
1186
1187	/*
1188	 * For IPsec load the IV further in the same register.
1189	 * For RFC7539 simply load the 12 bytes nonce in a single operation
1190	 */
1191	append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
1192			   LDST_SRCDST_BYTE_CONTEXT |
1193			   ctx_iv_off << LDST_OFFSET_SHIFT);
1194}
1195
1196static void init_authenc_job(struct aead_request *req,
1197			     struct aead_edesc *edesc,
1198			     bool all_contig, bool encrypt)
1199{
1200	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1201	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
1202						 struct caam_aead_alg,
1203						 aead.base);
1204	unsigned int ivsize = crypto_aead_ivsize(aead);
1205	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1206	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
1207	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
1208			       OP_ALG_AAI_CTR_MOD128);
1209	const bool is_rfc3686 = alg->caam.rfc3686;
1210	u32 *desc = edesc->hw_desc;
1211	u32 ivoffset = 0;
1212
1213	/*
1214	 * AES-CTR needs to load IV in CONTEXT1 reg
1215	 * at an offset of 128bits (16bytes)
 1216	 * at an offset of 128 bits (16 bytes)
1217	 */
1218	if (ctr_mode)
1219		ivoffset = 16;
1220
1221	/*
1222	 * RFC3686 specific:
1223	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1224	 */
1225	if (is_rfc3686)
1226		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
1227
1228	init_aead_job(req, edesc, all_contig, encrypt);
1229
1230	/*
1231	 * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
1232	 * having DPOVRD as destination.
1233	 */
1234	if (ctrlpriv->era < 3)
1235		append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1236	else
1237		append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
1238
1239	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
1240		append_load_as_imm(desc, req->iv, ivsize,
1241				   LDST_CLASS_1_CCB |
1242				   LDST_SRCDST_BYTE_CONTEXT |
1243				   (ivoffset << LDST_OFFSET_SHIFT));
1244}
1245
1246/*
1247 * Fill in skcipher job descriptor
1248 */
1249static void init_skcipher_job(struct skcipher_request *req,
1250			      struct skcipher_edesc *edesc,
1251			      const bool encrypt)
1252{
1253	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1254	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1255	struct device *jrdev = ctx->jrdev;
1256	int ivsize = crypto_skcipher_ivsize(skcipher);
1257	u32 *desc = edesc->hw_desc;
1258	u32 *sh_desc;
1259	u32 in_options = 0, out_options = 0;
1260	dma_addr_t src_dma, dst_dma, ptr;
1261	int len, sec4_sg_index = 0;
1262
1263	print_hex_dump_debug("presciv@"__stringify(__LINE__)": ",
1264			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
 1265	dev_dbg(jrdev, "asked=%d, cryptlen %d\n",
1266	       (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
1267
1268	caam_dump_sg("src    @" __stringify(__LINE__)": ",
1269		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
1270		     edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
1271
1272	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
1273	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
1274
1275	len = desc_len(sh_desc);
1276	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1277
1278	if (ivsize || edesc->mapped_src_nents > 1) {
1279		src_dma = edesc->sec4_sg_dma;
1280		sec4_sg_index = edesc->mapped_src_nents + !!ivsize;
1281		in_options = LDST_SGF;
1282	} else {
1283		src_dma = sg_dma_address(req->src);
1284	}
1285
1286	append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
1287
1288	if (likely(req->src == req->dst)) {
1289		dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
1290		out_options = in_options;
1291	} else if (!ivsize && edesc->mapped_dst_nents == 1) {
1292		dst_dma = sg_dma_address(req->dst);
1293	} else {
1294		dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
1295			  sizeof(struct sec4_sg_entry);
1296		out_options = LDST_SGF;
1297	}
1298
1299	append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
1300}
1301
1302/*
1303 * allocate and map the aead extended descriptor
1304 */
1305static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1306					   int desc_bytes, bool *all_contig_ptr,
1307					   bool encrypt)
1308{
1309	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1310	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1311	struct device *jrdev = ctx->jrdev;
1312	struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
1313	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1314		       GFP_KERNEL : GFP_ATOMIC;
1315	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1316	int src_len, dst_len = 0;
1317	struct aead_edesc *edesc;
1318	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
1319	unsigned int authsize = ctx->authsize;
1320
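	/*
	 * On encryption the ICV (authsize bytes) is appended to the output;
	 * on decryption it is consumed from the input. Hence dst_len below
	 * is authsize bytes longer (encrypt) or shorter (decrypt) than
	 * src_len.
	 */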
1321	if (unlikely(req->dst != req->src)) {
1322		src_len = req->assoclen + req->cryptlen;
1323		dst_len = src_len + (encrypt ? authsize : (-authsize));
1324
1325		src_nents = sg_nents_for_len(req->src, src_len);
1326		if (unlikely(src_nents < 0)) {
1327			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
1328				src_len);
1329			return ERR_PTR(src_nents);
1330		}
1331
1332		dst_nents = sg_nents_for_len(req->dst, dst_len);
1333		if (unlikely(dst_nents < 0)) {
1334			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
1335				dst_len);
1336			return ERR_PTR(dst_nents);
1337		}
1338	} else {
1339		src_len = req->assoclen + req->cryptlen +
1340			  (encrypt ? authsize : 0);
1341
1342		src_nents = sg_nents_for_len(req->src, src_len);
1343		if (unlikely(src_nents < 0)) {
1344			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
1345				src_len);
1346			return ERR_PTR(src_nents);
1347		}
1348	}
1349
1350	if (likely(req->src == req->dst)) {
1351		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
1352					      DMA_BIDIRECTIONAL);
1353		if (unlikely(!mapped_src_nents)) {
1354			dev_err(jrdev, "unable to map source\n");
1355			return ERR_PTR(-ENOMEM);
1356		}
1357	} else {
1358		/* Cover also the case of null (zero length) input data */
1359		if (src_nents) {
1360			mapped_src_nents = dma_map_sg(jrdev, req->src,
1361						      src_nents, DMA_TO_DEVICE);
1362			if (unlikely(!mapped_src_nents)) {
1363				dev_err(jrdev, "unable to map source\n");
1364				return ERR_PTR(-ENOMEM);
1365			}
1366		} else {
1367			mapped_src_nents = 0;
1368		}
1369
1370		/* Cover also the case of null (zero length) output data */
1371		if (dst_nents) {
1372			mapped_dst_nents = dma_map_sg(jrdev, req->dst,
1373						      dst_nents,
1374						      DMA_FROM_DEVICE);
1375			if (unlikely(!mapped_dst_nents)) {
1376				dev_err(jrdev, "unable to map destination\n");
1377				dma_unmap_sg(jrdev, req->src, src_nents,
1378					     DMA_TO_DEVICE);
1379				return ERR_PTR(-ENOMEM);
1380			}
1381		} else {
1382			mapped_dst_nents = 0;
1383		}
1384	}
1385
1386	/*
1387	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1388	 * the end of the table by allocating more S/G entries.
1389	 */
1390	sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
1391	if (mapped_dst_nents > 1)
1392		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
1393	else
1394		sec4_sg_len = pad_sg_nents(sec4_sg_len);
1395
1396	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1397
1398	/* allocate space for base edesc and hw desc commands, link tables */
1399	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, flags);
1400	if (!edesc) {
1401		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1402			   0, 0, 0);
1403		return ERR_PTR(-ENOMEM);
1404	}
1405
1406	edesc->src_nents = src_nents;
1407	edesc->dst_nents = dst_nents;
1408	edesc->mapped_src_nents = mapped_src_nents;
1409	edesc->mapped_dst_nents = mapped_dst_nents;
1410	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1411			 desc_bytes;
1412
1413	rctx->edesc = edesc;
1414
1415	*all_contig_ptr = !(mapped_src_nents > 1);
1416
1417	sec4_sg_index = 0;
1418	if (mapped_src_nents > 1) {
1419		sg_to_sec4_sg_last(req->src, src_len,
1420				   edesc->sec4_sg + sec4_sg_index, 0);
1421		sec4_sg_index += mapped_src_nents;
1422	}
1423	if (mapped_dst_nents > 1) {
1424		sg_to_sec4_sg_last(req->dst, dst_len,
1425				   edesc->sec4_sg + sec4_sg_index, 0);
1426	}
1427
1428	if (!sec4_sg_bytes)
1429		return edesc;
1430
1431	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1432					    sec4_sg_bytes, DMA_TO_DEVICE);
1433	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1434		dev_err(jrdev, "unable to map S/G table\n");
1435		aead_unmap(jrdev, edesc, req);
1436		kfree(edesc);
1437		return ERR_PTR(-ENOMEM);
1438	}
1439
1440	edesc->sec4_sg_bytes = sec4_sg_bytes;
1441
1442	return edesc;
1443}
1444
1445static int aead_enqueue_req(struct device *jrdev, struct aead_request *req)
1446{
1447	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
1448	struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
1449	struct aead_edesc *edesc = rctx->edesc;
1450	u32 *desc = edesc->hw_desc;
1451	int ret;
1452
 1453	/*
 1454	 * Only backlog requests are sent to the crypto engine, since the
 1455	 * others can be handled by CAAM directly, if free, especially as the
 1456	 * JR has up to 1024 entries (more than the crypto engine's 10).
 1457	 */
1458	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
1459		ret = crypto_transfer_aead_request_to_engine(jrpriv->engine,
1460							     req);
1461	else
1462		ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
1463
1464	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
1465		aead_unmap(jrdev, edesc, req);
1466		kfree(rctx->edesc);
1467	}
1468
1469	return ret;
1470}
1471
1472static inline int chachapoly_crypt(struct aead_request *req, bool encrypt)
1473{
1474	struct aead_edesc *edesc;
1475	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1476	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1477	struct device *jrdev = ctx->jrdev;
1478	bool all_contig;
1479	u32 *desc;
1480
1481	edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
1482				 encrypt);
1483	if (IS_ERR(edesc))
1484		return PTR_ERR(edesc);
1485
1486	desc = edesc->hw_desc;
1487
1488	init_chachapoly_job(req, edesc, all_contig, encrypt);
1489	print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
1490			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1491			     1);
1492
1493	return aead_enqueue_req(jrdev, req);
1494}
1495
1496static int chachapoly_encrypt(struct aead_request *req)
1497{
1498	return chachapoly_crypt(req, true);
1499}
1500
1501static int chachapoly_decrypt(struct aead_request *req)
1502{
1503	return chachapoly_crypt(req, false);
1504}
1505
1506static inline int aead_crypt(struct aead_request *req, bool encrypt)
1507{
1508	struct aead_edesc *edesc;
1509	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1510	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1511	struct device *jrdev = ctx->jrdev;
1512	bool all_contig;
1513
1514	/* allocate extended descriptor */
1515	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
1516				 &all_contig, encrypt);
1517	if (IS_ERR(edesc))
1518		return PTR_ERR(edesc);
1519
1520	/* Create and submit job descriptor */
1521	init_authenc_job(req, edesc, all_contig, encrypt);
1522
1523	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
1524			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1525			     desc_bytes(edesc->hw_desc), 1);
1526
1527	return aead_enqueue_req(jrdev, req);
1528}
1529
1530static int aead_encrypt(struct aead_request *req)
1531{
1532	return aead_crypt(req, true);
1533}
1534
1535static int aead_decrypt(struct aead_request *req)
1536{
1537	return aead_crypt(req, false);
1538}
1539
1540static int aead_do_one_req(struct crypto_engine *engine, void *areq)
1541{
1542	struct aead_request *req = aead_request_cast(areq);
1543	struct caam_ctx *ctx = crypto_aead_ctx_dma(crypto_aead_reqtfm(req));
1544	struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
1545	u32 *desc = rctx->edesc->hw_desc;
1546	int ret;
1547
1548	rctx->edesc->bklog = true;
1549
1550	ret = caam_jr_enqueue(ctx->jrdev, desc, aead_crypt_done, req);
1551
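	/*
	 * caam_jr_enqueue() returns -ENOSPC when the JR is full; with retry
	 * support the crypto engine will re-queue the request later.
	 */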
1552	if (ret == -ENOSPC && engine->retry_support)
1553		return ret;
1554
1555	if (ret != -EINPROGRESS) {
1556		aead_unmap(ctx->jrdev, rctx->edesc, req);
1557		kfree(rctx->edesc);
1558	} else {
1559		ret = 0;
1560	}
1561
1562	return ret;
1563}
1564
1565static inline int gcm_crypt(struct aead_request *req, bool encrypt)
1566{
1567	struct aead_edesc *edesc;
1568	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1569	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1570	struct device *jrdev = ctx->jrdev;
1571	bool all_contig;
1572
1573	/* allocate extended descriptor */
1574	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig,
1575				 encrypt);
1576	if (IS_ERR(edesc))
1577		return PTR_ERR(edesc);
1578
1579	/* Create and submit job descriptor */
1580	init_gcm_job(req, edesc, all_contig, encrypt);
1581
1582	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
1583			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1584			     desc_bytes(edesc->hw_desc), 1);
1585
1586	return aead_enqueue_req(jrdev, req);
1587}
1588
1589static int gcm_encrypt(struct aead_request *req)
1590{
1591	return gcm_crypt(req, true);
1592}
1593
1594static int gcm_decrypt(struct aead_request *req)
1595{
1596	return gcm_crypt(req, false);
1597}
1598
1599static int ipsec_gcm_encrypt(struct aead_request *req)
1600{
1601	return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
1602}
1603
1604static int ipsec_gcm_decrypt(struct aead_request *req)
1605{
1606	return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
1607}
1608
1609/*
 1610 * allocate and map the skcipher extended descriptor
1611 */
1612static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1613						   int desc_bytes)
1614{
1615	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1616	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1617	struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
1618	struct device *jrdev = ctx->jrdev;
1619	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1620		       GFP_KERNEL : GFP_ATOMIC;
1621	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1622	struct skcipher_edesc *edesc;
1623	dma_addr_t iv_dma = 0;
1624	u8 *iv;
1625	int ivsize = crypto_skcipher_ivsize(skcipher);
1626	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
1627	unsigned int aligned_size;
1628
1629	src_nents = sg_nents_for_len(req->src, req->cryptlen);
1630	if (unlikely(src_nents < 0)) {
1631		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
1632			req->cryptlen);
1633		return ERR_PTR(src_nents);
1634	}
1635
1636	if (req->dst != req->src) {
1637		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1638		if (unlikely(dst_nents < 0)) {
1639			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
1640				req->cryptlen);
1641			return ERR_PTR(dst_nents);
1642		}
1643	}
1644
1645	if (likely(req->src == req->dst)) {
1646		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
1647					      DMA_BIDIRECTIONAL);
1648		if (unlikely(!mapped_src_nents)) {
1649			dev_err(jrdev, "unable to map source\n");
1650			return ERR_PTR(-ENOMEM);
1651		}
1652	} else {
1653		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
1654					      DMA_TO_DEVICE);
1655		if (unlikely(!mapped_src_nents)) {
1656			dev_err(jrdev, "unable to map source\n");
1657			return ERR_PTR(-ENOMEM);
1658		}
1659		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
1660					      DMA_FROM_DEVICE);
1661		if (unlikely(!mapped_dst_nents)) {
1662			dev_err(jrdev, "unable to map destination\n");
1663			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1664			return ERR_PTR(-ENOMEM);
1665		}
1666	}
1667
1668	if (!ivsize && mapped_src_nents == 1)
1669		sec4_sg_ents = 0; // no need for an input hw s/g table
1670	else
1671		sec4_sg_ents = mapped_src_nents + !!ivsize;
1672	dst_sg_idx = sec4_sg_ents;
1673
1674	/*
1675	 * Input, output HW S/G tables: [IV, src][dst, IV]
1676	 * IV entries point to the same buffer
1677	 * If src == dst, S/G entries are reused (S/G tables overlap)
1678	 *
1679	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1680	 * the end of the table by allocating more S/G entries. Logic:
1681	 * if (output S/G)
1682	 *      pad output S/G, if needed
1683	 * else if (input S/G) ...
1684	 *      pad input S/G, if needed
1685	 */
1686	if (ivsize || mapped_dst_nents > 1) {
1687		if (req->src == req->dst)
1688			sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents);
1689		else
1690			sec4_sg_ents += pad_sg_nents(mapped_dst_nents +
1691						     !!ivsize);
1692	} else {
1693		sec4_sg_ents = pad_sg_nents(sec4_sg_ents);
1694	}
1695
1696	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
1697
1698	/*
1699	 * allocate space for base edesc and hw desc commands, link tables, IV
1700	 */
1701	aligned_size = sizeof(*edesc) + desc_bytes + sec4_sg_bytes;
1702	aligned_size = ALIGN(aligned_size, dma_get_cache_alignment());
1703	aligned_size += ~(ARCH_KMALLOC_MINALIGN - 1) &
1704			(dma_get_cache_alignment() - 1);
1705	aligned_size += ALIGN(ivsize, dma_get_cache_alignment());
1706	edesc = kzalloc(aligned_size, flags);
1707	if (!edesc) {
1708		dev_err(jrdev, "could not allocate extended descriptor\n");
1709		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1710			   0, 0, 0);
1711		return ERR_PTR(-ENOMEM);
1712	}
1713
1714	edesc->src_nents = src_nents;
1715	edesc->dst_nents = dst_nents;
1716	edesc->mapped_src_nents = mapped_src_nents;
1717	edesc->mapped_dst_nents = mapped_dst_nents;
1718	edesc->sec4_sg_bytes = sec4_sg_bytes;
1719	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
1720						  desc_bytes);
1721	rctx->edesc = edesc;
1722
1723	/* Make sure IV is located in a DMAable area */
1724	if (ivsize) {
1725		iv = skcipher_edesc_iv(edesc);
1726		memcpy(iv, req->iv, ivsize);
1727
1728		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
1729		if (dma_mapping_error(jrdev, iv_dma)) {
1730			dev_err(jrdev, "unable to map IV\n");
1731			caam_unmap(jrdev, req->src, req->dst, src_nents,
1732				   dst_nents, 0, 0, 0, 0);
1733			kfree(edesc);
1734			return ERR_PTR(-ENOMEM);
1735		}
1736
1737		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1738	}
1739	if (dst_sg_idx)
1740		sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
1741			      !!ivsize, 0);
1742
1743	if (req->src != req->dst && (ivsize || mapped_dst_nents > 1))
1744		sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
1745			      dst_sg_idx, 0);
1746
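	/*
	 * The trailing IV entry of the output table receives the updated
	 * IV from HW (e.g. the last ciphertext block for CBC), so it can
	 * be copied back to req->iv on completion for request chaining.
	 */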
1747	if (ivsize)
1748		dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
1749				   mapped_dst_nents, iv_dma, ivsize, 0);
1750
1751	if (ivsize || mapped_dst_nents > 1)
1752		sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
1753				    mapped_dst_nents - 1 + !!ivsize);
1754
1755	if (sec4_sg_bytes) {
1756		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1757						    sec4_sg_bytes,
1758						    DMA_TO_DEVICE);
1759		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1760			dev_err(jrdev, "unable to map S/G table\n");
1761			caam_unmap(jrdev, req->src, req->dst, src_nents,
1762				   dst_nents, iv_dma, ivsize, 0, 0);
1763			kfree(edesc);
1764			return ERR_PTR(-ENOMEM);
1765		}
1766	}
1767
1768	edesc->iv_dma = iv_dma;
1769
1770	print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ",
1771			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1772			     sec4_sg_bytes, 1);
1773
1774	return edesc;
1775}
1776
1777static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
1778{
1779	struct skcipher_request *req = skcipher_request_cast(areq);
1780	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(crypto_skcipher_reqtfm(req));
1781	struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
1782	u32 *desc = rctx->edesc->hw_desc;
1783	int ret;
1784
1785	rctx->edesc->bklog = true;
1786
1787	ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req);
1788
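	/*
	 * -ENOSPC means the JR is full: when the engine supports retries,
	 * pass the error back so the engine re-queues the request and
	 * retries it later instead of completing it with an error.
	 */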
1789	if (ret == -ENOSPC && engine->retry_support)
1790		return ret;
1791
1792	if (ret != -EINPROGRESS) {
1793		skcipher_unmap(ctx->jrdev, rctx->edesc, req);
1794		kfree(rctx->edesc);
1795	} else {
1796		ret = 0;
1797	}
1798
1799	return ret;
1800}
1801
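/*
 * Returns true when the second half of the XTS IV (the upper 64 bits of
 * the sector index) is non-zero. HW prior to Era 9 cannot process such
 * IVs, which is why skcipher_crypt() falls back to SW in that case.
 */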
1802static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
1803{
1804	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1805	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
1806
1807	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
1808}
1809
1810static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
1811{
1812	struct skcipher_edesc *edesc;
1813	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1814	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1815	struct device *jrdev = ctx->jrdev;
1816	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
1817	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1818	u32 *desc;
1819	int ret = 0;
1820
1821	/*
1822	 * XTS is expected to return an error even for input length = 0.
1823	 * Note that an input length smaller than the block size will be
1824	 * caught during HW offloading and will return an error.
1825	 */
1826	if (!req->cryptlen && !ctx->fallback)
1827		return 0;
1828
1829	if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
1830			      ctx->xts_key_fallback)) {
1831		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
1832
1833		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
1834		skcipher_request_set_callback(&rctx->fallback_req,
1835					      req->base.flags,
1836					      req->base.complete,
1837					      req->base.data);
1838		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
1839					   req->dst, req->cryptlen, req->iv);
1840
1841		return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
1842				 crypto_skcipher_decrypt(&rctx->fallback_req);
1843	}
1844
1845	/* allocate extended descriptor */
1846	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
1847	if (IS_ERR(edesc))
1848		return PTR_ERR(edesc);
1849
1850	/* Create and submit job descriptor */
1851	init_skcipher_job(req, edesc, encrypt);
1852
1853	print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
1854			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1855			     desc_bytes(edesc->hw_desc), 1);
1856
1857	desc = edesc->hw_desc;
1858	/*
1859	 * Only backlog requests are sent to the crypto engine, since the others
1860	 * can be handled directly by CAAM when it is free, especially since the
1861	 * JR has up to 1024 entries (more than the 10 entries of the crypto engine).
1862	 */
1863	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
1864		ret = crypto_transfer_skcipher_request_to_engine(jrpriv->engine,
1865								 req);
1866	else
1867		ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req);
1868
1869	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
1870		skcipher_unmap(jrdev, edesc, req);
1871		kfree(edesc);
1872	}
1873
1874	return ret;
1875}
1876
1877static int skcipher_encrypt(struct skcipher_request *req)
1878{
1879	return skcipher_crypt(req, true);
1880}
1881
1882static int skcipher_decrypt(struct skcipher_request *req)
1883{
1884	return skcipher_crypt(req, false);
1885}
1886
1887static struct caam_skcipher_alg driver_algs[] = {
1888	{
1889		.skcipher.base = {
1890			.base = {
1891				.cra_name = "cbc(aes)",
1892				.cra_driver_name = "cbc-aes-caam",
1893				.cra_blocksize = AES_BLOCK_SIZE,
1894			},
1895			.setkey = aes_skcipher_setkey,
1896			.encrypt = skcipher_encrypt,
1897			.decrypt = skcipher_decrypt,
1898			.min_keysize = AES_MIN_KEY_SIZE,
1899			.max_keysize = AES_MAX_KEY_SIZE,
1900			.ivsize = AES_BLOCK_SIZE,
1901		},
1902		.skcipher.op = {
1903			.do_one_request = skcipher_do_one_req,
1904		},
1905		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1906	},
1907	{
1908		.skcipher.base = {
1909			.base = {
1910				.cra_name = "cbc(des3_ede)",
1911				.cra_driver_name = "cbc-3des-caam",
1912				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1913			},
1914			.setkey = des3_skcipher_setkey,
1915			.encrypt = skcipher_encrypt,
1916			.decrypt = skcipher_decrypt,
1917			.min_keysize = DES3_EDE_KEY_SIZE,
1918			.max_keysize = DES3_EDE_KEY_SIZE,
1919			.ivsize = DES3_EDE_BLOCK_SIZE,
1920		},
1921		.skcipher.op = {
1922			.do_one_request = skcipher_do_one_req,
1923		},
1924		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1925	},
1926	{
1927		.skcipher.base = {
1928			.base = {
1929				.cra_name = "cbc(des)",
1930				.cra_driver_name = "cbc-des-caam",
1931				.cra_blocksize = DES_BLOCK_SIZE,
1932			},
1933			.setkey = des_skcipher_setkey,
1934			.encrypt = skcipher_encrypt,
1935			.decrypt = skcipher_decrypt,
1936			.min_keysize = DES_KEY_SIZE,
1937			.max_keysize = DES_KEY_SIZE,
1938			.ivsize = DES_BLOCK_SIZE,
1939		},
1940		.skcipher.op = {
1941			.do_one_request = skcipher_do_one_req,
1942		},
1943		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1944	},
1945	{
1946		.skcipher.base = {
1947			.base = {
1948				.cra_name = "ctr(aes)",
1949				.cra_driver_name = "ctr-aes-caam",
1950				.cra_blocksize = 1,
1951			},
1952			.setkey = ctr_skcipher_setkey,
1953			.encrypt = skcipher_encrypt,
1954			.decrypt = skcipher_decrypt,
1955			.min_keysize = AES_MIN_KEY_SIZE,
1956			.max_keysize = AES_MAX_KEY_SIZE,
1957			.ivsize = AES_BLOCK_SIZE,
1958			.chunksize = AES_BLOCK_SIZE,
1959		},
1960		.skcipher.op = {
1961			.do_one_request = skcipher_do_one_req,
1962		},
1963		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1964					OP_ALG_AAI_CTR_MOD128,
1965	},
1966	{
1967		.skcipher.base = {
1968			.base = {
1969				.cra_name = "rfc3686(ctr(aes))",
1970				.cra_driver_name = "rfc3686-ctr-aes-caam",
1971				.cra_blocksize = 1,
1972			},
1973			.setkey = rfc3686_skcipher_setkey,
1974			.encrypt = skcipher_encrypt,
1975			.decrypt = skcipher_decrypt,
1976			.min_keysize = AES_MIN_KEY_SIZE +
1977				       CTR_RFC3686_NONCE_SIZE,
1978			.max_keysize = AES_MAX_KEY_SIZE +
1979				       CTR_RFC3686_NONCE_SIZE,
1980			.ivsize = CTR_RFC3686_IV_SIZE,
1981			.chunksize = AES_BLOCK_SIZE,
1982		},
1983		.skcipher.op = {
1984			.do_one_request = skcipher_do_one_req,
1985		},
1986		.caam = {
1987			.class1_alg_type = OP_ALG_ALGSEL_AES |
1988					   OP_ALG_AAI_CTR_MOD128,
1989			.rfc3686 = true,
1990		},
1991	},
1992	{
1993		.skcipher.base = {
1994			.base = {
1995				.cra_name = "xts(aes)",
1996				.cra_driver_name = "xts-aes-caam",
1997				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
1998				.cra_blocksize = AES_BLOCK_SIZE,
1999			},
2000			.setkey = xts_skcipher_setkey,
2001			.encrypt = skcipher_encrypt,
2002			.decrypt = skcipher_decrypt,
2003			.min_keysize = 2 * AES_MIN_KEY_SIZE,
2004			.max_keysize = 2 * AES_MAX_KEY_SIZE,
2005			.ivsize = AES_BLOCK_SIZE,
2006		},
2007		.skcipher.op = {
2008			.do_one_request = skcipher_do_one_req,
2009		},
2010		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
2011	},
2012	{
2013		.skcipher.base = {
2014			.base = {
2015				.cra_name = "ecb(des)",
2016				.cra_driver_name = "ecb-des-caam",
2017				.cra_blocksize = DES_BLOCK_SIZE,
2018			},
2019			.setkey = des_skcipher_setkey,
2020			.encrypt = skcipher_encrypt,
2021			.decrypt = skcipher_decrypt,
2022			.min_keysize = DES_KEY_SIZE,
2023			.max_keysize = DES_KEY_SIZE,
2024		},
2025		.skcipher.op = {
2026			.do_one_request = skcipher_do_one_req,
2027		},
2028		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
2029	},
2030	{
2031		.skcipher.base = {
2032			.base = {
2033				.cra_name = "ecb(aes)",
2034				.cra_driver_name = "ecb-aes-caam",
2035				.cra_blocksize = AES_BLOCK_SIZE,
2036			},
2037			.setkey = aes_skcipher_setkey,
2038			.encrypt = skcipher_encrypt,
2039			.decrypt = skcipher_decrypt,
2040			.min_keysize = AES_MIN_KEY_SIZE,
2041			.max_keysize = AES_MAX_KEY_SIZE,
2042		},
2043		.skcipher.op = {
2044			.do_one_request = skcipher_do_one_req,
2045		},
2046		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
2047	},
2048	{
2049		.skcipher.base = {
2050			.base = {
2051				.cra_name = "ecb(des3_ede)",
2052				.cra_driver_name = "ecb-des3-caam",
2053				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2054			},
2055			.setkey = des3_skcipher_setkey,
2056			.encrypt = skcipher_encrypt,
2057			.decrypt = skcipher_decrypt,
2058			.min_keysize = DES3_EDE_KEY_SIZE,
2059			.max_keysize = DES3_EDE_KEY_SIZE,
2060		},
2061		.skcipher.op = {
2062			.do_one_request = skcipher_do_one_req,
2063		},
2064		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
2065	},
2066};
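
/*
 * Usage sketch (illustrative, not part of the driver): once registered,
 * the transforms above are reached through the generic skcipher API,
 * e.g. for "cbc(aes)":
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, keylen);
 *	skcipher_request_set_crypt(req, src, dst, cryptlen, iv);
 *	ret = crypto_skcipher_encrypt(req);
 *
 * The crypto core resolves this to "cbc-aes-caam" whenever
 * CAAM_CRA_PRIORITY (3000) outranks the other registered implementations.
 */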
2067
2068static struct caam_aead_alg driver_aeads[] = {
2069	{
2070		.aead.base = {
2071			.base = {
2072				.cra_name = "rfc4106(gcm(aes))",
2073				.cra_driver_name = "rfc4106-gcm-aes-caam",
2074				.cra_blocksize = 1,
2075			},
2076			.setkey = rfc4106_setkey,
2077			.setauthsize = rfc4106_setauthsize,
2078			.encrypt = ipsec_gcm_encrypt,
2079			.decrypt = ipsec_gcm_decrypt,
2080			.ivsize = GCM_RFC4106_IV_SIZE,
2081			.maxauthsize = AES_BLOCK_SIZE,
2082		},
2083		.aead.op = {
2084			.do_one_request = aead_do_one_req,
2085		},
2086		.caam = {
2087			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2088			.nodkp = true,
2089		},
2090	},
2091	{
2092		.aead.base = {
2093			.base = {
2094				.cra_name = "rfc4543(gcm(aes))",
2095				.cra_driver_name = "rfc4543-gcm-aes-caam",
2096				.cra_blocksize = 1,
2097			},
2098			.setkey = rfc4543_setkey,
2099			.setauthsize = rfc4543_setauthsize,
2100			.encrypt = ipsec_gcm_encrypt,
2101			.decrypt = ipsec_gcm_decrypt,
2102			.ivsize = GCM_RFC4543_IV_SIZE,
2103			.maxauthsize = AES_BLOCK_SIZE,
2104		},
2105		.aead.op = {
2106			.do_one_request = aead_do_one_req,
2107		},
2108		.caam = {
2109			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2110			.nodkp = true,
2111		},
2112	},
2113	/* Galois Counter Mode */
2114	{
2115		.aead.base = {
2116			.base = {
2117				.cra_name = "gcm(aes)",
2118				.cra_driver_name = "gcm-aes-caam",
2119				.cra_blocksize = 1,
2120			},
2121			.setkey = gcm_setkey,
2122			.setauthsize = gcm_setauthsize,
2123			.encrypt = gcm_encrypt,
2124			.decrypt = gcm_decrypt,
2125			.ivsize = GCM_AES_IV_SIZE,
2126			.maxauthsize = AES_BLOCK_SIZE,
2127		},
2128		.aead.op = {
2129			.do_one_request = aead_do_one_req,
2130		},
2131		.caam = {
2132			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2133			.nodkp = true,
2134		},
2135	},
2136	/* single-pass ipsec_esp descriptor */
2137	{
2138		.aead.base = {
2139			.base = {
2140				.cra_name = "authenc(hmac(md5),"
2141					    "ecb(cipher_null))",
2142				.cra_driver_name = "authenc-hmac-md5-"
2143						   "ecb-cipher_null-caam",
2144				.cra_blocksize = NULL_BLOCK_SIZE,
2145			},
2146			.setkey = aead_setkey,
2147			.setauthsize = aead_setauthsize,
2148			.encrypt = aead_encrypt,
2149			.decrypt = aead_decrypt,
2150			.ivsize = NULL_IV_SIZE,
2151			.maxauthsize = MD5_DIGEST_SIZE,
2152		},
2153		.aead.op = {
2154			.do_one_request = aead_do_one_req,
2155		},
2156		.caam = {
2157			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2158					   OP_ALG_AAI_HMAC_PRECOMP,
2159		},
2160	},
2161	{
2162		.aead.base = {
2163			.base = {
2164				.cra_name = "authenc(hmac(sha1),"
2165					    "ecb(cipher_null))",
2166				.cra_driver_name = "authenc-hmac-sha1-"
2167						   "ecb-cipher_null-caam",
2168				.cra_blocksize = NULL_BLOCK_SIZE,
2169			},
2170			.setkey = aead_setkey,
2171			.setauthsize = aead_setauthsize,
2172			.encrypt = aead_encrypt,
2173			.decrypt = aead_decrypt,
2174			.ivsize = NULL_IV_SIZE,
2175			.maxauthsize = SHA1_DIGEST_SIZE,
2176		},
2177		.aead.op = {
2178			.do_one_request = aead_do_one_req,
2179		},
2180		.caam = {
2181			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2182					   OP_ALG_AAI_HMAC_PRECOMP,
2183		},
2184	},
2185	{
2186		.aead.base = {
2187			.base = {
2188				.cra_name = "authenc(hmac(sha224),"
2189					    "ecb(cipher_null))",
2190				.cra_driver_name = "authenc-hmac-sha224-"
2191						   "ecb-cipher_null-caam",
2192				.cra_blocksize = NULL_BLOCK_SIZE,
2193			},
2194			.setkey = aead_setkey,
2195			.setauthsize = aead_setauthsize,
2196			.encrypt = aead_encrypt,
2197			.decrypt = aead_decrypt,
2198			.ivsize = NULL_IV_SIZE,
2199			.maxauthsize = SHA224_DIGEST_SIZE,
2200		},
2201		.aead.op = {
2202			.do_one_request = aead_do_one_req,
2203		},
2204		.caam = {
2205			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2206					   OP_ALG_AAI_HMAC_PRECOMP,
2207		},
2208	},
2209	{
2210		.aead.base = {
2211			.base = {
2212				.cra_name = "authenc(hmac(sha256),"
2213					    "ecb(cipher_null))",
2214				.cra_driver_name = "authenc-hmac-sha256-"
2215						   "ecb-cipher_null-caam",
2216				.cra_blocksize = NULL_BLOCK_SIZE,
2217			},
2218			.setkey = aead_setkey,
2219			.setauthsize = aead_setauthsize,
2220			.encrypt = aead_encrypt,
2221			.decrypt = aead_decrypt,
2222			.ivsize = NULL_IV_SIZE,
2223			.maxauthsize = SHA256_DIGEST_SIZE,
2224		},
2225		.aead.op = {
2226			.do_one_request = aead_do_one_req,
2227		},
2228		.caam = {
2229			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2230					   OP_ALG_AAI_HMAC_PRECOMP,
2231		},
2232	},
2233	{
2234		.aead.base = {
2235			.base = {
2236				.cra_name = "authenc(hmac(sha384),"
2237					    "ecb(cipher_null))",
2238				.cra_driver_name = "authenc-hmac-sha384-"
2239						   "ecb-cipher_null-caam",
2240				.cra_blocksize = NULL_BLOCK_SIZE,
2241			},
2242			.setkey = aead_setkey,
2243			.setauthsize = aead_setauthsize,
2244			.encrypt = aead_encrypt,
2245			.decrypt = aead_decrypt,
2246			.ivsize = NULL_IV_SIZE,
2247			.maxauthsize = SHA384_DIGEST_SIZE,
2248		},
2249		.aead.op = {
2250			.do_one_request = aead_do_one_req,
2251		},
2252		.caam = {
2253			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2254					   OP_ALG_AAI_HMAC_PRECOMP,
2255		},
2256	},
2257	{
2258		.aead.base = {
2259			.base = {
2260				.cra_name = "authenc(hmac(sha512),"
2261					    "ecb(cipher_null))",
2262				.cra_driver_name = "authenc-hmac-sha512-"
2263						   "ecb-cipher_null-caam",
2264				.cra_blocksize = NULL_BLOCK_SIZE,
2265			},
2266			.setkey = aead_setkey,
2267			.setauthsize = aead_setauthsize,
2268			.encrypt = aead_encrypt,
2269			.decrypt = aead_decrypt,
2270			.ivsize = NULL_IV_SIZE,
2271			.maxauthsize = SHA512_DIGEST_SIZE,
2272		},
2273		.aead.op = {
2274			.do_one_request = aead_do_one_req,
2275		},
2276		.caam = {
2277			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2278					   OP_ALG_AAI_HMAC_PRECOMP,
2279		},
2280	},
2281	{
2282		.aead.base = {
2283			.base = {
2284				.cra_name = "authenc(hmac(md5),cbc(aes))",
2285				.cra_driver_name = "authenc-hmac-md5-"
2286						   "cbc-aes-caam",
2287				.cra_blocksize = AES_BLOCK_SIZE,
2288			},
2289			.setkey = aead_setkey,
2290			.setauthsize = aead_setauthsize,
2291			.encrypt = aead_encrypt,
2292			.decrypt = aead_decrypt,
2293			.ivsize = AES_BLOCK_SIZE,
2294			.maxauthsize = MD5_DIGEST_SIZE,
2295		},
2296		.aead.op = {
2297			.do_one_request = aead_do_one_req,
2298		},
2299		.caam = {
2300			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2301			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2302					   OP_ALG_AAI_HMAC_PRECOMP,
2303		},
2304	},
2305	{
2306		.aead.base = {
2307			.base = {
2308				.cra_name = "echainiv(authenc(hmac(md5),"
2309					    "cbc(aes)))",
2310				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2311						   "cbc-aes-caam",
2312				.cra_blocksize = AES_BLOCK_SIZE,
2313			},
2314			.setkey = aead_setkey,
2315			.setauthsize = aead_setauthsize,
2316			.encrypt = aead_encrypt,
2317			.decrypt = aead_decrypt,
2318			.ivsize = AES_BLOCK_SIZE,
2319			.maxauthsize = MD5_DIGEST_SIZE,
2320		},
2321		.aead.op = {
2322			.do_one_request = aead_do_one_req,
2323		},
2324		.caam = {
2325			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2326			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2327					   OP_ALG_AAI_HMAC_PRECOMP,
2328			.geniv = true,
2329		},
2330	},
2331	{
2332		.aead.base = {
2333			.base = {
2334				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2335				.cra_driver_name = "authenc-hmac-sha1-"
2336						   "cbc-aes-caam",
2337				.cra_blocksize = AES_BLOCK_SIZE,
2338			},
2339			.setkey = aead_setkey,
2340			.setauthsize = aead_setauthsize,
2341			.encrypt = aead_encrypt,
2342			.decrypt = aead_decrypt,
2343			.ivsize = AES_BLOCK_SIZE,
2344			.maxauthsize = SHA1_DIGEST_SIZE,
2345		},
2346		.aead.op = {
2347			.do_one_request = aead_do_one_req,
2348		},
2349		.caam = {
2350			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2351			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2352					   OP_ALG_AAI_HMAC_PRECOMP,
2353		},
2354	},
2355	{
2356		.aead.base = {
2357			.base = {
2358				.cra_name = "echainiv(authenc(hmac(sha1),"
2359					    "cbc(aes)))",
2360				.cra_driver_name = "echainiv-authenc-"
2361						   "hmac-sha1-cbc-aes-caam",
2362				.cra_blocksize = AES_BLOCK_SIZE,
2363			},
2364			.setkey = aead_setkey,
2365			.setauthsize = aead_setauthsize,
2366			.encrypt = aead_encrypt,
2367			.decrypt = aead_decrypt,
2368			.ivsize = AES_BLOCK_SIZE,
2369			.maxauthsize = SHA1_DIGEST_SIZE,
2370		},
2371		.aead.op = {
2372			.do_one_request = aead_do_one_req,
2373		},
2374		.caam = {
2375			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2376			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2377					   OP_ALG_AAI_HMAC_PRECOMP,
2378			.geniv = true,
2379		},
2380	},
2381	{
2382		.aead.base = {
2383			.base = {
2384				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2385				.cra_driver_name = "authenc-hmac-sha224-"
2386						   "cbc-aes-caam",
2387				.cra_blocksize = AES_BLOCK_SIZE,
2388			},
2389			.setkey = aead_setkey,
2390			.setauthsize = aead_setauthsize,
2391			.encrypt = aead_encrypt,
2392			.decrypt = aead_decrypt,
2393			.ivsize = AES_BLOCK_SIZE,
2394			.maxauthsize = SHA224_DIGEST_SIZE,
2395		},
2396		.aead.op = {
2397			.do_one_request = aead_do_one_req,
2398		},
2399		.caam = {
2400			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2401			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2402					   OP_ALG_AAI_HMAC_PRECOMP,
2403		},
2404	},
2405	{
2406		.aead.base = {
2407			.base = {
2408				.cra_name = "echainiv(authenc(hmac(sha224),"
2409					    "cbc(aes)))",
2410				.cra_driver_name = "echainiv-authenc-"
2411						   "hmac-sha224-cbc-aes-caam",
2412				.cra_blocksize = AES_BLOCK_SIZE,
2413			},
2414			.setkey = aead_setkey,
2415			.setauthsize = aead_setauthsize,
2416			.encrypt = aead_encrypt,
2417			.decrypt = aead_decrypt,
2418			.ivsize = AES_BLOCK_SIZE,
2419			.maxauthsize = SHA224_DIGEST_SIZE,
2420		},
2421		.aead.op = {
2422			.do_one_request = aead_do_one_req,
2423		},
2424		.caam = {
2425			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2426			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2427					   OP_ALG_AAI_HMAC_PRECOMP,
2428			.geniv = true,
2429		},
2430	},
2431	{
2432		.aead.base = {
2433			.base = {
2434				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2435				.cra_driver_name = "authenc-hmac-sha256-"
2436						   "cbc-aes-caam",
2437				.cra_blocksize = AES_BLOCK_SIZE,
2438			},
2439			.setkey = aead_setkey,
2440			.setauthsize = aead_setauthsize,
2441			.encrypt = aead_encrypt,
2442			.decrypt = aead_decrypt,
2443			.ivsize = AES_BLOCK_SIZE,
2444			.maxauthsize = SHA256_DIGEST_SIZE,
2445		},
2446		.aead.op = {
2447			.do_one_request = aead_do_one_req,
2448		},
2449		.caam = {
2450			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2451			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2452					   OP_ALG_AAI_HMAC_PRECOMP,
2453		},
2454	},
2455	{
2456		.aead.base = {
2457			.base = {
2458				.cra_name = "echainiv(authenc(hmac(sha256),"
2459					    "cbc(aes)))",
2460				.cra_driver_name = "echainiv-authenc-"
2461						   "hmac-sha256-cbc-aes-caam",
2462				.cra_blocksize = AES_BLOCK_SIZE,
2463			},
2464			.setkey = aead_setkey,
2465			.setauthsize = aead_setauthsize,
2466			.encrypt = aead_encrypt,
2467			.decrypt = aead_decrypt,
2468			.ivsize = AES_BLOCK_SIZE,
2469			.maxauthsize = SHA256_DIGEST_SIZE,
2470		},
2471		.aead.op = {
2472			.do_one_request = aead_do_one_req,
2473		},
2474		.caam = {
2475			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2476			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2477					   OP_ALG_AAI_HMAC_PRECOMP,
2478			.geniv = true,
2479		},
2480	},
2481	{
2482		.aead.base = {
2483			.base = {
2484				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2485				.cra_driver_name = "authenc-hmac-sha384-"
2486						   "cbc-aes-caam",
2487				.cra_blocksize = AES_BLOCK_SIZE,
2488			},
2489			.setkey = aead_setkey,
2490			.setauthsize = aead_setauthsize,
2491			.encrypt = aead_encrypt,
2492			.decrypt = aead_decrypt,
2493			.ivsize = AES_BLOCK_SIZE,
2494			.maxauthsize = SHA384_DIGEST_SIZE,
2495		},
2496		.aead.op = {
2497			.do_one_request = aead_do_one_req,
2498		},
2499		.caam = {
2500			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2501			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2502					   OP_ALG_AAI_HMAC_PRECOMP,
2503		},
2504	},
2505	{
2506		.aead.base = {
2507			.base = {
2508				.cra_name = "echainiv(authenc(hmac(sha384),"
2509					    "cbc(aes)))",
2510				.cra_driver_name = "echainiv-authenc-"
2511						   "hmac-sha384-cbc-aes-caam",
2512				.cra_blocksize = AES_BLOCK_SIZE,
2513			},
2514			.setkey = aead_setkey,
2515			.setauthsize = aead_setauthsize,
2516			.encrypt = aead_encrypt,
2517			.decrypt = aead_decrypt,
2518			.ivsize = AES_BLOCK_SIZE,
2519			.maxauthsize = SHA384_DIGEST_SIZE,
2520		},
2521		.aead.op = {
2522			.do_one_request = aead_do_one_req,
2523		},
2524		.caam = {
2525			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2526			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2527					   OP_ALG_AAI_HMAC_PRECOMP,
2528			.geniv = true,
2529		},
2530	},
2531	{
2532		.aead.base = {
2533			.base = {
2534				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2535				.cra_driver_name = "authenc-hmac-sha512-"
2536						   "cbc-aes-caam",
2537				.cra_blocksize = AES_BLOCK_SIZE,
2538			},
2539			.setkey = aead_setkey,
2540			.setauthsize = aead_setauthsize,
2541			.encrypt = aead_encrypt,
2542			.decrypt = aead_decrypt,
2543			.ivsize = AES_BLOCK_SIZE,
2544			.maxauthsize = SHA512_DIGEST_SIZE,
2545		},
2546		.aead.op = {
2547			.do_one_request = aead_do_one_req,
2548		},
2549		.caam = {
2550			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2551			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2552					   OP_ALG_AAI_HMAC_PRECOMP,
2553		},
2554	},
2555	{
2556		.aead.base = {
2557			.base = {
2558				.cra_name = "echainiv(authenc(hmac(sha512),"
2559					    "cbc(aes)))",
2560				.cra_driver_name = "echainiv-authenc-"
2561						   "hmac-sha512-cbc-aes-caam",
2562				.cra_blocksize = AES_BLOCK_SIZE,
2563			},
2564			.setkey = aead_setkey,
2565			.setauthsize = aead_setauthsize,
2566			.encrypt = aead_encrypt,
2567			.decrypt = aead_decrypt,
2568			.ivsize = AES_BLOCK_SIZE,
2569			.maxauthsize = SHA512_DIGEST_SIZE,
2570		},
2571		.aead.op = {
2572			.do_one_request = aead_do_one_req,
2573		},
2574		.caam = {
2575			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2576			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2577					   OP_ALG_AAI_HMAC_PRECOMP,
2578			.geniv = true,
2579		},
2580	},
2581	{
2582		.aead.base = {
2583			.base = {
2584				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2585				.cra_driver_name = "authenc-hmac-md5-"
2586						   "cbc-des3_ede-caam",
2587				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2588			},
2589			.setkey = des3_aead_setkey,
2590			.setauthsize = aead_setauthsize,
2591			.encrypt = aead_encrypt,
2592			.decrypt = aead_decrypt,
2593			.ivsize = DES3_EDE_BLOCK_SIZE,
2594			.maxauthsize = MD5_DIGEST_SIZE,
2595		},
2596		.aead.op = {
2597			.do_one_request = aead_do_one_req,
2598		},
2599		.caam = {
2600			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2601			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2602					   OP_ALG_AAI_HMAC_PRECOMP,
2603		}
2604	},
2605	{
2606		.aead.base = {
2607			.base = {
2608				.cra_name = "echainiv(authenc(hmac(md5),"
2609					    "cbc(des3_ede)))",
2610				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2611						   "cbc-des3_ede-caam",
2612				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2613			},
2614			.setkey = des3_aead_setkey,
2615			.setauthsize = aead_setauthsize,
2616			.encrypt = aead_encrypt,
2617			.decrypt = aead_decrypt,
2618			.ivsize = DES3_EDE_BLOCK_SIZE,
2619			.maxauthsize = MD5_DIGEST_SIZE,
2620		},
2621		.aead.op = {
2622			.do_one_request = aead_do_one_req,
2623		},
2624		.caam = {
2625			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2626			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2627					   OP_ALG_AAI_HMAC_PRECOMP,
2628			.geniv = true,
2629		}
2630	},
2631	{
2632		.aead.base = {
2633			.base = {
2634				.cra_name = "authenc(hmac(sha1),"
2635					    "cbc(des3_ede))",
2636				.cra_driver_name = "authenc-hmac-sha1-"
2637						   "cbc-des3_ede-caam",
2638				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2639			},
2640			.setkey = des3_aead_setkey,
2641			.setauthsize = aead_setauthsize,
2642			.encrypt = aead_encrypt,
2643			.decrypt = aead_decrypt,
2644			.ivsize = DES3_EDE_BLOCK_SIZE,
2645			.maxauthsize = SHA1_DIGEST_SIZE,
2646		},
2647		.aead.op = {
2648			.do_one_request = aead_do_one_req,
2649		},
2650		.caam = {
2651			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2652			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2653					   OP_ALG_AAI_HMAC_PRECOMP,
2654		},
2655	},
2656	{
2657		.aead.base = {
2658			.base = {
2659				.cra_name = "echainiv(authenc(hmac(sha1),"
2660					    "cbc(des3_ede)))",
2661				.cra_driver_name = "echainiv-authenc-"
2662						   "hmac-sha1-"
2663						   "cbc-des3_ede-caam",
2664				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2665			},
2666			.setkey = des3_aead_setkey,
2667			.setauthsize = aead_setauthsize,
2668			.encrypt = aead_encrypt,
2669			.decrypt = aead_decrypt,
2670			.ivsize = DES3_EDE_BLOCK_SIZE,
2671			.maxauthsize = SHA1_DIGEST_SIZE,
2672		},
2673		.aead.op = {
2674			.do_one_request = aead_do_one_req,
2675		},
2676		.caam = {
2677			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2678			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2679					   OP_ALG_AAI_HMAC_PRECOMP,
2680			.geniv = true,
2681		},
2682	},
2683	{
2684		.aead.base = {
2685			.base = {
2686				.cra_name = "authenc(hmac(sha224),"
2687					    "cbc(des3_ede))",
2688				.cra_driver_name = "authenc-hmac-sha224-"
2689						   "cbc-des3_ede-caam",
2690				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2691			},
2692			.setkey = des3_aead_setkey,
2693			.setauthsize = aead_setauthsize,
2694			.encrypt = aead_encrypt,
2695			.decrypt = aead_decrypt,
2696			.ivsize = DES3_EDE_BLOCK_SIZE,
2697			.maxauthsize = SHA224_DIGEST_SIZE,
2698		},
2699		.aead.op = {
2700			.do_one_request = aead_do_one_req,
2701		},
2702		.caam = {
2703			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2704			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2705					   OP_ALG_AAI_HMAC_PRECOMP,
2706		},
2707	},
2708	{
2709		.aead.base = {
2710			.base = {
2711				.cra_name = "echainiv(authenc(hmac(sha224),"
2712					    "cbc(des3_ede)))",
2713				.cra_driver_name = "echainiv-authenc-"
2714						   "hmac-sha224-"
2715						   "cbc-des3_ede-caam",
2716				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2717			},
2718			.setkey = des3_aead_setkey,
2719			.setauthsize = aead_setauthsize,
2720			.encrypt = aead_encrypt,
2721			.decrypt = aead_decrypt,
2722			.ivsize = DES3_EDE_BLOCK_SIZE,
2723			.maxauthsize = SHA224_DIGEST_SIZE,
2724		},
2725		.aead.op = {
2726			.do_one_request = aead_do_one_req,
2727		},
2728		.caam = {
2729			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2730			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2731					   OP_ALG_AAI_HMAC_PRECOMP,
2732			.geniv = true,
2733		},
2734	},
2735	{
2736		.aead.base = {
2737			.base = {
2738				.cra_name = "authenc(hmac(sha256),"
2739					    "cbc(des3_ede))",
2740				.cra_driver_name = "authenc-hmac-sha256-"
2741						   "cbc-des3_ede-caam",
2742				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2743			},
2744			.setkey = des3_aead_setkey,
2745			.setauthsize = aead_setauthsize,
2746			.encrypt = aead_encrypt,
2747			.decrypt = aead_decrypt,
2748			.ivsize = DES3_EDE_BLOCK_SIZE,
2749			.maxauthsize = SHA256_DIGEST_SIZE,
2750		},
2751		.aead.op = {
2752			.do_one_request = aead_do_one_req,
2753		},
2754		.caam = {
2755			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2756			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2757					   OP_ALG_AAI_HMAC_PRECOMP,
2758		},
2759	},
2760	{
2761		.aead.base = {
2762			.base = {
2763				.cra_name = "echainiv(authenc(hmac(sha256),"
2764					    "cbc(des3_ede)))",
2765				.cra_driver_name = "echainiv-authenc-"
2766						   "hmac-sha256-"
2767						   "cbc-des3_ede-caam",
2768				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2769			},
2770			.setkey = des3_aead_setkey,
2771			.setauthsize = aead_setauthsize,
2772			.encrypt = aead_encrypt,
2773			.decrypt = aead_decrypt,
2774			.ivsize = DES3_EDE_BLOCK_SIZE,
2775			.maxauthsize = SHA256_DIGEST_SIZE,
2776		},
2777		.aead.op = {
2778			.do_one_request = aead_do_one_req,
2779		},
2780		.caam = {
2781			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2782			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2783					   OP_ALG_AAI_HMAC_PRECOMP,
2784			.geniv = true,
2785		},
2786	},
2787	{
2788		.aead.base = {
2789			.base = {
2790				.cra_name = "authenc(hmac(sha384),"
2791					    "cbc(des3_ede))",
2792				.cra_driver_name = "authenc-hmac-sha384-"
2793						   "cbc-des3_ede-caam",
2794				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2795			},
2796			.setkey = des3_aead_setkey,
2797			.setauthsize = aead_setauthsize,
2798			.encrypt = aead_encrypt,
2799			.decrypt = aead_decrypt,
2800			.ivsize = DES3_EDE_BLOCK_SIZE,
2801			.maxauthsize = SHA384_DIGEST_SIZE,
2802		},
2803		.aead.op = {
2804			.do_one_request = aead_do_one_req,
2805		},
2806		.caam = {
2807			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2808			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2809					   OP_ALG_AAI_HMAC_PRECOMP,
2810		},
2811	},
2812	{
2813		.aead.base = {
2814			.base = {
2815				.cra_name = "echainiv(authenc(hmac(sha384),"
2816					    "cbc(des3_ede)))",
2817				.cra_driver_name = "echainiv-authenc-"
2818						   "hmac-sha384-"
2819						   "cbc-des3_ede-caam",
2820				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2821			},
2822			.setkey = des3_aead_setkey,
2823			.setauthsize = aead_setauthsize,
2824			.encrypt = aead_encrypt,
2825			.decrypt = aead_decrypt,
2826			.ivsize = DES3_EDE_BLOCK_SIZE,
2827			.maxauthsize = SHA384_DIGEST_SIZE,
2828		},
2829		.aead.op = {
2830			.do_one_request = aead_do_one_req,
2831		},
2832		.caam = {
2833			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2834			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2835					   OP_ALG_AAI_HMAC_PRECOMP,
2836			.geniv = true,
2837		},
2838	},
2839	{
2840		.aead.base = {
2841			.base = {
2842				.cra_name = "authenc(hmac(sha512),"
2843					    "cbc(des3_ede))",
2844				.cra_driver_name = "authenc-hmac-sha512-"
2845						   "cbc-des3_ede-caam",
2846				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2847			},
2848			.setkey = des3_aead_setkey,
2849			.setauthsize = aead_setauthsize,
2850			.encrypt = aead_encrypt,
2851			.decrypt = aead_decrypt,
2852			.ivsize = DES3_EDE_BLOCK_SIZE,
2853			.maxauthsize = SHA512_DIGEST_SIZE,
2854		},
2855		.aead.op = {
2856			.do_one_request = aead_do_one_req,
2857		},
2858		.caam = {
2859			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2860			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2861					   OP_ALG_AAI_HMAC_PRECOMP,
2862		},
2863	},
2864	{
2865		.aead.base = {
2866			.base = {
2867				.cra_name = "echainiv(authenc(hmac(sha512),"
2868					    "cbc(des3_ede)))",
2869				.cra_driver_name = "echainiv-authenc-"
2870						   "hmac-sha512-"
2871						   "cbc-des3_ede-caam",
2872				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2873			},
2874			.setkey = des3_aead_setkey,
2875			.setauthsize = aead_setauthsize,
2876			.encrypt = aead_encrypt,
2877			.decrypt = aead_decrypt,
2878			.ivsize = DES3_EDE_BLOCK_SIZE,
2879			.maxauthsize = SHA512_DIGEST_SIZE,
2880		},
2881		.aead.op = {
2882			.do_one_request = aead_do_one_req,
2883		},
2884		.caam = {
2885			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2886			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2887					   OP_ALG_AAI_HMAC_PRECOMP,
2888			.geniv = true,
2889		},
2890	},
2891	{
2892		.aead.base = {
2893			.base = {
2894				.cra_name = "authenc(hmac(md5),cbc(des))",
2895				.cra_driver_name = "authenc-hmac-md5-"
2896						   "cbc-des-caam",
2897				.cra_blocksize = DES_BLOCK_SIZE,
2898			},
2899			.setkey = aead_setkey,
2900			.setauthsize = aead_setauthsize,
2901			.encrypt = aead_encrypt,
2902			.decrypt = aead_decrypt,
2903			.ivsize = DES_BLOCK_SIZE,
2904			.maxauthsize = MD5_DIGEST_SIZE,
2905		},
2906		.aead.op = {
2907			.do_one_request = aead_do_one_req,
2908		},
2909		.caam = {
2910			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2911			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2912					   OP_ALG_AAI_HMAC_PRECOMP,
2913		},
2914	},
2915	{
2916		.aead.base = {
2917			.base = {
2918				.cra_name = "echainiv(authenc(hmac(md5),"
2919					    "cbc(des)))",
2920				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2921						   "cbc-des-caam",
2922				.cra_blocksize = DES_BLOCK_SIZE,
2923			},
2924			.setkey = aead_setkey,
2925			.setauthsize = aead_setauthsize,
2926			.encrypt = aead_encrypt,
2927			.decrypt = aead_decrypt,
2928			.ivsize = DES_BLOCK_SIZE,
2929			.maxauthsize = MD5_DIGEST_SIZE,
2930		},
2931		.aead.op = {
2932			.do_one_request = aead_do_one_req,
2933		},
2934		.caam = {
2935			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2936			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2937					   OP_ALG_AAI_HMAC_PRECOMP,
2938			.geniv = true,
2939		},
2940	},
2941	{
2942		.aead.base = {
2943			.base = {
2944				.cra_name = "authenc(hmac(sha1),cbc(des))",
2945				.cra_driver_name = "authenc-hmac-sha1-"
2946						   "cbc-des-caam",
2947				.cra_blocksize = DES_BLOCK_SIZE,
2948			},
2949			.setkey = aead_setkey,
2950			.setauthsize = aead_setauthsize,
2951			.encrypt = aead_encrypt,
2952			.decrypt = aead_decrypt,
2953			.ivsize = DES_BLOCK_SIZE,
2954			.maxauthsize = SHA1_DIGEST_SIZE,
2955		},
2956		.aead.op = {
2957			.do_one_request = aead_do_one_req,
2958		},
2959		.caam = {
2960			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2961			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2962					   OP_ALG_AAI_HMAC_PRECOMP,
2963		},
2964	},
2965	{
2966		.aead.base = {
2967			.base = {
2968				.cra_name = "echainiv(authenc(hmac(sha1),"
2969					    "cbc(des)))",
2970				.cra_driver_name = "echainiv-authenc-"
2971						   "hmac-sha1-cbc-des-caam",
2972				.cra_blocksize = DES_BLOCK_SIZE,
2973			},
2974			.setkey = aead_setkey,
2975			.setauthsize = aead_setauthsize,
2976			.encrypt = aead_encrypt,
2977			.decrypt = aead_decrypt,
2978			.ivsize = DES_BLOCK_SIZE,
2979			.maxauthsize = SHA1_DIGEST_SIZE,
2980		},
2981		.aead.op = {
2982			.do_one_request = aead_do_one_req,
2983		},
2984		.caam = {
2985			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2986			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2987					   OP_ALG_AAI_HMAC_PRECOMP,
2988			.geniv = true,
2989		},
2990	},
2991	{
2992		.aead.base = {
2993			.base = {
2994				.cra_name = "authenc(hmac(sha224),cbc(des))",
2995				.cra_driver_name = "authenc-hmac-sha224-"
2996						   "cbc-des-caam",
2997				.cra_blocksize = DES_BLOCK_SIZE,
2998			},
2999			.setkey = aead_setkey,
3000			.setauthsize = aead_setauthsize,
3001			.encrypt = aead_encrypt,
3002			.decrypt = aead_decrypt,
3003			.ivsize = DES_BLOCK_SIZE,
3004			.maxauthsize = SHA224_DIGEST_SIZE,
3005		},
3006		.aead.op = {
3007			.do_one_request = aead_do_one_req,
3008		},
3009		.caam = {
3010			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3011			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3012					   OP_ALG_AAI_HMAC_PRECOMP,
3013		},
3014	},
3015	{
3016		.aead.base = {
3017			.base = {
3018				.cra_name = "echainiv(authenc(hmac(sha224),"
3019					    "cbc(des)))",
3020				.cra_driver_name = "echainiv-authenc-"
3021						   "hmac-sha224-cbc-des-caam",
3022				.cra_blocksize = DES_BLOCK_SIZE,
3023			},
3024			.setkey = aead_setkey,
3025			.setauthsize = aead_setauthsize,
3026			.encrypt = aead_encrypt,
3027			.decrypt = aead_decrypt,
3028			.ivsize = DES_BLOCK_SIZE,
3029			.maxauthsize = SHA224_DIGEST_SIZE,
3030		},
3031		.aead.op = {
3032			.do_one_request = aead_do_one_req,
3033		},
3034		.caam = {
3035			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3036			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3037					   OP_ALG_AAI_HMAC_PRECOMP,
3038			.geniv = true,
3039		},
3040	},
3041	{
3042		.aead.base = {
3043			.base = {
3044				.cra_name = "authenc(hmac(sha256),cbc(des))",
3045				.cra_driver_name = "authenc-hmac-sha256-"
3046						   "cbc-des-caam",
3047				.cra_blocksize = DES_BLOCK_SIZE,
3048			},
3049			.setkey = aead_setkey,
3050			.setauthsize = aead_setauthsize,
3051			.encrypt = aead_encrypt,
3052			.decrypt = aead_decrypt,
3053			.ivsize = DES_BLOCK_SIZE,
3054			.maxauthsize = SHA256_DIGEST_SIZE,
3055		},
3056		.aead.op = {
3057			.do_one_request = aead_do_one_req,
3058		},
3059		.caam = {
3060			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3061			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3062					   OP_ALG_AAI_HMAC_PRECOMP,
3063		},
3064	},
3065	{
3066		.aead.base = {
3067			.base = {
3068				.cra_name = "echainiv(authenc(hmac(sha256),"
3069					    "cbc(des)))",
3070				.cra_driver_name = "echainiv-authenc-"
3071						   "hmac-sha256-cbc-des-caam",
3072				.cra_blocksize = DES_BLOCK_SIZE,
3073			},
3074			.setkey = aead_setkey,
3075			.setauthsize = aead_setauthsize,
3076			.encrypt = aead_encrypt,
3077			.decrypt = aead_decrypt,
3078			.ivsize = DES_BLOCK_SIZE,
3079			.maxauthsize = SHA256_DIGEST_SIZE,
3080		},
3081		.aead.op = {
3082			.do_one_request = aead_do_one_req,
3083		},
3084		.caam = {
3085			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3086			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3087					   OP_ALG_AAI_HMAC_PRECOMP,
3088			.geniv = true,
3089		},
3090	},
3091	{
3092		.aead.base = {
3093			.base = {
3094				.cra_name = "authenc(hmac(sha384),cbc(des))",
3095				.cra_driver_name = "authenc-hmac-sha384-"
3096						   "cbc-des-caam",
3097				.cra_blocksize = DES_BLOCK_SIZE,
3098			},
3099			.setkey = aead_setkey,
3100			.setauthsize = aead_setauthsize,
3101			.encrypt = aead_encrypt,
3102			.decrypt = aead_decrypt,
3103			.ivsize = DES_BLOCK_SIZE,
3104			.maxauthsize = SHA384_DIGEST_SIZE,
3105		},
3106		.aead.op = {
3107			.do_one_request = aead_do_one_req,
3108		},
3109		.caam = {
3110			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3111			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3112					   OP_ALG_AAI_HMAC_PRECOMP,
3113		},
3114	},
3115	{
3116		.aead.base = {
3117			.base = {
3118				.cra_name = "echainiv(authenc(hmac(sha384),"
3119					    "cbc(des)))",
3120				.cra_driver_name = "echainiv-authenc-"
3121						   "hmac-sha384-cbc-des-caam",
3122				.cra_blocksize = DES_BLOCK_SIZE,
3123			},
3124			.setkey = aead_setkey,
3125			.setauthsize = aead_setauthsize,
3126			.encrypt = aead_encrypt,
3127			.decrypt = aead_decrypt,
3128			.ivsize = DES_BLOCK_SIZE,
3129			.maxauthsize = SHA384_DIGEST_SIZE,
3130		},
3131		.aead.op = {
3132			.do_one_request = aead_do_one_req,
3133		},
3134		.caam = {
3135			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3136			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3137					   OP_ALG_AAI_HMAC_PRECOMP,
3138			.geniv = true,
3139		},
3140	},
3141	{
3142		.aead.base = {
3143			.base = {
3144				.cra_name = "authenc(hmac(sha512),cbc(des))",
3145				.cra_driver_name = "authenc-hmac-sha512-"
3146						   "cbc-des-caam",
3147				.cra_blocksize = DES_BLOCK_SIZE,
3148			},
3149			.setkey = aead_setkey,
3150			.setauthsize = aead_setauthsize,
3151			.encrypt = aead_encrypt,
3152			.decrypt = aead_decrypt,
3153			.ivsize = DES_BLOCK_SIZE,
3154			.maxauthsize = SHA512_DIGEST_SIZE,
3155		},
3156		.aead.op = {
3157			.do_one_request = aead_do_one_req,
3158		},
3159		.caam = {
3160			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3161			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3162					   OP_ALG_AAI_HMAC_PRECOMP,
3163		},
3164	},
3165	{
3166		.aead.base = {
3167			.base = {
3168				.cra_name = "echainiv(authenc(hmac(sha512),"
3169					    "cbc(des)))",
3170				.cra_driver_name = "echainiv-authenc-"
3171						   "hmac-sha512-cbc-des-caam",
3172				.cra_blocksize = DES_BLOCK_SIZE,
3173			},
3174			.setkey = aead_setkey,
3175			.setauthsize = aead_setauthsize,
3176			.encrypt = aead_encrypt,
3177			.decrypt = aead_decrypt,
3178			.ivsize = DES_BLOCK_SIZE,
3179			.maxauthsize = SHA512_DIGEST_SIZE,
3180		},
3181		.aead.op = {
3182			.do_one_request = aead_do_one_req,
3183		},
3184		.caam = {
3185			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3186			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3187					   OP_ALG_AAI_HMAC_PRECOMP,
3188			.geniv = true,
3189		},
3190	},
3191	{
3192		.aead.base = {
3193			.base = {
3194				.cra_name = "authenc(hmac(md5),"
3195					    "rfc3686(ctr(aes)))",
3196				.cra_driver_name = "authenc-hmac-md5-"
3197						   "rfc3686-ctr-aes-caam",
3198				.cra_blocksize = 1,
3199			},
3200			.setkey = aead_setkey,
3201			.setauthsize = aead_setauthsize,
3202			.encrypt = aead_encrypt,
3203			.decrypt = aead_decrypt,
3204			.ivsize = CTR_RFC3686_IV_SIZE,
3205			.maxauthsize = MD5_DIGEST_SIZE,
3206		},
3207		.aead.op = {
3208			.do_one_request = aead_do_one_req,
3209		},
3210		.caam = {
3211			.class1_alg_type = OP_ALG_ALGSEL_AES |
3212					   OP_ALG_AAI_CTR_MOD128,
3213			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3214					   OP_ALG_AAI_HMAC_PRECOMP,
3215			.rfc3686 = true,
3216		},
3217	},
3218	{
3219		.aead.base = {
3220			.base = {
3221				.cra_name = "seqiv(authenc("
3222					    "hmac(md5),rfc3686(ctr(aes))))",
3223				.cra_driver_name = "seqiv-authenc-hmac-md5-"
3224						   "rfc3686-ctr-aes-caam",
3225				.cra_blocksize = 1,
3226			},
3227			.setkey = aead_setkey,
3228			.setauthsize = aead_setauthsize,
3229			.encrypt = aead_encrypt,
3230			.decrypt = aead_decrypt,
3231			.ivsize = CTR_RFC3686_IV_SIZE,
3232			.maxauthsize = MD5_DIGEST_SIZE,
3233		},
3234		.aead.op = {
3235			.do_one_request = aead_do_one_req,
3236		},
3237		.caam = {
3238			.class1_alg_type = OP_ALG_ALGSEL_AES |
3239					   OP_ALG_AAI_CTR_MOD128,
3240			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3241					   OP_ALG_AAI_HMAC_PRECOMP,
3242			.rfc3686 = true,
3243			.geniv = true,
3244		},
3245	},
3246	{
3247		.aead.base = {
3248			.base = {
3249				.cra_name = "authenc(hmac(sha1),"
3250					    "rfc3686(ctr(aes)))",
3251				.cra_driver_name = "authenc-hmac-sha1-"
3252						   "rfc3686-ctr-aes-caam",
3253				.cra_blocksize = 1,
3254			},
3255			.setkey = aead_setkey,
3256			.setauthsize = aead_setauthsize,
3257			.encrypt = aead_encrypt,
3258			.decrypt = aead_decrypt,
3259			.ivsize = CTR_RFC3686_IV_SIZE,
3260			.maxauthsize = SHA1_DIGEST_SIZE,
3261		},
3262		.aead.op = {
3263			.do_one_request = aead_do_one_req,
3264		},
3265		.caam = {
3266			.class1_alg_type = OP_ALG_ALGSEL_AES |
3267					   OP_ALG_AAI_CTR_MOD128,
3268			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3269					   OP_ALG_AAI_HMAC_PRECOMP,
3270			.rfc3686 = true,
3271		},
3272	},
3273	{
3274		.aead.base = {
3275			.base = {
3276				.cra_name = "seqiv(authenc("
3277					    "hmac(sha1),rfc3686(ctr(aes))))",
3278				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
3279						   "rfc3686-ctr-aes-caam",
3280				.cra_blocksize = 1,
3281			},
3282			.setkey = aead_setkey,
3283			.setauthsize = aead_setauthsize,
3284			.encrypt = aead_encrypt,
3285			.decrypt = aead_decrypt,
3286			.ivsize = CTR_RFC3686_IV_SIZE,
3287			.maxauthsize = SHA1_DIGEST_SIZE,
3288		},
3289		.aead.op = {
3290			.do_one_request = aead_do_one_req,
3291		},
3292		.caam = {
3293			.class1_alg_type = OP_ALG_ALGSEL_AES |
3294					   OP_ALG_AAI_CTR_MOD128,
3295			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3296					   OP_ALG_AAI_HMAC_PRECOMP,
3297			.rfc3686 = true,
3298			.geniv = true,
3299		},
3300	},
3301	{
3302		.aead.base = {
3303			.base = {
3304				.cra_name = "authenc(hmac(sha224),"
3305					    "rfc3686(ctr(aes)))",
3306				.cra_driver_name = "authenc-hmac-sha224-"
3307						   "rfc3686-ctr-aes-caam",
3308				.cra_blocksize = 1,
3309			},
3310			.setkey = aead_setkey,
3311			.setauthsize = aead_setauthsize,
3312			.encrypt = aead_encrypt,
3313			.decrypt = aead_decrypt,
3314			.ivsize = CTR_RFC3686_IV_SIZE,
3315			.maxauthsize = SHA224_DIGEST_SIZE,
3316		},
3317		.aead.op = {
3318			.do_one_request = aead_do_one_req,
3319		},
3320		.caam = {
3321			.class1_alg_type = OP_ALG_ALGSEL_AES |
3322					   OP_ALG_AAI_CTR_MOD128,
3323			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3324					   OP_ALG_AAI_HMAC_PRECOMP,
3325			.rfc3686 = true,
3326		},
3327	},
3328	{
3329		.aead.base = {
3330			.base = {
3331				.cra_name = "seqiv(authenc("
3332					    "hmac(sha224),rfc3686(ctr(aes))))",
3333				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
3334						   "rfc3686-ctr-aes-caam",
3335				.cra_blocksize = 1,
3336			},
3337			.setkey = aead_setkey,
3338			.setauthsize = aead_setauthsize,
3339			.encrypt = aead_encrypt,
3340			.decrypt = aead_decrypt,
3341			.ivsize = CTR_RFC3686_IV_SIZE,
3342			.maxauthsize = SHA224_DIGEST_SIZE,
3343		},
3344		.aead.op = {
3345			.do_one_request = aead_do_one_req,
3346		},
3347		.caam = {
3348			.class1_alg_type = OP_ALG_ALGSEL_AES |
3349					   OP_ALG_AAI_CTR_MOD128,
3350			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3351					   OP_ALG_AAI_HMAC_PRECOMP,
3352			.rfc3686 = true,
3353			.geniv = true,
3354		},
3355	},
3356	{
3357		.aead.base = {
3358			.base = {
3359				.cra_name = "authenc(hmac(sha256),"
3360					    "rfc3686(ctr(aes)))",
3361				.cra_driver_name = "authenc-hmac-sha256-"
3362						   "rfc3686-ctr-aes-caam",
3363				.cra_blocksize = 1,
3364			},
3365			.setkey = aead_setkey,
3366			.setauthsize = aead_setauthsize,
3367			.encrypt = aead_encrypt,
3368			.decrypt = aead_decrypt,
3369			.ivsize = CTR_RFC3686_IV_SIZE,
3370			.maxauthsize = SHA256_DIGEST_SIZE,
3371		},
3372		.aead.op = {
3373			.do_one_request = aead_do_one_req,
3374		},
3375		.caam = {
3376			.class1_alg_type = OP_ALG_ALGSEL_AES |
3377					   OP_ALG_AAI_CTR_MOD128,
3378			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3379					   OP_ALG_AAI_HMAC_PRECOMP,
3380			.rfc3686 = true,
3381		},
3382	},
3383	{
3384		.aead.base = {
3385			.base = {
3386				.cra_name = "seqiv(authenc(hmac(sha256),"
3387					    "rfc3686(ctr(aes))))",
3388				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
3389						   "rfc3686-ctr-aes-caam",
3390				.cra_blocksize = 1,
3391			},
3392			.setkey = aead_setkey,
3393			.setauthsize = aead_setauthsize,
3394			.encrypt = aead_encrypt,
3395			.decrypt = aead_decrypt,
3396			.ivsize = CTR_RFC3686_IV_SIZE,
3397			.maxauthsize = SHA256_DIGEST_SIZE,
3398		},
3399		.aead.op = {
3400			.do_one_request = aead_do_one_req,
3401		},
3402		.caam = {
3403			.class1_alg_type = OP_ALG_ALGSEL_AES |
3404					   OP_ALG_AAI_CTR_MOD128,
3405			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3406					   OP_ALG_AAI_HMAC_PRECOMP,
3407			.rfc3686 = true,
3408			.geniv = true,
3409		},
3410	},
3411	{
3412		.aead.base = {
3413			.base = {
3414				.cra_name = "authenc(hmac(sha384),"
3415					    "rfc3686(ctr(aes)))",
3416				.cra_driver_name = "authenc-hmac-sha384-"
3417						   "rfc3686-ctr-aes-caam",
3418				.cra_blocksize = 1,
3419			},
3420			.setkey = aead_setkey,
3421			.setauthsize = aead_setauthsize,
3422			.encrypt = aead_encrypt,
3423			.decrypt = aead_decrypt,
3424			.ivsize = CTR_RFC3686_IV_SIZE,
3425			.maxauthsize = SHA384_DIGEST_SIZE,
3426		},
3427		.aead.op = {
3428			.do_one_request = aead_do_one_req,
3429		},
3430		.caam = {
3431			.class1_alg_type = OP_ALG_ALGSEL_AES |
3432					   OP_ALG_AAI_CTR_MOD128,
3433			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3434					   OP_ALG_AAI_HMAC_PRECOMP,
3435			.rfc3686 = true,
3436		},
3437	},
3438	{
3439		.aead.base = {
3440			.base = {
3441				.cra_name = "seqiv(authenc(hmac(sha384),"
3442					    "rfc3686(ctr(aes))))",
3443				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
3444						   "rfc3686-ctr-aes-caam",
3445				.cra_blocksize = 1,
3446			},
3447			.setkey = aead_setkey,
3448			.setauthsize = aead_setauthsize,
3449			.encrypt = aead_encrypt,
3450			.decrypt = aead_decrypt,
3451			.ivsize = CTR_RFC3686_IV_SIZE,
3452			.maxauthsize = SHA384_DIGEST_SIZE,
3453		},
3454		.aead.op = {
3455			.do_one_request = aead_do_one_req,
3456		},
3457		.caam = {
3458			.class1_alg_type = OP_ALG_ALGSEL_AES |
3459					   OP_ALG_AAI_CTR_MOD128,
3460			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3461					   OP_ALG_AAI_HMAC_PRECOMP,
3462			.rfc3686 = true,
3463			.geniv = true,
3464		},
3465	},
3466	{
3467		.aead.base = {
3468			.base = {
3469				.cra_name = "authenc(hmac(sha512),"
3470					    "rfc3686(ctr(aes)))",
3471				.cra_driver_name = "authenc-hmac-sha512-"
3472						   "rfc3686-ctr-aes-caam",
3473				.cra_blocksize = 1,
3474			},
3475			.setkey = aead_setkey,
3476			.setauthsize = aead_setauthsize,
3477			.encrypt = aead_encrypt,
3478			.decrypt = aead_decrypt,
3479			.ivsize = CTR_RFC3686_IV_SIZE,
3480			.maxauthsize = SHA512_DIGEST_SIZE,
3481		},
3482		.aead.op = {
3483			.do_one_request = aead_do_one_req,
3484		},
3485		.caam = {
3486			.class1_alg_type = OP_ALG_ALGSEL_AES |
3487					   OP_ALG_AAI_CTR_MOD128,
3488			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3489					   OP_ALG_AAI_HMAC_PRECOMP,
3490			.rfc3686 = true,
3491		},
3492	},
3493	{
3494		.aead.base = {
3495			.base = {
3496				.cra_name = "seqiv(authenc(hmac(sha512),"
3497					    "rfc3686(ctr(aes))))",
3498				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
3499						   "rfc3686-ctr-aes-caam",
3500				.cra_blocksize = 1,
3501			},
3502			.setkey = aead_setkey,
3503			.setauthsize = aead_setauthsize,
3504			.encrypt = aead_encrypt,
3505			.decrypt = aead_decrypt,
3506			.ivsize = CTR_RFC3686_IV_SIZE,
3507			.maxauthsize = SHA512_DIGEST_SIZE,
3508		},
3509		.aead.op = {
3510			.do_one_request = aead_do_one_req,
3511		},
3512		.caam = {
3513			.class1_alg_type = OP_ALG_ALGSEL_AES |
3514					   OP_ALG_AAI_CTR_MOD128,
3515			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3516					   OP_ALG_AAI_HMAC_PRECOMP,
3517			.rfc3686 = true,
3518			.geniv = true,
3519		},
3520	},
3521	{
3522		.aead.base = {
3523			.base = {
3524				.cra_name = "rfc7539(chacha20,poly1305)",
3525				.cra_driver_name = "rfc7539-chacha20-poly1305-"
3526						   "caam",
3527				.cra_blocksize = 1,
3528			},
3529			.setkey = chachapoly_setkey,
3530			.setauthsize = chachapoly_setauthsize,
3531			.encrypt = chachapoly_encrypt,
3532			.decrypt = chachapoly_decrypt,
3533			.ivsize = CHACHAPOLY_IV_SIZE,
3534			.maxauthsize = POLY1305_DIGEST_SIZE,
3535		},
3536		.aead.op = {
3537			.do_one_request = aead_do_one_req,
3538		},
3539		.caam = {
3540			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
3541					   OP_ALG_AAI_AEAD,
3542			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
3543					   OP_ALG_AAI_AEAD,
3544			.nodkp = true,
3545		},
3546	},
3547	{
3548		.aead.base = {
3549			.base = {
3550				.cra_name = "rfc7539esp(chacha20,poly1305)",
3551				.cra_driver_name = "rfc7539esp-chacha20-"
3552						   "poly1305-caam",
3553				.cra_blocksize = 1,
3554			},
3555			.setkey = chachapoly_setkey,
3556			.setauthsize = chachapoly_setauthsize,
3557			.encrypt = chachapoly_encrypt,
3558			.decrypt = chachapoly_decrypt,
3559			.ivsize = 8,
3560			.maxauthsize = POLY1305_DIGEST_SIZE,
3561		},
3562		.aead.op = {
3563			.do_one_request = aead_do_one_req,
3564		},
3565		.caam = {
3566			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
3567					   OP_ALG_AAI_AEAD,
3568			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
3569					   OP_ALG_AAI_AEAD,
3570			.nodkp = true,
3571		},
3572	},
3573};
3574
3575static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
3576			    bool uses_dkp)
3577{
3578	dma_addr_t dma_addr;
3579	struct caam_drv_private *priv;
3580	const size_t sh_desc_enc_offset = offsetof(struct caam_ctx,
3581						   sh_desc_enc);
3582
3583	ctx->jrdev = caam_jr_alloc();
3584	if (IS_ERR(ctx->jrdev)) {
3585		pr_err("Job Ring Device allocation for transform failed\n");
3586		return PTR_ERR(ctx->jrdev);
3587	}
3588
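	/*
	 * On Era 6+ the DKP (Derived Key Protocol) instruction overwrites
	 * the plain key in the shared descriptor with the derived split
	 * key, so the descriptor/key region must be mapped bidirectionally.
	 */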
3589	priv = dev_get_drvdata(ctx->jrdev->parent);
3590	if (priv->era >= 6 && uses_dkp)
3591		ctx->dir = DMA_BIDIRECTIONAL;
3592	else
3593		ctx->dir = DMA_TO_DEVICE;
3594
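	/*
	 * sh_desc_enc, sh_desc_dec and key are laid out contiguously in
	 * struct caam_ctx up to the sh_desc_enc_dma member, so one mapping
	 * covers all three; the individual DMA addresses are derived from
	 * it below via offsetof().
	 */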
3595	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
3596					offsetof(struct caam_ctx,
3597						 sh_desc_enc_dma) -
3598					sh_desc_enc_offset,
3599					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
3600	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
3601		dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
3602		caam_jr_free(ctx->jrdev);
3603		return -ENOMEM;
3604	}
3605
3606	ctx->sh_desc_enc_dma = dma_addr;
3607	ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
3608						   sh_desc_dec) -
3609					sh_desc_enc_offset;
3610	ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key) -
3611					sh_desc_enc_offset;
3612
3613	/* copy descriptor header template value */
3614	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
3615	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
3616
3617	return 0;
3618}
3619
3620static int caam_cra_init(struct crypto_skcipher *tfm)
3621{
3622	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
3623	struct caam_skcipher_alg *caam_alg =
3624		container_of(alg, typeof(*caam_alg), skcipher.base);
3625	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
3626	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
3627	int ret = 0;
3628
3629	if (alg_aai == OP_ALG_AAI_XTS) {
3630		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
3631		struct crypto_skcipher *fallback;
3632
3633		fallback = crypto_alloc_skcipher(tfm_name, 0,
3634						 CRYPTO_ALG_NEED_FALLBACK);
3635		if (IS_ERR(fallback)) {
3636			pr_err("Failed to allocate %s fallback: %ld\n",
3637			       tfm_name, PTR_ERR(fallback));
3638			return PTR_ERR(fallback);
3639		}
3640
3641		ctx->fallback = fallback;
3642		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
3643					    crypto_skcipher_reqsize(fallback));
3644	} else {
3645		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
3646	}
3647
3648	ret = caam_init_common(ctx, &caam_alg->caam, false);
3649	if (ret && ctx->fallback)
3650		crypto_free_skcipher(ctx->fallback);
3651
3652	return ret;
3653}
3654
3655static int caam_aead_init(struct crypto_aead *tfm)
3656{
3657	struct aead_alg *alg = crypto_aead_alg(tfm);
3658	struct caam_aead_alg *caam_alg =
3659		 container_of(alg, struct caam_aead_alg, aead.base);
3660	struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm);
3661
3662	crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx));
3663
3664	return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
3665}
3666
3667static void caam_exit_common(struct caam_ctx *ctx)
3668{
3669	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
3670			       offsetof(struct caam_ctx, sh_desc_enc_dma) -
3671			       offsetof(struct caam_ctx, sh_desc_enc),
3672			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
3673	caam_jr_free(ctx->jrdev);
3674}
3675
3676static void caam_cra_exit(struct crypto_skcipher *tfm)
3677{
3678	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
 
 
 
3679
3680	if (ctx->fallback)
3681		crypto_free_skcipher(ctx->fallback);
3682	caam_exit_common(ctx);
3683}
3684
3685static void caam_aead_exit(struct crypto_aead *tfm)
3686{
3687	caam_exit_common(crypto_aead_ctx_dma(tfm));
3688}
3689
3690void caam_algapi_exit(void)
3691{
3692	int i;
3693
3694	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
3695		struct caam_aead_alg *t_alg = driver_aeads + i;
3696
3697		if (t_alg->registered)
3698			crypto_engine_unregister_aead(&t_alg->aead);
 
 
3699	}
3700
3701	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3702		struct caam_skcipher_alg *t_alg = driver_algs + i;
3703
3704		if (t_alg->registered)
3705			crypto_engine_unregister_skcipher(&t_alg->skcipher);
3706	}
 
3707}
3708
3709static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3710{
3711	struct skcipher_alg *alg = &t_alg->skcipher.base;
 
3712
3713	alg->base.cra_module = THIS_MODULE;
3714	alg->base.cra_priority = CAAM_CRA_PRIORITY;
3715	alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
3716	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3717			      CRYPTO_ALG_KERN_DRIVER_ONLY);
3718
3719	alg->init = caam_cra_init;
3720	alg->exit = caam_cra_exit;
3721}
3722
3723static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3724{
3725	struct aead_alg *alg = &t_alg->aead.base;
3726
3727	alg->base.cra_module = THIS_MODULE;
3728	alg->base.cra_priority = CAAM_CRA_PRIORITY;
3729	alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
3730	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3731			      CRYPTO_ALG_KERN_DRIVER_ONLY;
3732
3733	alg->init = caam_aead_init;
3734	alg->exit = caam_aead_exit;
3735}
3736
3737int caam_algapi_init(struct device *ctrldev)
3738{
3739	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
3740	int i = 0, err = 0;
3741	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
3742	unsigned int md_limit = SHA512_DIGEST_SIZE;
3743	bool registered = false, gcm_support;
3744
3745	/*
3746	 * Register crypto algorithms the device supports.
3747	 * First, detect presence and attributes of DES, AES, and MD blocks.
3748	 */
3749	if (priv->era < 10) {
3750		struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
3751		u32 cha_vid, cha_inst, aes_rn;
3752
3753		cha_vid = rd_reg32(&perfmon->cha_id_ls);
3754		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
3755		md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
3756
3757		cha_inst = rd_reg32(&perfmon->cha_num_ls);
3758		des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
3759			   CHA_ID_LS_DES_SHIFT;
3760		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
3761		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
3762		ccha_inst = 0;
3763		ptha_inst = 0;
3764
3765		aes_rn = rd_reg32(&perfmon->cha_rev_ls) & CHA_ID_LS_AES_MASK;
3766		gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
3767	} else {
3768		struct version_regs __iomem *vreg = &priv->jr[0]->vreg;
3769		u32 aesa, mdha;
3770
3771		aesa = rd_reg32(&vreg->aesa);
3772		mdha = rd_reg32(&vreg->mdha);
 
 
 
3773
3774		aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
3775		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
3776
3777		des_inst = rd_reg32(&vreg->desa) & CHA_VER_NUM_MASK;
3778		aes_inst = aesa & CHA_VER_NUM_MASK;
3779		md_inst = mdha & CHA_VER_NUM_MASK;
3780		ccha_inst = rd_reg32(&vreg->ccha) & CHA_VER_NUM_MASK;
3781		ptha_inst = rd_reg32(&vreg->ptha) & CHA_VER_NUM_MASK;
3782
3783		gcm_support = aesa & CHA_VER_MISC_AES_GCM;
3784	}
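/*
 * Example (sketch): on an Era >= 10 part without a ChaCha accelerator,
 * rd_reg32(&vreg->ccha) reports zero instances, so ccha_inst == 0 and every
 * OP_ALG_ALGSEL_CHACHA20 template is skipped by the AEAD loop below.
 */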
3785
3786	/* If MD is present, limit digest size based on LP256 */
3787	if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
3788		md_limit = SHA256_DIGEST_SIZE;
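/*
 * Example (sketch): with an LP256 MDHA block, md_limit is 32 bytes, so
 * hmac(sha512)-based authenc templates (maxauthsize == 64) fail the
 * is_mdha()/md_limit check below and are never registered.
 */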
3789
 
3790	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3791		struct caam_skcipher_alg *t_alg = driver_algs + i;
3792		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
3793
3794		/* Skip DES algorithms if not supported by device */
3795		if (!des_inst &&
3796		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
3797		     (alg_sel == OP_ALG_ALGSEL_DES)))
3798			continue;
3799
3800		/* Skip AES algorithms if not supported by device */
3801		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
3802			continue;
3803
3804		/*
3805		 * Check support for AES modes not available
3806		 * on LP devices.
3807		 */
3808		if (aes_vid == CHA_VER_VID_AES_LP &&
3809		    (t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
3810		    OP_ALG_AAI_XTS)
3811			continue;
3812
3813		caam_skcipher_alg_init(t_alg);
3814
3815		err = crypto_engine_register_skcipher(&t_alg->skcipher);
3816		if (err) {
3817			pr_warn("%s alg registration failed\n",
3818				t_alg->skcipher.base.base.cra_driver_name);
3819			continue;
3820		}
3821
3822		t_alg->registered = true;
3823		registered = true;
3824	}
3825
3826	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
3827		struct caam_aead_alg *t_alg = driver_aeads + i;
3828		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
3829				 OP_ALG_ALGSEL_MASK;
3830		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
3831				 OP_ALG_ALGSEL_MASK;
3832		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
3833
3834		/* Skip DES algorithms if not supported by device */
3835		if (!des_inst &&
3836		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
3837		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
3838			continue;
3839
3840		/* Skip AES algorithms if not supported by device */
3841		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
3842			continue;
3843
3844		/* Skip CHACHA20 algorithms if not supported by device */
3845		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
3846			continue;
3847
3848		/* Skip POLY1305 algorithms if not supported by device */
3849		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
3850			continue;
3851
3852		/* Skip GCM algorithms if not supported by device */
3853		if (c1_alg_sel == OP_ALG_ALGSEL_AES &&
3854		    alg_aai == OP_ALG_AAI_GCM && !gcm_support)
3855			continue;
3856
3857		/*
3858		 * Skip algorithms requiring message digests
3859		 * if MD or MD size is not supported by device.
3860		 */
3861		if (is_mdha(c2_alg_sel) &&
3862		    (!md_inst || t_alg->aead.base.maxauthsize > md_limit))
3863			continue;
3864
3865		caam_aead_alg_init(t_alg);
3866
3867		err = crypto_engine_register_aead(&t_alg->aead);
3868		if (err) {
3869			pr_warn("%s alg registration failed\n",
3870				t_alg->aead.base.base.cra_driver_name);
3871			continue;
3872		}
3873
3874		t_alg->registered = true;
3875		registered = true;
3876	}
3877
3878	if (registered)
3879		pr_info("caam algorithms registered in /proc/crypto\n");
3880
3881	return err;
3882}
v3.1
 
   1/*
   2 * caam - Freescale FSL CAAM support for crypto API
   3 *
   4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 
   5 *
   6 * Based on talitos crypto API driver.
   7 *
   8 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
   9 *
  10 * ---------------                     ---------------
  11 * | JobDesc #1  |-------------------->|  ShareDesc  |
  12 * | *(packet 1) |                     |   (PDB)     |
  13 * ---------------      |------------->|  (hashKey)  |
  14 *       .              |              | (cipherKey) |
  15 *       .              |    |-------->| (operation) |
  16 * ---------------      |    |         ---------------
  17 * | JobDesc #2  |------|    |
  18 * | *(packet 2) |           |
  19 * ---------------           |
  20 *       .                   |
  21 *       .                   |
  22 * ---------------           |
  23 * | JobDesc #3  |------------
  24 * | *(packet 3) |
  25 * ---------------
  26 *
  27 * The SharedDesc never changes for a connection unless rekeyed, but
  28 * each packet will likely be in a different place. So all we need
  29 * to know to process the packet is where the input is, where the
  30 * output goes, and what context we want to process with. Context is
  31 * in the SharedDesc, packet references in the JobDesc.
  32 *
  33 * So, a job desc looks like:
  34 *
  35 * ---------------------
  36 * | Header            |
  37 * | ShareDesc Pointer |
  38 * | SEQ_OUT_PTR       |
  39 * | (output buffer)   |
  40 * | SEQ_IN_PTR        |
  41 * | (input buffer)    |
  42 * | LOAD (to DECO)    |
  43 * ---------------------
  44 */
  45
  46#include "compat.h"
  47
  48#include "regs.h"
  49#include "intern.h"
  50#include "desc_constr.h"
  51#include "jr.h"
  52#include "error.h"
  53
  54/*
  55 * crypto alg
  56 */
  57#define CAAM_CRA_PRIORITY		3000
  58/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
  59#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
  60					 SHA512_DIGEST_SIZE * 2)
  61/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
  62#define CAAM_MAX_IV_LENGTH		16
  63
  64/* length of descriptors text */
  65#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3)
 
 
 
  66
  67#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
  68#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
  69#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
  70#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
  71
  72#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
  73#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
  74					 20 * CAAM_CMD_SZ)
  75#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
  76					 15 * CAAM_CMD_SZ)
  77
  78#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
  79					 CAAM_MAX_KEY_SIZE)
  80#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
  81
  82#ifdef DEBUG
  83/* for print_hex_dumps with line references */
  84#define xstr(s) str(s)
  85#define str(s) #s
  86#define debug(format, arg...) printk(format, arg)
  87#else
  88#define debug(format, arg...)
  89#endif
  90
  91/* Set DK bit in class 1 operation if shared */
  92static inline void append_dec_op1(u32 *desc, u32 type)
  93{
  94	u32 *jump_cmd, *uncond_jump_cmd;
  95
  96	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
  97	append_operation(desc, type | OP_ALG_AS_INITFINAL |
  98			 OP_ALG_DECRYPT);
  99	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
 100	set_jump_tgt_here(desc, jump_cmd);
 101	append_operation(desc, type | OP_ALG_AS_INITFINAL |
 102			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
 103	set_jump_tgt_here(desc, uncond_jump_cmd);
 104}
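/*
 * Control flow emitted above (sketch):
 *
 *	if (SHRD set)	goto dk		; jump_cmd
 *	OPERATION decrypt
 *	goto done			; uncond_jump_cmd
 * dk:	OPERATION decrypt | OP_ALG_AAI_DK
 * done:
 *
 * i.e. the DK (decrypt-key) variant runs only when the descriptor is
 * entered with its key already shared.
 */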
 105
 106/*
 107 * Wait for completion of class 1 key loading before allowing
 108 * error propagation
 109 */
 110static inline void append_dec_shr_done(u32 *desc)
 111{
 112	u32 *jump_cmd;
 113
 114	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
 115	set_jump_tgt_here(desc, jump_cmd);
 116	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
 117}
 118
 119/*
 120 * For aead functions, read payload and write payload,
 121 * both of which are specified in req->src and req->dst
 122 */
 123static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
 124{
 125	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
 126			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
 127	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
 128}
 129
 130/*
 131 * For aead encrypt and decrypt, read iv for both classes
 132 */
 133static inline void aead_append_ld_iv(u32 *desc, int ivsize)
 134{
 135	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
 136		   LDST_CLASS_1_CCB | ivsize);
 137	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
 138}
 139
 140/*
 141 * For ablkcipher encrypt and decrypt, read from req->src and
 142 * write to req->dst
 143 */
 144static inline void ablkcipher_append_src_dst(u32 *desc)
 145{
 146	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
 147	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
 148	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
 149			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
 150	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
 151}
 152
 153/*
 154 * If all data, including src (with assoc and iv) or dst (with iv only) are
 155 * contiguous
 156 */
 157#define GIV_SRC_CONTIG		1
 158#define GIV_DST_CONTIG		(1 << 1)
 159
 160/*
 161 * per-session context
 162 */
 163struct caam_ctx {
 164	struct device *jrdev;
 165	u32 sh_desc_enc[DESC_MAX_USED_LEN];
 166	u32 sh_desc_dec[DESC_MAX_USED_LEN];
 167	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
 168	dma_addr_t sh_desc_enc_dma;
 169	dma_addr_t sh_desc_dec_dma;
 170	dma_addr_t sh_desc_givenc_dma;
 171	u32 class1_alg_type;
 172	u32 class2_alg_type;
 173	u32 alg_op;
 174	u8 key[CAAM_MAX_KEY_SIZE];
 175	dma_addr_t key_dma;
 176	unsigned int enckeylen;
 177	unsigned int split_key_len;
 178	unsigned int split_key_pad_len;
 
 179	unsigned int authsize;
 
 
 180};
 181
 182static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
 183			    int keys_fit_inline)
 184{
 185	if (keys_fit_inline) {
 186		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
 187				  ctx->split_key_len, CLASS_2 |
 188				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
 189		append_key_as_imm(desc, (void *)ctx->key +
 190				  ctx->split_key_pad_len, ctx->enckeylen,
 191				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
 192	} else {
 193		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
 194			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
 195		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
 196			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
 197	}
 198}
 199
 200static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
 201				  int keys_fit_inline)
 202{
 203	u32 *key_jump_cmd;
 
 
 204
 205	init_sh_desc(desc, HDR_SHARE_WAIT);
 206
 207	/* Skip if already shared */
 208	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 209				   JUMP_COND_SHRD);
 
 
 
 210
 211	append_key_aead(desc, ctx, keys_fit_inline);
 212
 213	set_jump_tgt_here(desc, key_jump_cmd);
 214
 215	/* Propagate errors from shared to job descriptor */
 216	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
 217}
 218
 219static int aead_set_sh_desc(struct crypto_aead *aead)
 220{
 221	struct aead_tfm *tfm = &aead->base.crt_aead;
 222	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 
 
 
 223	struct device *jrdev = ctx->jrdev;
 224	bool keys_fit_inline = 0;
 225	u32 *key_jump_cmd, *jump_cmd;
 226	u32 geniv, moveiv;
 227	u32 *desc;
 228
 229	if (!ctx->enckeylen || !ctx->authsize)
 230		return 0;
 231
 232	/*
 233	 * Job Descriptor and Shared Descriptors
 234	 * must all fit into the 64-word Descriptor h/w Buffer
 
 235	 */
 236	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
 237	    ctx->split_key_pad_len + ctx->enckeylen <=
 238	    CAAM_DESC_BYTES_MAX)
 239		keys_fit_inline = 1;
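/*
 * Worked example (sketch, assuming authenc(hmac(sha1),cbc(aes)) with a
 * 16-byte AES key and CAAM_PTR_SZ == 8): split_key_pad_len =
 * ALIGN(2 * 20, 16) = 48, so DESC_AEAD_ENC_LEN (80) + DESC_JOB_IO_LEN (36)
 * + 48 + 16 = 180 <= CAAM_DESC_BYTES_MAX (256), and the keys are inlined
 * as immediates.
 */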
 240
 241	/* aead_encrypt shared descriptor */
 242	desc = ctx->sh_desc_enc;
 243
 244	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
 245
 246	/* Class 2 operation */
 247	append_operation(desc, ctx->class2_alg_type |
 248			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 249
 250	/* cryptlen = seqoutlen - authsize */
 251	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
 252
 253	/* assoclen + cryptlen = seqinlen - ivsize */
 254	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
 255
 256	/* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
 257	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
 258
 259	/* read assoc before reading payload */
 260	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
 261			     KEY_VLF);
 262	aead_append_ld_iv(desc, tfm->ivsize);
 263
 264	/* Class 1 operation */
 265	append_operation(desc, ctx->class1_alg_type |
 266			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 267
 268	/* Read and write cryptlen bytes */
 269	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
 270	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 271	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
 272
 273	/* Write ICV */
 274	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
 275			 LDST_SRCDST_BYTE_CONTEXT);
 276
 277	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
 278					      desc_bytes(desc),
 279					      DMA_TO_DEVICE);
 280	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
 281		dev_err(jrdev, "unable to map shared descriptor\n");
 282		return -ENOMEM;
 283	}
 284#ifdef DEBUG
 285	print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
 286		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 287		       desc_bytes(desc), 1);
 288#endif
 289
 
 290	/*
 291	 * Job Descriptor and Shared Descriptors
 292	 * must all fit into the 64-word Descriptor h/w Buffer
 293	 */
 294	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
 295	    ctx->split_key_pad_len + ctx->enckeylen <=
 296	    CAAM_DESC_BYTES_MAX)
 297		keys_fit_inline = 1;
 
 298
 299	desc = ctx->sh_desc_dec;
 
 300
 301	/* aead_decrypt shared descriptor */
 302	init_sh_desc(desc, HDR_SHARE_WAIT);
 303
 304	/* Skip if already shared */
 305	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 306				   JUMP_COND_SHRD);
 307
 308	append_key_aead(desc, ctx, keys_fit_inline);
 309
 310	/* Only propagate error immediately if shared */
 311	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
 312	set_jump_tgt_here(desc, key_jump_cmd);
 313	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
 314	set_jump_tgt_here(desc, jump_cmd);
 315
 316	/* Class 2 operation */
 317	append_operation(desc, ctx->class2_alg_type |
 318			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
 319
 320	/* assoclen + cryptlen = seqinlen - (authsize + ivsize) */
 321	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
 322				ctx->authsize + tfm->ivsize);
 323	/* assoclen = (assoclen + cryptlen) - cryptlen */
 324	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
 325	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
 326
 327	/* read assoc before reading payload */
 328	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
 329			     KEY_VLF);
 330
 331	aead_append_ld_iv(desc, tfm->ivsize);
 332
 333	append_dec_op1(desc, ctx->class1_alg_type);
 334
 335	/* Read and write cryptlen bytes */
 336	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
 337	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
 338	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
 339
 340	/* Load ICV */
 341	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
 342			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
 343	append_dec_shr_done(desc);
 344
 345	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
 346					      desc_bytes(desc),
 347					      DMA_TO_DEVICE);
 348	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
 349		dev_err(jrdev, "unable to map shared descriptor\n");
 350		return -ENOMEM;
 351	}
 352#ifdef DEBUG
 353	print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
 354		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 355		       desc_bytes(desc), 1);
 356#endif
 357
 358	/*
 359	 * Job Descriptor and Shared Descriptors
 360	 * must all fit into the 64-word Descriptor h/w Buffer
 361	 */
 362	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
 363	    ctx->split_key_pad_len + ctx->enckeylen <=
 364	    CAAM_DESC_BYTES_MAX)
 365		keys_fit_inline = 1;
 366
 367	/* aead_givencrypt shared descriptor */
 368	desc = ctx->sh_desc_givenc;
 369
 370	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
 371
 372	/* Generate IV */
 373	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
 374		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
 375		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
 376	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
 377			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
 378	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
 379	append_move(desc, MOVE_SRC_INFIFO |
 380		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
 381	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
 382
 383	/* Copy generated IV from class 1 context to the output FIFO */
 384	append_move(desc, MOVE_SRC_CLASS1CTX |
 385		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
 386
 387	/* Return to encryption */
 388	append_operation(desc, ctx->class2_alg_type |
 389			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 390
 391	/* ivsize + cryptlen = seqoutlen - authsize */
 392	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
 393
 394	/* assoclen = seqinlen - (ivsize + cryptlen) */
 395	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
 396
 397	/* read assoc before reading payload */
 398	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
 399			     KEY_VLF);
 400
 401	/* Copy iv from class 1 ctx to class 2 fifo */
 402	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
 403		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
 404	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
 405			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
 406	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
 407			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
 408
 409	/* Class 1 operation */
 410	append_operation(desc, ctx->class1_alg_type |
 411			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 412
 413	/* Will write ivsize + cryptlen */
 414	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
 415
 416	/* No need to reload iv */
 417	append_seq_fifo_load(desc, tfm->ivsize,
 418			     FIFOLD_CLASS_SKIP);
 419
 420	/* Will read cryptlen */
 421	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
 422	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
 423
 424	/* Write ICV */
 425	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
 426			 LDST_SRCDST_BYTE_CONTEXT);
 427
 428	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
 429						 desc_bytes(desc),
 430						 DMA_TO_DEVICE);
 431	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
 432		dev_err(jrdev, "unable to map shared descriptor\n");
 433		return -ENOMEM;
 434	}
 435#ifdef DEBUG
 436	print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
 437		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 438		       desc_bytes(desc), 1);
 439#endif
 440
 
 441	return 0;
 442}
 443
 444static int aead_setauthsize(struct crypto_aead *authenc,
 445				    unsigned int authsize)
 446{
 447	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
 448
 449	ctx->authsize = authsize;
 450	aead_set_sh_desc(authenc);
 451
 452	return 0;
 453}
 454
 455struct split_key_result {
 456	struct completion completion;
 457	int err;
 458};
 459
 460static void split_key_done(struct device *dev, u32 *desc, u32 err,
 461			   void *context)
 462{
 463	struct split_key_result *res = context;
 464
 465#ifdef DEBUG
 466	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 467#endif
 468
 469	if (err) {
 470		char tmp[CAAM_ERROR_STR_MAX];
 
 
 471
 472		dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
 473	}
 474
 475	res->err = err;
 
 
 
 476
 477	complete(&res->completion);
 478}
 479
 480/*
 481 * get a split ipad/opad key
 482 *
 483 * Split key generation ----------------------------------------------
 484 *
 485 * [00] 0xb0810008    jobdesc: stidx=1 share=never len=8
 486 * [01] 0x04000014        key: class2->keyreg len=20
 487 *			@0xffe01000
 488 * [03] 0x84410014  operation: cls2-op sha1 hmac init dec
 489 * [04] 0x24940000     fifold: class2 msgdata-last2 len=0 imm
 490 * [05] 0xa4000001       jump: class2 local all ->1 [06]
 491 * [06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
 492 *			@0xffe04000
 493 */
 494static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
 495{
 
 496	struct device *jrdev = ctx->jrdev;
 
 497	u32 *desc;
 498	struct split_key_result result;
 499	dma_addr_t dma_addr_in, dma_addr_out;
 500	int ret = 0;
 
 
 501
 502	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc)
		return -ENOMEM;
 503
 504	init_job_desc(desc, 0);
 505
 506	dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
 507				     DMA_TO_DEVICE);
 508	if (dma_mapping_error(jrdev, dma_addr_in)) {
 509		dev_err(jrdev, "unable to map key input memory\n");
 510		kfree(desc);
 511		return -ENOMEM;
 512	}
 513	append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
 514		       KEY_DEST_CLASS_REG);
 515
 516	/* Sets MDHA up into an HMAC-INIT */
 517	append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
 518			     OP_ALG_AS_INIT);
 519
 520	/*
 521	 * Do a FIFO_LOAD of zero, which triggers the internal key expansion
 522	 * into both pads inside MDHA.
 523	 */
 524	append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
 525				FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
 526
 527	/*
 528	 * FIFO_STORE with the explicit split-key content store
 529	 * (0x26 output type)
 530	 */
 531	dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
 532				      DMA_FROM_DEVICE);
 533	if (dma_mapping_error(jrdev, dma_addr_out)) {
 534		dev_err(jrdev, "unable to map key output memory\n");
 535		kfree(desc);
 536		return -ENOMEM;
 537	}
 538	append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
 539			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
 540
 541#ifdef DEBUG
 542	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
 543		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
 544	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
 545		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 546#endif
 547
 548	result.err = 0;
 549	init_completion(&result.completion);
 550
 551	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
 552	if (!ret) {
 553		/* in progress */
 554		wait_for_completion_interruptible(&result.completion);
 555		ret = result.err;
 556#ifdef DEBUG
 557		print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
 558			       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
 559			       ctx->split_key_pad_len, 1);
 560#endif
 561	}
 562
 563	dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
 564			 DMA_FROM_DEVICE);
 565	dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);
 566
 567	kfree(desc);
 568
 569	return ret;
 570}
 571
 572static int aead_setkey(struct crypto_aead *aead,
 573			       const u8 *key, unsigned int keylen)
 574{
 575	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
 576	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
 577	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 578	struct device *jrdev = ctx->jrdev;
 579	struct rtattr *rta = (void *)key;
 580	struct crypto_authenc_key_param *param;
 581	unsigned int authkeylen;
 582	unsigned int enckeylen;
 583	int ret = 0;
 584
 585	param = RTA_DATA(rta);
 586	enckeylen = be32_to_cpu(param->enckeylen);
 587
 588	key += RTA_ALIGN(rta->rta_len);
 589	keylen -= RTA_ALIGN(rta->rta_len);
 590
 591	if (keylen < enckeylen)
 592		goto badkey;
 
 593
 594	authkeylen = keylen - enckeylen;
 
 595
 596	if (keylen > CAAM_MAX_KEY_SIZE)
 597		goto badkey;
 598
 599	/* Pick class 2 key length from algorithm submask */
 600	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
 601				      OP_ALG_ALGSEL_SHIFT] * 2;
 602	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
 603
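/*
 * Example (sketch): for hmac(sha256), mdpadlen[] gives 32, so
 * split_key_len = 2 * 32 = 64 and split_key_pad_len = ALIGN(64, 16) = 64;
 * the ipad- and opad-derived halves each occupy one 32-byte digest state.
 */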
 604#ifdef DEBUG
 605	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
 606	       keylen, enckeylen, authkeylen);
 607	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
 608	       ctx->split_key_len, ctx->split_key_pad_len);
 609	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
 610		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 611#endif
 612
 613	ret = gen_split_key(ctx, key, authkeylen);
 614	if (ret)
 615		goto badkey;
 617
 618	/* append the encryption key after the auth split key */
 619	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
 620
 621	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
 622				       enckeylen, DMA_TO_DEVICE);
 623	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 624		dev_err(jrdev, "unable to map key i/o memory\n");
 625		return -ENOMEM;
 626	}
 627#ifdef DEBUG
 628	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
 629		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
 630		       ctx->split_key_pad_len + enckeylen, 1);
 631#endif
 632
 633	ctx->enckeylen = enckeylen;
 
 634
 635	ret = aead_set_sh_desc(aead);
 636	if (ret) {
 637		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
 638				 enckeylen, DMA_TO_DEVICE);
 639	}
 640
 641	return ret;
 642badkey:
 643	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
 644	return -EINVAL;
 645}
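/*
 * Key blob consumed above (sketch): callers pass an RTA_ALIGNed rtattr of
 * type CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian enckeylen,
 * followed by the raw authentication key and then the encryption key:
 *
 *	{ rtattr | crypto_authenc_key_param } | authkey | enckey
 */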
 646
 647static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 648			     const u8 *key, unsigned int keylen)
 649{
 650	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
 651	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
 652	struct device *jrdev = ctx->jrdev;
 653	int ret = 0;
 654	u32 *key_jump_cmd, *jump_cmd;
 655	u32 *desc;
 
 656
 657#ifdef DEBUG
 658	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
 659		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 660#endif
 661
 662	memcpy(ctx->key, key, keylen);
 663	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
 664				      DMA_TO_DEVICE);
 665	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 666		dev_err(jrdev, "unable to map key i/o memory\n");
 667		return -ENOMEM;
 668	}
 669	ctx->enckeylen = keylen;
 670
 671	/* ablkcipher_encrypt shared descriptor */
 672	desc = ctx->sh_desc_enc;
 673	init_sh_desc(desc, HDR_SHARE_WAIT);
 674	/* Skip if already shared */
 675	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 676				   JUMP_COND_SHRD);
 677
 678	/* Load class1 key only */
 679	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
 680			  ctx->enckeylen, CLASS_1 |
 681			  KEY_DEST_CLASS_REG);
 682
 683	set_jump_tgt_here(desc, key_jump_cmd);
 684
 685	/* Propagate errors from shared to job descriptor */
 686	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
 687
 688	/* Load iv */
 689	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
 690		   LDST_CLASS_1_CCB | tfm->ivsize);
 691
 692	/* Load operation */
 693	append_operation(desc, ctx->class1_alg_type |
 694			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 695
 696	/* Perform operation */
 697	ablkcipher_append_src_dst(desc);
 698
 699	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
 700					      desc_bytes(desc),
 701					      DMA_TO_DEVICE);
 702	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
 703		dev_err(jrdev, "unable to map shared descriptor\n");
 704		return -ENOMEM;
 705	}
 706#ifdef DEBUG
 707	print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
 708		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 709		       desc_bytes(desc), 1);
 710#endif
 711	/* ablkcipher_decrypt shared descriptor */
 712	desc = ctx->sh_desc_dec;
 713
 714	init_sh_desc(desc, HDR_SHARE_WAIT);
 715	/* Skip if already shared */
 716	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 717				   JUMP_COND_SHRD);
 718
 719	/* Load class1 key only */
 720	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
 721			  ctx->enckeylen, CLASS_1 |
 722			  KEY_DEST_CLASS_REG);
 723
 724	/* For aead, only propagate error immediately if shared */
 725	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
 726	set_jump_tgt_here(desc, key_jump_cmd);
 727	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
 728	set_jump_tgt_here(desc, jump_cmd);
 729
 730	/* load IV */
 731	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
 732		   LDST_CLASS_1_CCB | tfm->ivsize);
 733
 734	/* Choose operation */
 735	append_dec_op1(desc, ctx->class1_alg_type);
 736
 737	/* Perform operation */
 738	ablkcipher_append_src_dst(desc);
 
 
 
 739
 740	/* Wait for key to load before allowing propagating error */
 741	append_dec_shr_done(desc);
 
 742
 743	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
 744					      desc_bytes(desc),
 745					      DMA_TO_DEVICE);
 746	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
 747		dev_err(jrdev, "unable to map shared descriptor\n");
 748		return -ENOMEM;
 749	}
 750
 751#ifdef DEBUG
 752	print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
 753		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 754		       desc_bytes(desc), 1);
 755#endif
 756
 757	return ret;
 758}
 759
 760struct link_tbl_entry {
 761	u64 ptr;
 762	u32 len;
 763	u8 reserved;
 764	u8 buf_pool_id;
 765	u16 offset;
 766};
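/*
 * Example entry (sketch): a 4 KiB segment at bus address 0x10000000 is
 * encoded as { .ptr = 0x10000000, .len = 0x1000, .buf_pool_id = 0,
 * .offset = 0 }; sg_to_link_tbl_last() below ORs 0x40000000 into .len of
 * the final entry to mark the end of the table.
 */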
 767
 768/*
 769 * aead_edesc - s/w-extended aead descriptor
 770 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 771 * @src_nents: number of segments in input scatterlist
 772 * @dst_nents: number of segments in output scatterlist
 773 * @iv_dma: dma address of iv for checking continuity and link table
 774 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 775 * @link_tbl_bytes: length of dma mapped link_tbl space
 776 * @link_tbl_dma: bus physical mapped address of h/w link table
 
 777 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 778 */
 779struct aead_edesc {
 780	int assoc_nents;
 781	int src_nents;
 782	int dst_nents;
 783	dma_addr_t iv_dma;
 784	int link_tbl_bytes;
 785	dma_addr_t link_tbl_dma;
 786	struct link_tbl_entry *link_tbl;
 787	u32 hw_desc[0];
 
 
 788};
 789
 790/*
 791 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 792 * @src_nents: number of segments in input scatterlist
 793 * @dst_nents: number of segments in output scatterlist
 
 
 794 * @iv_dma: dma address of iv for checking continuity and link table
 795 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 796 * @link_tbl_bytes: length of dma mapped link_tbl space
 797 * @link_tbl_dma: bus physical mapped address of h/w link table
 
 798 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 
 799 */
 800struct ablkcipher_edesc {
 801	int src_nents;
 802	int dst_nents;
 
 
 803	dma_addr_t iv_dma;
 804	int link_tbl_bytes;
 805	dma_addr_t link_tbl_dma;
 806	struct link_tbl_entry *link_tbl;
 807	u32 hw_desc[0];
 
 808};
 809
 810static void caam_unmap(struct device *dev, struct scatterlist *src,
 811		       struct scatterlist *dst, int src_nents, int dst_nents,
 812		       dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma,
 813		       int link_tbl_bytes)
 814{
 815	if (unlikely(dst != src)) {
 816		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
 817		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
 
 
 
 818	} else {
 819		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
 820	}
 821
 822	if (iv_dma)
 823		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
 824	if (link_tbl_bytes)
 825		dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes,
 826				 DMA_TO_DEVICE);
 827}
 828
 829static void aead_unmap(struct device *dev,
 830		       struct aead_edesc *edesc,
 831		       struct aead_request *req)
 832{
 833	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 834	int ivsize = crypto_aead_ivsize(aead);
 835
 836	dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
 837
 838	caam_unmap(dev, req->src, req->dst,
 839		   edesc->src_nents, edesc->dst_nents,
 840		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
 841		   edesc->link_tbl_bytes);
 842}
 843
 844static void ablkcipher_unmap(struct device *dev,
 845			     struct ablkcipher_edesc *edesc,
 846			     struct ablkcipher_request *req)
 847{
 848	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 849	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 850
 851	caam_unmap(dev, req->src, req->dst,
 852		   edesc->src_nents, edesc->dst_nents,
 853		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
 854		   edesc->link_tbl_bytes);
 855}
 856
 857static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 858				   void *context)
 859{
 860	struct aead_request *req = context;
 
 
 861	struct aead_edesc *edesc;
 862#ifdef DEBUG
 863	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 864	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 865	int ivsize = crypto_aead_ivsize(aead);
 866
 867	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 868#endif
 869
 870	edesc = (struct aead_edesc *)((char *)desc -
 871		 offsetof(struct aead_edesc, hw_desc));
 872
 873	if (err) {
 874		char tmp[CAAM_ERROR_STR_MAX];
 875
 876		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
 877	}
 878
 879	aead_unmap(jrdev, edesc, req);
 880
 881#ifdef DEBUG
 882	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
 883		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
 884		       req->assoclen, 1);
 885	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
 886		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
 887		       edesc->src_nents ? 100 : ivsize, 1);
 888	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
 889		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
 890		       edesc->src_nents ? 100 : req->cryptlen +
 891		       ctx->authsize + 4, 1);
 892#endif
 893
 894	kfree(edesc);
 895
 896	aead_request_complete(req, err);
 897}
 898
 899static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 900				   void *context)
 901{
 902	struct aead_request *req = context;
 903	struct aead_edesc *edesc;
 904#ifdef DEBUG
 905	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 906	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 907	int ivsize = crypto_aead_ivsize(aead);
 908
 909	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 910#endif
 
 911
 912	edesc = (struct aead_edesc *)((char *)desc -
 913		 offsetof(struct aead_edesc, hw_desc));
 914
 915#ifdef DEBUG
 916	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
 917		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
 918		       ivsize, 1);
 919	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
 920		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
 921		       req->cryptlen, 1);
 922#endif
 923
 924	if (err) {
 925		char tmp[CAAM_ERROR_STR_MAX];
 
 
 926
 927		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
 928	}
 929
 930	aead_unmap(jrdev, edesc, req);
 931
 932	/*
 933	 * Verify that the hardware ICV check passed; otherwise return -EBADMSG.
 934	 */
 935	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
 936		err = -EBADMSG;
 937
 938#ifdef DEBUG
 939	print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
 940		       DUMP_PREFIX_ADDRESS, 16, 4,
 941		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
 942		       sizeof(struct iphdr) + req->assoclen +
 943		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
 944		       ctx->authsize + 36, 1);
 945	if (!err && edesc->link_tbl_bytes) {
 946		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
 947		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
 948			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
 949			sg->length + ctx->authsize + 16, 1);
 950	}
 951#endif
 952
 953	kfree(edesc);
 954
 955	aead_request_complete(req, err);
 956}
 957
 958static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 959				   void *context)
 960{
 961	struct ablkcipher_request *req = context;
 962	struct ablkcipher_edesc *edesc;
 963#ifdef DEBUG
 964	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 965	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 966
 967	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 968#endif
 969
 970	edesc = (struct ablkcipher_edesc *)((char *)desc -
 971		 offsetof(struct ablkcipher_edesc, hw_desc));
 972
 973	if (err) {
 974		char tmp[CAAM_ERROR_STR_MAX];
 975
 976		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
 
 
 977	}
 978
 979#ifdef DEBUG
 980	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
 981		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
 982		       edesc->src_nents > 1 ? 100 : ivsize, 1);
 983	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
 984		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
 985		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
 986#endif
 987
 988	ablkcipher_unmap(jrdev, edesc, req);
 989	kfree(edesc);
 990
 991	ablkcipher_request_complete(req, err);
 992}
 993
 994static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 995				    void *context)
 996{
 997	struct ablkcipher_request *req = context;
 998	struct ablkcipher_edesc *edesc;
 999#ifdef DEBUG
1000	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1001	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1002
1003	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1004#endif
1005
1006	edesc = (struct ablkcipher_edesc *)((char *)desc -
1007		 offsetof(struct ablkcipher_edesc, hw_desc));
1008	if (err) {
1009		char tmp[CAAM_ERROR_STR_MAX];
1010
1011		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
1012	}
1013
1014#ifdef DEBUG
1015	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
1016		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1017		       ivsize, 1);
1018	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
1019		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1020		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1021#endif
1022
1023	ablkcipher_unmap(jrdev, edesc, req);
1024	kfree(edesc);
1025
1026	ablkcipher_request_complete(req, err);
1027}
1028
1029static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
1030			       dma_addr_t dma, u32 len, u32 offset)
1031{
1032	link_tbl_ptr->ptr = dma;
1033	link_tbl_ptr->len = len;
1034	link_tbl_ptr->reserved = 0;
1035	link_tbl_ptr->buf_pool_id = 0;
1036	link_tbl_ptr->offset = offset;
1037#ifdef DEBUG
1038	print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ",
1039		       DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr,
1040		       sizeof(struct link_tbl_entry), 1);
1041#endif
1042}
1043
1044/*
1045 * convert scatterlist to h/w link table format
1046 * but does not have final bit; instead, returns last entry
1047 */
1048static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg,
1049					     int sg_count, struct link_tbl_entry
1050					     *link_tbl_ptr, u32 offset)
1051{
1052	while (sg_count) {
1053		sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg),
1054				   sg_dma_len(sg), offset);
1055		link_tbl_ptr++;
1056		sg = sg_next(sg);
1057		sg_count--;
1058	}
1059	return link_tbl_ptr - 1;
1060}
1061
1062/*
1063 * convert scatterlist to h/w link table format
1064 * scatterlist must have been previously dma mapped
1065 */
1066static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count,
1067				struct link_tbl_entry *link_tbl_ptr, u32 offset)
1068{
1069	link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset);
1070	link_tbl_ptr->len |= 0x40000000;
1071}
1072
1073/*
1074 * Fill in aead job descriptor
1075 */
1076static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
1077			  struct aead_edesc *edesc,
1078			  struct aead_request *req,
1079			  bool all_contig, bool encrypt)
1080{
1081	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1082	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1083	int ivsize = crypto_aead_ivsize(aead);
1084	int authsize = ctx->authsize;
1085	u32 *desc = edesc->hw_desc;
1086	u32 out_options = 0, in_options;
1087	dma_addr_t dst_dma, src_dma;
1088	int len, link_tbl_index = 0;
 
 
1089
1090#ifdef DEBUG
1091	debug("assoclen %d cryptlen %d authsize %d\n",
1092	      req->assoclen, req->cryptlen, authsize);
1093	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
1094		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1095		       req->assoclen, 1);
1096	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
1097		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1098		       edesc->src_nents ? 100 : ivsize, 1);
1099	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
1100		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1101			edesc->src_nents ? 100 : req->cryptlen, 1);
1102	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
1103		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1104		       desc_bytes(sh_desc), 1);
1105#endif
1106
1107	len = desc_len(sh_desc);
1108	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1109
1110	if (all_contig) {
1111		src_dma = sg_dma_address(req->assoc);
 
1112		in_options = 0;
1113	} else {
1114		src_dma = edesc->link_tbl_dma;
1115		link_tbl_index += (edesc->assoc_nents ? : 1) + 1 +
1116				  (edesc->src_nents ? : 1);
1117		in_options = LDST_SGF;
1118	}
1119	if (encrypt)
1120		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
1121				  req->cryptlen - authsize, in_options);
1122	else
1123		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
1124				  req->cryptlen, in_options);
1125
1126	if (likely(req->src == req->dst)) {
1127		if (all_contig) {
1128			dst_dma = sg_dma_address(req->src);
1129		} else {
1130			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
1131				  ((edesc->assoc_nents ? : 1) + 1);
1132			out_options = LDST_SGF;
1133		}
1134	} else {
1135		if (!edesc->dst_nents) {
 
1136			dst_dma = sg_dma_address(req->dst);
 
1137		} else {
1138			dst_dma = edesc->link_tbl_dma +
1139				  link_tbl_index *
1140				  sizeof(struct link_tbl_entry);
1141			out_options = LDST_SGF;
1142		}
1143	}
1144	if (encrypt)
1145		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
1146	else
1147		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
1148				   out_options);
1149}
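/*
 * Resulting job descriptor (sketch, encrypt with all_contig):
 *
 *	HEADER       share=defer, reverse execution order
 *	SHDESC PTR   -> ctx->sh_desc_enc
 *	SEQ_IN_PTR   assoc|iv|plaintext, len = assoclen + ivsize +
 *					       cryptlen - authsize
 *	SEQ_OUT_PTR  ciphertext|icv,     len = cryptlen
 *
 * (aead_encrypt() has already grown req->cryptlen by authsize.)
 */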
1150
1151/*
1152 * Fill in aead givencrypt job descriptor
1153 */
1154static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1155			      struct aead_edesc *edesc,
1156			      struct aead_request *req,
1157			      int contig)
1158{
1159	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1160	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1161	int ivsize = crypto_aead_ivsize(aead);
1162	int authsize = ctx->authsize;
1163	u32 *desc = edesc->hw_desc;
1164	u32 out_options = 0, in_options;
1165	dma_addr_t dst_dma, src_dma;
1166	int len, link_tbl_index = 0;
1167
1168#ifdef DEBUG
1169	debug("assoclen %d cryptlen %d authsize %d\n",
1170	      req->assoclen, req->cryptlen, authsize);
1171	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
1172		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1173		       req->assoclen, 1);
1174	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
1175		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
1176	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
1177		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1178			edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
1179	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
1180		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1181		       desc_bytes(sh_desc), 1);
1182#endif
1183
1184	len = desc_len(sh_desc);
1185	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 
1186
1187	if (contig & GIV_SRC_CONTIG) {
1188		src_dma = sg_dma_address(req->assoc);
1189		in_options = 0;
1190	} else {
1191		src_dma = edesc->link_tbl_dma;
1192		link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents;
1193		in_options = LDST_SGF;
1194	}
1195	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
1196			  req->cryptlen - authsize, in_options);
1197
1198	if (contig & GIV_DST_CONTIG) {
1199		dst_dma = edesc->iv_dma;
1200	} else {
1201		if (likely(req->src == req->dst)) {
1202			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
1203				  edesc->assoc_nents;
1204			out_options = LDST_SGF;
1205		} else {
1206			dst_dma = edesc->link_tbl_dma +
1207				  link_tbl_index *
1208				  sizeof(struct link_tbl_entry);
1209			out_options = LDST_SGF;
1210		}
1211	}
1212
1213	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
1214}
1215
1216/*
1217 * Fill in ablkcipher job descriptor
1218 */
1219static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1220				struct ablkcipher_edesc *edesc,
1221				struct ablkcipher_request *req,
1222				bool iv_contig)
1223{
1224	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1225	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 
 
1226	u32 *desc = edesc->hw_desc;
1227	u32 out_options = 0, in_options;
1228	dma_addr_t dst_dma, src_dma;
1229	int len, link_tbl_index = 0;
1230
1231#ifdef DEBUG
1232	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
1233		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1234		       ivsize, 1);
1235	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
1236		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1237		       edesc->src_nents ? 100 : req->nbytes, 1);
1238#endif
1239
1240	len = desc_len(sh_desc);
1241	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1242
1243	if (iv_contig) {
1244		src_dma = edesc->iv_dma;
1245		in_options = 0;
 
1246	} else {
1247		src_dma = edesc->link_tbl_dma;
1248		link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents;
1249		in_options = LDST_SGF;
1250	}
1251	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
 
1252
1253	if (likely(req->src == req->dst)) {
1254		if (!edesc->src_nents && iv_contig) {
1255			dst_dma = sg_dma_address(req->src);
1256		} else {
1257			dst_dma = edesc->link_tbl_dma +
1258				sizeof(struct link_tbl_entry);
1259			out_options = LDST_SGF;
1260		}
1261	} else {
1262		if (!edesc->dst_nents) {
1263			dst_dma = sg_dma_address(req->dst);
1264		} else {
1265			dst_dma = edesc->link_tbl_dma +
1266				link_tbl_index * sizeof(struct link_tbl_entry);
1267			out_options = LDST_SGF;
1268		}
1269	}
1270	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
1271}
1272
1273/*
1274 * derive number of elements in scatterlist
1275 */
1276static int sg_count(struct scatterlist *sg_list, int nbytes)
1277{
1278	struct scatterlist *sg = sg_list;
1279	int sg_nents = 0;
1280
1281	while (nbytes > 0) {
1282		sg_nents++;
1283		nbytes -= sg->length;
1284		if (!sg_is_last(sg) && (sg + 1)->length == 0)
1285			BUG(); /* chained scatterlists are not supported */
1286		sg = scatterwalk_sg_next(sg);
1287	}
1288
1289	if (likely(sg_nents == 1))
1290		return 0;
1291
1292	return sg_nents;
1293}
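/*
 * Usage note (sketch): a single-segment scatterlist yields 0 here, so
 * callers write "nents ? : 1" to distinguish "direct pointer, no link
 * table needed" (0) from "build an n-entry link table" (n > 1).
 */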
1294
1295/*
1296 * allocate and map the aead extended descriptor
1297 */
1298static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1299					   int desc_bytes, bool *all_contig_ptr)
1300{
1301	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1302	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1303	struct device *jrdev = ctx->jrdev;
1304	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1305		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1306	int assoc_nents, src_nents, dst_nents = 0;
 
 
1307	struct aead_edesc *edesc;
1308	dma_addr_t iv_dma = 0;
1309	int sgc;
1310	bool all_contig = true;
1311	int ivsize = crypto_aead_ivsize(aead);
1312	int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
1313
1314	assoc_nents = sg_count(req->assoc, req->assoclen);
1315	src_nents = sg_count(req->src, req->cryptlen);
1316
1317	if (unlikely(req->dst != req->src))
1318		dst_nents = sg_count(req->dst, req->cryptlen);
1319
1320	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
1321			 DMA_BIDIRECTIONAL);
1322	if (likely(req->src == req->dst)) {
1323		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1324				 DMA_BIDIRECTIONAL);
1325	} else {
1326		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1327				 DMA_TO_DEVICE);
1328		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
1329				 DMA_FROM_DEVICE);
1330	}
1331
1332	/* Check if data are contiguous */
1333	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
1334	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1335	    iv_dma || src_nents || iv_dma + ivsize !=
1336	    sg_dma_address(req->src)) {
1337		all_contig = false;
1338		assoc_nents = assoc_nents ? : 1;
1339		src_nents = src_nents ? : 1;
1340		link_tbl_len = assoc_nents + 1 + src_nents;
1341	}
1342	link_tbl_len += dst_nents;
1343
1344	link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
1345
1346	/* allocate space for base edesc and hw desc commands, link tables */
1347	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1348			link_tbl_bytes, GFP_DMA | flags);
1349	if (!edesc) {
1350		dev_err(jrdev, "could not allocate extended descriptor\n");
 
1351		return ERR_PTR(-ENOMEM);
1352	}
1353
1354	edesc->assoc_nents = assoc_nents;
1355	edesc->src_nents = src_nents;
1356	edesc->dst_nents = dst_nents;
1357	edesc->iv_dma = iv_dma;
1358	edesc->link_tbl_bytes = link_tbl_bytes;
1359	edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
1360			  desc_bytes;
1361	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
1362					     link_tbl_bytes, DMA_TO_DEVICE);
1363	*all_contig_ptr = all_contig;
1364
1365	link_tbl_index = 0;
1366	if (!all_contig) {
1367		sg_to_link_tbl(req->assoc,
1368			       (assoc_nents ? : 1),
1369			       edesc->link_tbl +
1370			       link_tbl_index, 0);
1371		link_tbl_index += assoc_nents ? : 1;
1372		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
1373				   iv_dma, ivsize, 0);
1374		link_tbl_index += 1;
1375		sg_to_link_tbl_last(req->src,
1376				    (src_nents ? : 1),
1377				    edesc->link_tbl +
1378				    link_tbl_index, 0);
1379		link_tbl_index += src_nents ? : 1;
1380	}
1381	if (dst_nents) {
1382		sg_to_link_tbl_last(req->dst, dst_nents,
1383				    edesc->link_tbl + link_tbl_index, 0);
1384	}
1385
1386	return edesc;
1387}
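/*
 * Link table layout built above when !all_contig (sketch):
 *
 *	[0 .. A-1]   assoc segments,         A = assoc_nents ? : 1
 *	[A]          the IV, as one entry
 *	[A+1 .. ]    src segments, last one flagged final
 *	[ .. ]       dst segments (only when src != dst), flagged final
 */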
1388
1389static int aead_encrypt(struct aead_request *req)
1390{
1391	struct aead_edesc *edesc;
1392	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1393	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1394	struct device *jrdev = ctx->jrdev;
1395	bool all_contig;
1396	u32 *desc;
1397	int ret = 0;
1398
1399	req->cryptlen += ctx->authsize;
1400
1401	/* allocate extended descriptor */
1402	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1403				 CAAM_CMD_SZ, &all_contig);
1404	if (IS_ERR(edesc))
1405		return PTR_ERR(edesc);
1406
1407	/* Create and submit job descriptor */
1408	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
1409		      all_contig, true);
1410#ifdef DEBUG
1411	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1412		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1413		       desc_bytes(edesc->hw_desc), 1);
1414#endif
 
1415
1416	desc = edesc->hw_desc;
1417	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1418	if (!ret) {
1419		ret = -EINPROGRESS;
1420	} else {
1421		aead_unmap(jrdev, edesc, req);
1422		kfree(edesc);
1423	}
1424
1425	return ret;
 
 
1426}
1427
1428static int aead_decrypt(struct aead_request *req)
1429{
1430	struct aead_edesc *edesc;
1431	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1432	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1433	struct device *jrdev = ctx->jrdev;
1434	bool all_contig;
1435	u32 *desc;
1436	int ret = 0;
1437
1438	/* allocate extended descriptor */
1439	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1440				 CAAM_CMD_SZ, &all_contig);
1441	if (IS_ERR(edesc))
1442		return PTR_ERR(edesc);
1443
1444#ifdef DEBUG
1445	print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
1446		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1447		       req->cryptlen, 1);
1448#endif
1449
1450	/* Create and submit job descriptor*/
1451	init_aead_job(ctx->sh_desc_dec,
1452		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
1453#ifdef DEBUG
1454	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1455		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1456		       desc_bytes(edesc->hw_desc), 1);
1457#endif
1458
1459	desc = edesc->hw_desc;
1460	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
1461	if (!ret) {
1462		ret = -EINPROGRESS;
1463	} else {
1464		aead_unmap(jrdev, edesc, req);
1465		kfree(edesc);
1466	}
1467
1468	return ret;
1469}
1470
1471/*
1472 * allocate and map the aead extended descriptor for aead givencrypt
1473 */
1474static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1475					       *greq, int desc_bytes,
1476					       u32 *contig_ptr)
1477{
1478	struct aead_request *req = &greq->areq;
1479	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1480	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1481	struct device *jrdev = ctx->jrdev;
1482	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1483		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1484	int assoc_nents, src_nents, dst_nents = 0;
1485	struct aead_edesc *edesc;
1486	dma_addr_t iv_dma = 0;
1487	int sgc;
1488	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1489	int ivsize = crypto_aead_ivsize(aead);
1490	int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
1491
1492	assoc_nents = sg_count(req->assoc, req->assoclen);
1493	src_nents = sg_count(req->src, req->cryptlen);
1494
1495	if (unlikely(req->dst != req->src))
1496		dst_nents = sg_count(req->dst, req->cryptlen);
1497
1498	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
1499			 DMA_BIDIRECTIONAL);
1500	if (likely(req->src == req->dst)) {
1501		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1502				 DMA_BIDIRECTIONAL);
1503	} else {
1504		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1505				 DMA_TO_DEVICE);
1506		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
1507				 DMA_FROM_DEVICE);
1508	}
1509
1510	/* Check if data are contiguous */
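	/*
	 * src is contiguous when assoc data, the generated IV and the
	 * payload sit back to back in DMA space; dst when the IV
	 * immediately precedes the destination buffer.
	 */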
1511	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
1512	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1513	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
1514		contig &= ~GIV_SRC_CONTIG;
1515	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
1516		contig &= ~GIV_DST_CONTIG;
1517	if (unlikely(req->src != req->dst)) {
1518		dst_nents = dst_nents ? : 1;
1519		link_tbl_len += 1;
1520	}
1521	if (!(contig & GIV_SRC_CONTIG)) {
1522		assoc_nents = assoc_nents ? : 1;
1523		src_nents = src_nents ? : 1;
1524		link_tbl_len += assoc_nents + 1 + src_nents;
1525		if (likely(req->src == req->dst))
1526			contig &= ~GIV_DST_CONTIG;
1527	}
1528	link_tbl_len += dst_nents;
1529
1530	link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
1531
1532	/* allocate space for base edesc and hw desc commands, link tables */
1533	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1534			link_tbl_bytes, GFP_DMA | flags);
1535	if (!edesc) {
1536		dev_err(jrdev, "could not allocate extended descriptor\n");
1537		return ERR_PTR(-ENOMEM);
1538	}
1539
1540	edesc->assoc_nents = assoc_nents;
1541	edesc->src_nents = src_nents;
1542	edesc->dst_nents = dst_nents;
1543	edesc->iv_dma = iv_dma;
1544	edesc->link_tbl_bytes = link_tbl_bytes;
1545	edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
1546			  desc_bytes;
1547	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
1548					     link_tbl_bytes, DMA_TO_DEVICE);
1549	*contig_ptr = contig;
1550
1551	link_tbl_index = 0;
1552	if (!(contig & GIV_SRC_CONTIG)) {
1553		sg_to_link_tbl(req->assoc, assoc_nents,
1554			       edesc->link_tbl +
1555			       link_tbl_index, 0);
1556		link_tbl_index += assoc_nents;
1557		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
1558				   iv_dma, ivsize, 0);
1559		link_tbl_index += 1;
1560		sg_to_link_tbl_last(req->src, src_nents,
1561				    edesc->link_tbl +
1562				    link_tbl_index, 0);
1563		link_tbl_index += src_nents;
1564	}
1565	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
1566		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
1567				   iv_dma, ivsize, 0);
1568		link_tbl_index += 1;
1569		sg_to_link_tbl_last(req->dst, dst_nents,
1570				    edesc->link_tbl + link_tbl_index, 0);
1571	}
1572
1573	return edesc;
1574}
1575
1576static int aead_givencrypt(struct aead_givcrypt_request *areq)
1577{
1578	struct aead_request *req = &areq->areq;
1579	struct aead_edesc *edesc;
1580	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1581	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1582	struct device *jrdev = ctx->jrdev;
1583	u32 contig;
1584	u32 *desc;
1585	int ret = 0;
1586
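	/* as in aead_encrypt(): account for the ICV appended to the output */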
1587	req->cryptlen += ctx->authsize;
1588
1589	/* allocate extended descriptor */
1590	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1591				     CAAM_CMD_SZ, &contig);
1592
1593	if (IS_ERR(edesc))
1594		return PTR_ERR(edesc);
1595
1596#ifdef DEBUG
1597	print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
1598		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1599		       req->cryptlen, 1);
1600#endif
1601
1602	/* Create and submit job descriptor */
1603	init_aead_giv_job(ctx->sh_desc_givenc,
1604			  ctx->sh_desc_givenc_dma, edesc, req, contig);
1605#ifdef DEBUG
1606	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1607		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1608		       desc_bytes(edesc->hw_desc), 1);
1609#endif
1610
1611	desc = edesc->hw_desc;
1612	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1613	if (!ret) {
1614		ret = -EINPROGRESS;
1615	} else {
1616		aead_unmap(jrdev, edesc, req);
1617		kfree(edesc);
1618	}
1619
1620	return ret;
1621}
1622
1623/*
1624 * allocate and map the ablkcipher extended descriptor for ablkcipher
1625 */
1626static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1627						       *req, int desc_bytes,
1628						       bool *iv_contig_out)
1629{
1630	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1631	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1632	struct device *jrdev = ctx->jrdev;
1633	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1634					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1635		       GFP_KERNEL : GFP_ATOMIC;
1636	int src_nents, dst_nents = 0, link_tbl_bytes;
1637	struct ablkcipher_edesc *edesc;
1638	dma_addr_t iv_dma = 0;
1639	bool iv_contig = false;
1640	int sgc;
1641	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1642	int link_tbl_index;
1643
1644	src_nents = sg_count(req->src, req->nbytes);
1645
1646	if (unlikely(req->dst != req->src))
1647		dst_nents = sg_count(req->dst, req->nbytes);
1648
1649	if (likely(req->src == req->dst)) {
1650		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1651				 DMA_BIDIRECTIONAL);
1652	} else {
1653		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1654				 DMA_TO_DEVICE);
1655		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
1656				 DMA_FROM_DEVICE);
1657	}
1658
1659	/*
1660	 * Check if iv can be contiguous with source and destination.
1661	 * If so, include it. If not, create scatterlist.
1662	 */
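	/* non-contiguous layout: [IV] [src S/G entries], then a dst chain */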
1663	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
1664	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
1665		iv_contig = true;
1666	else
1667		src_nents = src_nents ? : 1;
1668	link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1669			 sizeof(struct link_tbl_entry);
1670
1671	/* allocate space for base edesc and hw desc commands, link tables */
1672	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
1673			link_tbl_bytes, GFP_DMA | flags);
1674	if (!edesc) {
1675		dev_err(jrdev, "could not allocate extended descriptor\n");
1676		return ERR_PTR(-ENOMEM);
1677	}
1678
1679	edesc->src_nents = src_nents;
1680	edesc->dst_nents = dst_nents;
1681	edesc->link_tbl_bytes = link_tbl_bytes;
1682	edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1683			  desc_bytes;
1684
1685	link_tbl_index = 0;
1686	if (!iv_contig) {
1687		sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0);
1688		sg_to_link_tbl_last(req->src, src_nents,
1689				    edesc->link_tbl + 1, 0);
1690		link_tbl_index += 1 + src_nents;
1691	}
1692
1693	if (unlikely(dst_nents)) {
1694		sg_to_link_tbl_last(req->dst, dst_nents,
1695			edesc->link_tbl + link_tbl_index, 0);
1696	}
1697
1698	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
1699					     link_tbl_bytes, DMA_TO_DEVICE);
1700	edesc->iv_dma = iv_dma;
1701
1702#ifdef DEBUG
1703	print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ",
1704		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
1705		       link_tbl_bytes, 1);
1706#endif
1707
1708	*iv_contig_out = iv_contig;
1709	return edesc;
1710}
1711
1712static int ablkcipher_encrypt(struct ablkcipher_request *req)
1713{
1714	struct ablkcipher_edesc *edesc;
1715	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1716	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1717	struct device *jrdev = ctx->jrdev;
1718	bool iv_contig;
1719	u32 *desc;
1720	int ret = 0;
1721
1722	/* allocate extended descriptor */
1723	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1724				       CAAM_CMD_SZ, &iv_contig);
1725	if (IS_ERR(edesc))
1726		return PTR_ERR(edesc);
1727
1728	/* Create and submit job descriptor */
1729	init_ablkcipher_job(ctx->sh_desc_enc,
1730		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1731#ifdef DEBUG
1732	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
1733		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1734		       desc_bytes(edesc->hw_desc), 1);
1735#endif
1736	desc = edesc->hw_desc;
1737	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1738
1739	if (!ret) {
1740		ret = -EINPROGRESS;
1741	} else {
1742		ablkcipher_unmap(jrdev, edesc, req);
1743		kfree(edesc);
1744	}
1745
1746	return ret;
1747}
1748
1749static int ablkcipher_decrypt(struct ablkcipher_request *req)
1750{
1751	struct ablkcipher_edesc *edesc;
1752	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1753	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1754	struct device *jrdev = ctx->jrdev;
1755	bool iv_contig;
1756	u32 *desc;
1757	int ret = 0;
1758
1759	/* allocate extended descriptor */
1760	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1761				       CAAM_CMD_SZ, &iv_contig);
1762	if (IS_ERR(edesc))
1763		return PTR_ERR(edesc);
1764
1765	/* Create and submit job descriptor */
1766	init_ablkcipher_job(ctx->sh_desc_dec,
1767		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1768	desc = edesc->hw_desc;
1769#ifdef DEBUG
1770	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
1771		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1772		       desc_bytes(edesc->hw_desc), 1);
1773#endif
1774
1775	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
1776	if (!ret) {
1777		ret = -EINPROGRESS;
1778	} else {
1779		ablkcipher_unmap(jrdev, edesc, req);
1780		kfree(edesc);
1781	}
1782
1783	return ret;
1784}
1785
1786#define template_aead		template_u.aead
1787#define template_ablkcipher	template_u.ablkcipher
1788struct caam_alg_template {
1789	char name[CRYPTO_MAX_ALG_NAME];
1790	char driver_name[CRYPTO_MAX_ALG_NAME];
1791	unsigned int blocksize;
1792	u32 type;
1793	union {
1794		struct ablkcipher_alg ablkcipher;
1795		struct aead_alg aead;
1796		struct blkcipher_alg blkcipher;
1797		struct cipher_alg cipher;
1798		struct compress_alg compress;
1799		struct rng_alg rng;
1800	} template_u;
1801	u32 class1_alg_type;
1802	u32 class2_alg_type;
1803	u32 alg_op;
1804};
1805
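/*
 * One template per algorithm the driver exposes; caam_alg_alloc() below
 * turns a template into a live crypto_alg, using .type to select which
 * member of the template_u union applies.
 */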
1806static struct caam_alg_template driver_algs[] = {
1807	/* single-pass ipsec_esp descriptor */
1808	{
1809		.name = "authenc(hmac(sha1),cbc(aes))",
1810		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
1811		.blocksize = AES_BLOCK_SIZE,
1812		.type = CRYPTO_ALG_TYPE_AEAD,
1813		.template_aead = {
1814			.setkey = aead_setkey,
1815			.setauthsize = aead_setauthsize,
1816			.encrypt = aead_encrypt,
1817			.decrypt = aead_decrypt,
1818			.givencrypt = aead_givencrypt,
1819			.geniv = "<built-in>",
1820			.ivsize = AES_BLOCK_SIZE,
1821			.maxauthsize = SHA1_DIGEST_SIZE,
1822			},
1823		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1824		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1825		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1826	},
1827	{
1828		.name = "authenc(hmac(sha256),cbc(aes))",
1829		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
1830		.blocksize = AES_BLOCK_SIZE,
1831		.type = CRYPTO_ALG_TYPE_AEAD,
1832		.template_aead = {
1833			.setkey = aead_setkey,
1834			.setauthsize = aead_setauthsize,
1835			.encrypt = aead_encrypt,
1836			.decrypt = aead_decrypt,
1837			.givencrypt = aead_givencrypt,
1838			.geniv = "<built-in>",
1839			.ivsize = AES_BLOCK_SIZE,
1840			.maxauthsize = SHA256_DIGEST_SIZE,
1841			},
1842		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1843		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1844				   OP_ALG_AAI_HMAC_PRECOMP,
1845		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1846	},
1847	{
1848		.name = "authenc(hmac(sha512),cbc(aes))",
1849		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
1850		.blocksize = AES_BLOCK_SIZE,
1851		.type = CRYPTO_ALG_TYPE_AEAD,
1852		.template_aead = {
1853			.setkey = aead_setkey,
1854			.setauthsize = aead_setauthsize,
1855			.encrypt = aead_encrypt,
1856			.decrypt = aead_decrypt,
1857			.givencrypt = aead_givencrypt,
1858			.geniv = "<built-in>",
1859			.ivsize = AES_BLOCK_SIZE,
1860			.maxauthsize = SHA512_DIGEST_SIZE,
1861			},
1862		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1863		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1864				   OP_ALG_AAI_HMAC_PRECOMP,
1865		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1866	},
1867	{
1868		.name = "authenc(hmac(sha1),cbc(des3_ede))",
1869		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
1870		.blocksize = DES3_EDE_BLOCK_SIZE,
1871		.type = CRYPTO_ALG_TYPE_AEAD,
1872		.template_aead = {
1873			.setkey = aead_setkey,
1874			.setauthsize = aead_setauthsize,
1875			.encrypt = aead_encrypt,
1876			.decrypt = aead_decrypt,
1877			.givencrypt = aead_givencrypt,
1878			.geniv = "<built-in>",
1879			.ivsize = DES3_EDE_BLOCK_SIZE,
1880			.maxauthsize = SHA1_DIGEST_SIZE,
1881			},
1882		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1883		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1884		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1885	},
1886	{
1887		.name = "authenc(hmac(sha256),cbc(des3_ede))",
1888		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
1889		.blocksize = DES3_EDE_BLOCK_SIZE,
1890		.type = CRYPTO_ALG_TYPE_AEAD,
1891		.template_aead = {
1892			.setkey = aead_setkey,
1893			.setauthsize = aead_setauthsize,
1894			.encrypt = aead_encrypt,
1895			.decrypt = aead_decrypt,
1896			.givencrypt = aead_givencrypt,
1897			.geniv = "<built-in>",
1898			.ivsize = DES3_EDE_BLOCK_SIZE,
1899			.maxauthsize = SHA256_DIGEST_SIZE,
1900			},
1901		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1902		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1903				   OP_ALG_AAI_HMAC_PRECOMP,
1904		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1905	},
1906	{
1907		.name = "authenc(hmac(sha512),cbc(des3_ede))",
1908		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
1909		.blocksize = DES3_EDE_BLOCK_SIZE,
1910		.type = CRYPTO_ALG_TYPE_AEAD,
1911		.template_aead = {
1912			.setkey = aead_setkey,
1913			.setauthsize = aead_setauthsize,
1914			.encrypt = aead_encrypt,
1915			.decrypt = aead_decrypt,
1916			.givencrypt = aead_givencrypt,
1917			.geniv = "<built-in>",
1918			.ivsize = DES3_EDE_BLOCK_SIZE,
1919			.maxauthsize = SHA512_DIGEST_SIZE,
1920			},
1921		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1922		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1923				   OP_ALG_AAI_HMAC_PRECOMP,
1924		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1925	},
1926	{
1927		.name = "authenc(hmac(sha1),cbc(des))",
1928		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
1929		.blocksize = DES_BLOCK_SIZE,
1930		.type = CRYPTO_ALG_TYPE_AEAD,
1931		.template_aead = {
1932			.setkey = aead_setkey,
1933			.setauthsize = aead_setauthsize,
1934			.encrypt = aead_encrypt,
1935			.decrypt = aead_decrypt,
1936			.givencrypt = aead_givencrypt,
1937			.geniv = "<built-in>",
1938			.ivsize = DES_BLOCK_SIZE,
1939			.maxauthsize = SHA1_DIGEST_SIZE,
1940			},
1941		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1942		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1943		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1944	},
1945	{
1946		.name = "authenc(hmac(sha256),cbc(des))",
1947		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
1948		.blocksize = DES_BLOCK_SIZE,
1949		.type = CRYPTO_ALG_TYPE_AEAD,
1950		.template_aead = {
1951			.setkey = aead_setkey,
1952			.setauthsize = aead_setauthsize,
1953			.encrypt = aead_encrypt,
1954			.decrypt = aead_decrypt,
1955			.givencrypt = aead_givencrypt,
1956			.geniv = "<built-in>",
1957			.ivsize = DES_BLOCK_SIZE,
1958			.maxauthsize = SHA256_DIGEST_SIZE,
1959			},
1960		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1961		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1962				   OP_ALG_AAI_HMAC_PRECOMP,
1963		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1964	},
1965	{
1966		.name = "authenc(hmac(sha512),cbc(des))",
1967		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
1968		.blocksize = DES_BLOCK_SIZE,
1969		.type = CRYPTO_ALG_TYPE_AEAD,
1970		.template_aead = {
1971			.setkey = aead_setkey,
1972			.setauthsize = aead_setauthsize,
1973			.encrypt = aead_encrypt,
1974			.decrypt = aead_decrypt,
1975			.givencrypt = aead_givencrypt,
1976			.geniv = "<built-in>",
1977			.ivsize = DES_BLOCK_SIZE,
1978			.maxauthsize = SHA512_DIGEST_SIZE,
1979			},
1980		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1981		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1982				   OP_ALG_AAI_HMAC_PRECOMP,
1983		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1984	},
1985	/* ablkcipher descriptor */
1986	{
1987		.name = "cbc(aes)",
1988		.driver_name = "cbc-aes-caam",
1989		.blocksize = AES_BLOCK_SIZE,
1990		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1991		.template_ablkcipher = {
1992			.setkey = ablkcipher_setkey,
1993			.encrypt = ablkcipher_encrypt,
1994			.decrypt = ablkcipher_decrypt,
1995			.geniv = "eseqiv",
1996			.min_keysize = AES_MIN_KEY_SIZE,
1997			.max_keysize = AES_MAX_KEY_SIZE,
1998			.ivsize = AES_BLOCK_SIZE,
1999			},
2000		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2001	},
2002	{
2003		.name = "cbc(des3_ede)",
2004		.driver_name = "cbc-3des-caam",
2005		.blocksize = DES3_EDE_BLOCK_SIZE,
2006		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2007		.template_ablkcipher = {
2008			.setkey = ablkcipher_setkey,
2009			.encrypt = ablkcipher_encrypt,
2010			.decrypt = ablkcipher_decrypt,
2011			.geniv = "eseqiv",
2012			.min_keysize = DES3_EDE_KEY_SIZE,
2013			.max_keysize = DES3_EDE_KEY_SIZE,
2014			.ivsize = DES3_EDE_BLOCK_SIZE,
2015			},
2016		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2017	},
2018	{
2019		.name = "cbc(des)",
2020		.driver_name = "cbc-des-caam",
2021		.blocksize = DES_BLOCK_SIZE,
2022		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2023		.template_ablkcipher = {
2024			.setkey = ablkcipher_setkey,
2025			.encrypt = ablkcipher_encrypt,
2026			.decrypt = ablkcipher_decrypt,
2027			.geniv = "eseqiv",
2028			.min_keysize = DES_KEY_SIZE,
2029			.max_keysize = DES_KEY_SIZE,
2030			.ivsize = DES_BLOCK_SIZE,
2031			},
2032		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2033	}
2034};
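/*
 * A minimal consumer sketch (hypothetical, not part of this driver):
 * the algorithms above are reached by name through the generic crypto
 * API, e.g.
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *
 * which the crypto core resolves to "cbc-aes-caam" when this module is
 * loaded and its priority wins.
 */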
2035
2036struct caam_crypto_alg {
2037	struct list_head entry;
2038	struct device *ctrldev;
2039	int class1_alg_type;
2040	int class2_alg_type;
2041	int alg_op;
2042	struct crypto_alg crypto_alg;
2043};
2044
2045static int caam_cra_init(struct crypto_tfm *tfm)
2046{
2047	struct crypto_alg *alg = tfm->__crt_alg;
2048	struct caam_crypto_alg *caam_alg =
2049		 container_of(alg, struct caam_crypto_alg, crypto_alg);
2050	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2051	struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
2052	int tgt_jr = atomic_inc_return(&priv->tfm_count);
2053
2054	/*
2055	 * distribute tfms across job rings to ensure in-order
2056	 * crypto request processing per tfm
2057	 */
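	/* tgt_jr / 2: consecutively created tfms pair up on the same ring */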
2058	ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];
2059
2060	/* copy descriptor header template value */
2061	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
2062	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
2063	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
2064
2065	return 0;
2066}
2067
2068static void caam_cra_exit(struct crypto_tfm *tfm)
2069{
2070	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2071
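	/* release any shared-descriptor DMA mappings (set up when keys were set) */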
2072	if (ctx->sh_desc_enc_dma &&
2073	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
2074		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
2075				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
2076	if (ctx->sh_desc_dec_dma &&
2077	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
2078		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
2079				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
2080	if (ctx->sh_desc_givenc_dma &&
2081	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
2082		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
2083				 desc_bytes(ctx->sh_desc_givenc),
2084				 DMA_TO_DEVICE);
2085}
2086
2087static void __exit caam_algapi_exit(void)
2088{
2089
2090	struct device_node *dev_node;
2091	struct platform_device *pdev;
2092	struct device *ctrldev;
2093	struct caam_drv_private *priv;
2094	struct caam_crypto_alg *t_alg, *n;
2095	int i, err;
2096
2097	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2098	if (!dev_node)
2099		return;
2100
2101	pdev = of_find_device_by_node(dev_node);
2102	if (!pdev)
2103		return;
2104
2105	ctrldev = &pdev->dev;
2106	of_node_put(dev_node);
2107	priv = dev_get_drvdata(ctrldev);
2108
2109	if (!priv->alg_list.next)
2110		return;
2111
2112	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2113		crypto_unregister_alg(&t_alg->crypto_alg);
2114		list_del(&t_alg->entry);
2115		kfree(t_alg);
2116	}
2117
2118	for (i = 0; i < priv->total_jobrs; i++) {
2119		err = caam_jr_deregister(priv->algapi_jr[i]);
2120		if (err < 0)
2121			break;
2122	}
2123	kfree(priv->algapi_jr);
2124}
2125
2126static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
2127					      struct caam_alg_template
2128					      *template)
2129{
2130	struct caam_crypto_alg *t_alg;
2131	struct crypto_alg *alg;
2132
2133	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
2134	if (!t_alg) {
2135		dev_err(ctrldev, "failed to allocate t_alg\n");
2136		return ERR_PTR(-ENOMEM);
2137	}
2138
2139	alg = &t_alg->crypto_alg;
2140
2141	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2142	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2143		 template->driver_name);
2144	alg->cra_module = THIS_MODULE;
2145	alg->cra_init = caam_cra_init;
2146	alg->cra_exit = caam_cra_exit;
2147	alg->cra_priority = CAAM_CRA_PRIORITY;
2148	alg->cra_blocksize = template->blocksize;
2149	alg->cra_alignmask = 0;
2150	alg->cra_ctxsize = sizeof(struct caam_ctx);
2151	alg->cra_flags = CRYPTO_ALG_ASYNC | template->type;
2152	switch (template->type) {
2153	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2154		alg->cra_type = &crypto_ablkcipher_type;
2155		alg->cra_ablkcipher = template->template_ablkcipher;
2156		break;
2157	case CRYPTO_ALG_TYPE_AEAD:
2158		alg->cra_type = &crypto_aead_type;
2159		alg->cra_aead = template->template_aead;
2160		break;
2161	}
2162
2163	t_alg->class1_alg_type = template->class1_alg_type;
2164	t_alg->class2_alg_type = template->class2_alg_type;
2165	t_alg->alg_op = template->alg_op;
2166	t_alg->ctrldev = ctrldev;
2167
2168	return t_alg;
2169}
2170
2171static int __init caam_algapi_init(void)
2172{
2173	struct device_node *dev_node;
2174	struct platform_device *pdev;
2175	struct device *ctrldev, **jrdev;
2176	struct caam_drv_private *priv;
2177	int i = 0, err = 0;
2178
2179	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2180	if (!dev_node)
2181		return -ENODEV;
2182
2183	pdev = of_find_device_by_node(dev_node);
2184	if (!pdev)
2185		return -ENODEV;
2186
2187	ctrldev = &pdev->dev;
2188	priv = dev_get_drvdata(ctrldev);
2189	of_node_put(dev_node);
2190
2191	INIT_LIST_HEAD(&priv->alg_list);
2192
2193	jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
2194	if (!jrdev)
2195		return -ENOMEM;
2196
2197	for (i = 0; i < priv->total_jobrs; i++) {
2198		err = caam_jr_register(ctrldev, &jrdev[i]);
2199		if (err < 0)
2200			break;
2201	}
2202	if (err < 0 && i == 0) {
2203		dev_err(ctrldev, "algapi error in job ring registration: %d\n",
2204			err);
2205		kfree(jrdev);
2206		return err;
2207	}
2208
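	/* run with however many job rings registered successfully */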
2209	priv->num_jrs_for_algapi = i;
2210	priv->algapi_jr = jrdev;
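	/* start at -1 so the first atomic_inc_return() in caam_cra_init() is 0 */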
2211	atomic_set(&priv->tfm_count, -1);
2212
2213	/* register crypto algorithms the device supports */
2214	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2215		/* TODO: check if h/w supports alg */
2216		struct caam_crypto_alg *t_alg;
2217
2218		t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
2219		if (IS_ERR(t_alg)) {
2220			err = PTR_ERR(t_alg);
2221			dev_warn(ctrldev, "%s alg allocation failed\n",
2222				 driver_algs[i].driver_name);
2223			continue;
2224		}
2225
2226		err = crypto_register_alg(&t_alg->crypto_alg);
2227		if (err) {
2228			dev_warn(ctrldev, "%s alg registration failed\n",
2229				t_alg->crypto_alg.cra_driver_name);
2230			kfree(t_alg);
2231		} else {
2232			list_add_tail(&t_alg->entry, &priv->alg_list);
2233			dev_info(ctrldev, "%s\n",
2234				 t_alg->crypto_alg.cra_driver_name);
2235		}
2236	}
2237
2238	return err;
2239}
2240
2241module_init(caam_algapi_init);
2242module_exit(caam_algapi_exit);
2243
2244MODULE_LICENSE("GPL");
2245MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2246MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");