   1/*
   2 * This file is part of the Chelsio T6 Crypto driver for Linux.
   3 *
   4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 *
  34 * Written and Maintained by:
  35 *	Manoj Malviya (manojmalviya@chelsio.com)
  36 *	Atul Gupta (atul.gupta@chelsio.com)
  37 *	Jitendra Lulla (jlulla@chelsio.com)
  38 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
  39 *	Harsh Jain (harsh@chelsio.com)
  40 */
  41
  42#define pr_fmt(fmt) "chcr:" fmt
  43
  44#include <linux/kernel.h>
  45#include <linux/module.h>
  46#include <linux/crypto.h>
  47#include <linux/skbuff.h>
  48#include <linux/rtnetlink.h>
  49#include <linux/highmem.h>
  50#include <linux/scatterlist.h>
  51
  52#include <crypto/aes.h>
  53#include <crypto/algapi.h>
  54#include <crypto/hash.h>
  55#include <crypto/gcm.h>
   56#include <crypto/sha.h>
   57#include <crypto/authenc.h>
  58#include <crypto/ctr.h>
  59#include <crypto/gf128mul.h>
  60#include <crypto/internal/aead.h>
  61#include <crypto/null.h>
  62#include <crypto/internal/skcipher.h>
  63#include <crypto/aead.h>
  64#include <crypto/scatterwalk.h>
  65#include <crypto/internal/hash.h>
  66
  67#include "t4fw_api.h"
  68#include "t4_msg.h"
  69#include "chcr_core.h"
  70#include "chcr_algo.h"
  71#include "chcr_crypto.h"
  72
  73#define IV AES_BLOCK_SIZE
  74
  75static unsigned int sgl_ent_len[] = {
  76	0, 0, 16, 24, 40, 48, 64, 72, 88,
  77	96, 112, 120, 136, 144, 160, 168, 184,
  78	192, 208, 216, 232, 240, 256, 264, 280,
  79	288, 304, 312, 328, 336, 352, 360, 376
  80};
  81
  82static unsigned int dsgl_ent_len[] = {
  83	0, 32, 32, 48, 48, 64, 64, 80, 80,
  84	112, 112, 128, 128, 144, 144, 160, 160,
  85	192, 192, 208, 208, 224, 224, 240, 240,
  86	272, 272, 288, 288, 304, 304, 320, 320
  87};
  88
  89static u32 round_constant[11] = {
  90	0x01000000, 0x02000000, 0x04000000, 0x08000000,
  91	0x10000000, 0x20000000, 0x40000000, 0x80000000,
  92	0x1B000000, 0x36000000, 0x6C000000
  93};
  94
  95static int chcr_handle_cipher_resp(struct skcipher_request *req,
  96				   unsigned char *input, int err);
  97
  98static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
  99{
 100	return ctx->crypto_ctx->aeadctx;
 101}
 102
 103static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
 104{
 105	return ctx->crypto_ctx->ablkctx;
 106}
 107
 108static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
 109{
 110	return ctx->crypto_ctx->hmacctx;
 111}
 112
 113static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
 114{
 115	return gctx->ctx->gcm;
 116}
 117
 118static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
 119{
 120	return gctx->ctx->authenc;
 121}
 122
 123static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
 124{
 125	return container_of(ctx->dev, struct uld_ctx, dev);
 126}
 127
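     /* Whether the skb is small enough to be sent as immediate data in the WR. */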
 128static inline int is_ofld_imm(const struct sk_buff *skb)
 129{
 130	return (skb->len <= SGE_MAX_WR_LEN);
 131}
 132
 133static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
 134{
 135	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
 136}
 137
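     /*
      * Count the SGL entries needed to cover @reqlen bytes of @sg, skipping
      * the first @skip bytes and splitting each segment into chunks of at
      * most @entlen bytes.
      */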
 138static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
 139			 unsigned int entlen,
 140			 unsigned int skip)
 141{
 142	int nents = 0;
 143	unsigned int less;
 144	unsigned int skip_len = 0;
 145
 146	while (sg && skip) {
 147		if (sg_dma_len(sg) <= skip) {
 148			skip -= sg_dma_len(sg);
 149			skip_len = 0;
 150			sg = sg_next(sg);
 151		} else {
 152			skip_len = skip;
 153			skip = 0;
 154		}
 155	}
 156
 157	while (sg && reqlen) {
 158		less = min(reqlen, sg_dma_len(sg) - skip_len);
 159		nents += DIV_ROUND_UP(less, entlen);
 160		reqlen -= less;
 161		skip_len = 0;
 162		sg = sg_next(sg);
 163	}
 164	return nents;
 165}
 166
 167static inline int get_aead_subtype(struct crypto_aead *aead)
 168{
 169	struct aead_alg *alg = crypto_aead_alg(aead);
 170	struct chcr_alg_template *chcr_crypto_alg =
 171		container_of(alg, struct chcr_alg_template, alg.aead);
 172	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 173}
 174
 175void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
 176{
 177	u8 temp[SHA512_DIGEST_SIZE];
 178	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 179	int authsize = crypto_aead_authsize(tfm);
 180	struct cpl_fw6_pld *fw6_pld;
 181	int cmp = 0;
 182
 183	fw6_pld = (struct cpl_fw6_pld *)input;
 184	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
 185	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
 186		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
 187	} else {
 188
 189		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
 190				authsize, req->assoclen +
 191				req->cryptlen - authsize);
 192		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
 193	}
 194	if (cmp)
 195		*err = -EBADMSG;
 196	else
 197		*err = 0;
 198}
 199
 200static int chcr_inc_wrcount(struct chcr_dev *dev)
 201{
 202	if (dev->state == CHCR_DETACH)
 203		return 1;
 204	atomic_inc(&dev->inflight);
 205	return 0;
 206}
 207
 208static inline void chcr_dec_wrcount(struct chcr_dev *dev)
 209{
 210	atomic_dec(&dev->inflight);
 211}
 212
 213static inline int chcr_handle_aead_resp(struct aead_request *req,
 214					 unsigned char *input,
 215					 int err)
 216{
 217	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 218	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 219	struct chcr_dev *dev = a_ctx(tfm)->dev;
 220
 221	chcr_aead_common_exit(req);
 222	if (reqctx->verify == VERIFY_SW) {
 223		chcr_verify_tag(req, input, &err);
 224		reqctx->verify = VERIFY_HW;
 225	}
 226	chcr_dec_wrcount(dev);
 227	req->base.complete(&req->base, err);
 228
 229	return err;
 230}
 231
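     /*
      * Expand @key with the AES key schedule and store the last Nk words of
      * the schedule, in reverse order, in @dec_key: the "reverse round key"
      * placed in the hardware key context for decryption.
      */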
 232static void get_aes_decrypt_key(unsigned char *dec_key,
 233				       const unsigned char *key,
 234				       unsigned int keylength)
 235{
 236	u32 temp;
 237	u32 w_ring[MAX_NK];
 238	int i, j, k;
 239	u8  nr, nk;
 240
 241	switch (keylength) {
 242	case AES_KEYLENGTH_128BIT:
 243		nk = KEYLENGTH_4BYTES;
 244		nr = NUMBER_OF_ROUNDS_10;
 245		break;
 246	case AES_KEYLENGTH_192BIT:
 247		nk = KEYLENGTH_6BYTES;
 248		nr = NUMBER_OF_ROUNDS_12;
 249		break;
 250	case AES_KEYLENGTH_256BIT:
 251		nk = KEYLENGTH_8BYTES;
 252		nr = NUMBER_OF_ROUNDS_14;
 253		break;
 254	default:
 255		return;
 256	}
 257	for (i = 0; i < nk; i++)
 258		w_ring[i] = get_unaligned_be32(&key[i * 4]);
 259
 260	i = 0;
 261	temp = w_ring[nk - 1];
 262	while (i + nk < (nr + 1) * 4) {
 263		if (!(i % nk)) {
 264			/* RotWord(temp) */
 265			temp = (temp << 8) | (temp >> 24);
 266			temp = aes_ks_subword(temp);
 267			temp ^= round_constant[i / nk];
 268		} else if (nk == 8 && (i % 4 == 0)) {
 269			temp = aes_ks_subword(temp);
 270		}
 271		w_ring[i % nk] ^= temp;
 272		temp = w_ring[i % nk];
 273		i++;
 274	}
 275	i--;
 276	for (k = 0, j = i % nk; k < nk; k++) {
 277		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
 278		j--;
 279		if (j < 0)
 280			j += nk;
 281	}
 282}
 283
 284static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
 285{
 286	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
 287
 288	switch (ds) {
 289	case SHA1_DIGEST_SIZE:
 290		base_hash = crypto_alloc_shash("sha1", 0, 0);
 291		break;
 292	case SHA224_DIGEST_SIZE:
 293		base_hash = crypto_alloc_shash("sha224", 0, 0);
 294		break;
 295	case SHA256_DIGEST_SIZE:
 296		base_hash = crypto_alloc_shash("sha256", 0, 0);
 297		break;
 298	case SHA384_DIGEST_SIZE:
 299		base_hash = crypto_alloc_shash("sha384", 0, 0);
 300		break;
 301	case SHA512_DIGEST_SIZE:
 302		base_hash = crypto_alloc_shash("sha512", 0, 0);
 303		break;
 304	}
 305
 306	return base_hash;
 307}
 308
 309static int chcr_compute_partial_hash(struct shash_desc *desc,
 310				     char *iopad, char *result_hash,
 311				     int digest_size)
 312{
 313	struct sha1_state sha1_st;
 314	struct sha256_state sha256_st;
 315	struct sha512_state sha512_st;
 316	int error;
 317
 318	if (digest_size == SHA1_DIGEST_SIZE) {
 319		error = crypto_shash_init(desc) ?:
 320			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
 321			crypto_shash_export(desc, (void *)&sha1_st);
 322		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
 323	} else if (digest_size == SHA224_DIGEST_SIZE) {
 324		error = crypto_shash_init(desc) ?:
 325			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
 326			crypto_shash_export(desc, (void *)&sha256_st);
 327		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
 328
 329	} else if (digest_size == SHA256_DIGEST_SIZE) {
 330		error = crypto_shash_init(desc) ?:
 331			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
 332			crypto_shash_export(desc, (void *)&sha256_st);
 333		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
 334
 335	} else if (digest_size == SHA384_DIGEST_SIZE) {
 336		error = crypto_shash_init(desc) ?:
 337			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
 338			crypto_shash_export(desc, (void *)&sha512_st);
 339		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
 340
 341	} else if (digest_size == SHA512_DIGEST_SIZE) {
 342		error = crypto_shash_init(desc) ?:
 343			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
 344			crypto_shash_export(desc, (void *)&sha512_st);
 345		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
 346	} else {
 347		error = -EINVAL;
 348		pr_err("Unknown digest size %d\n", digest_size);
 349	}
 350	return error;
 351}
 352
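     /*
      * Byte-swap a hash state buffer to big-endian word order: 64-bit words
      * when @ds is the SHA-512 digest size, 32-bit words otherwise.
      */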
 353static void chcr_change_order(char *buf, int ds)
 354{
 355	int i;
 356
 357	if (ds == SHA512_DIGEST_SIZE) {
 358		for (i = 0; i < (ds / sizeof(u64)); i++)
 359			*((__be64 *)buf + i) =
 360				cpu_to_be64(*((u64 *)buf + i));
 361	} else {
 362		for (i = 0; i < (ds / sizeof(u32)); i++)
 363			*((__be32 *)buf + i) =
 364				cpu_to_be32(*((u32 *)buf + i));
 365	}
 366}
 367
 368static inline int is_hmac(struct crypto_tfm *tfm)
 369{
 370	struct crypto_alg *alg = tfm->__crt_alg;
 371	struct chcr_alg_template *chcr_crypto_alg =
 372		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
 373			     alg.hash);
 374	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
 375		return 1;
 376	return 0;
 377}
 378
 379static inline void dsgl_walk_init(struct dsgl_walk *walk,
 380				   struct cpl_rx_phys_dsgl *dsgl)
 381{
 382	walk->dsgl = dsgl;
 383	walk->nents = 0;
 384	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 385}
 386
 387static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
 388				 int pci_chan_id)
 389{
 390	struct cpl_rx_phys_dsgl *phys_cpl;
 391
 392	phys_cpl = walk->dsgl;
 393
 394	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
 395				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
 396	phys_cpl->pcirlxorder_to_noofsgentr =
 397		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
 398		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
 399		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
 400		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
 401		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
 402		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
 403	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
 404	phys_cpl->rss_hdr_int.qid = htons(qid);
 405	phys_cpl->rss_hdr_int.hash_val = 0;
 406	phys_cpl->rss_hdr_int.channel = pci_chan_id;
 407}
 408
 409static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
 410					size_t size,
 411					dma_addr_t addr)
 412{
 413	int j;
 414
 415	if (!size)
 416		return;
 417	j = walk->nents;
 418	walk->to->len[j % 8] = htons(size);
 419	walk->to->addr[j % 8] = cpu_to_be64(addr);
 420	j++;
 421	if ((j % 8) == 0)
 422		walk->to++;
 423	walk->nents = j;
 424}
 425
 426static void  dsgl_walk_add_sg(struct dsgl_walk *walk,
 427			   struct scatterlist *sg,
 428			      unsigned int slen,
 429			      unsigned int skip)
 430{
 431	int skip_len = 0;
 432	unsigned int left_size = slen, len = 0;
 433	unsigned int j = walk->nents;
 434	int offset, ent_len;
 435
 436	if (!slen)
 437		return;
 438	while (sg && skip) {
 439		if (sg_dma_len(sg) <= skip) {
 440			skip -= sg_dma_len(sg);
 441			skip_len = 0;
 442			sg = sg_next(sg);
 443		} else {
 444			skip_len = skip;
 445			skip = 0;
 446		}
 447	}
 448
 449	while (left_size && sg) {
 450		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
 451		offset = 0;
 452		while (len) {
 453			ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
 454			walk->to->len[j % 8] = htons(ent_len);
 455			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
 456						      offset + skip_len);
 457			offset += ent_len;
 458			len -= ent_len;
 459			j++;
 460			if ((j % 8) == 0)
 461				walk->to++;
 462		}
 463		walk->last_sg = sg;
 464		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
 465					  skip_len) + skip_len;
 466		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
 467		skip_len = 0;
 468		sg = sg_next(sg);
 469	}
 470	walk->nents = j;
 471}
 472
 473static inline void ulptx_walk_init(struct ulptx_walk *walk,
 474				   struct ulptx_sgl *ulp)
 475{
 476	walk->sgl = ulp;
 477	walk->nents = 0;
 478	walk->pair_idx = 0;
 479	walk->pair = ulp->sge;
 480	walk->last_sg = NULL;
 481	walk->last_sg_len = 0;
 482}
 483
 484static inline void ulptx_walk_end(struct ulptx_walk *walk)
 485{
 486	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
 487			      ULPTX_NSGE_V(walk->nents));
 488}
 489
 490
 491static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
 492					size_t size,
 493					dma_addr_t addr)
 494{
 495	if (!size)
 496		return;
 497
 498	if (walk->nents == 0) {
 499		walk->sgl->len0 = cpu_to_be32(size);
 500		walk->sgl->addr0 = cpu_to_be64(addr);
 501	} else {
 502		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
 503		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
 504		walk->pair_idx = !walk->pair_idx;
 505		if (!walk->pair_idx)
 506			walk->pair++;
 507	}
 508	walk->nents++;
 509}
 510
 511static void  ulptx_walk_add_sg(struct ulptx_walk *walk,
 512					struct scatterlist *sg,
 513			       unsigned int len,
 514			       unsigned int skip)
 515{
 516	int small;
 517	int skip_len = 0;
 518	unsigned int sgmin;
 519
 520	if (!len)
 521		return;
 522	while (sg && skip) {
 523		if (sg_dma_len(sg) <= skip) {
 524			skip -= sg_dma_len(sg);
 525			skip_len = 0;
 526			sg = sg_next(sg);
 527		} else {
 528			skip_len = skip;
 529			skip = 0;
 530		}
 531	}
 532	WARN(!sg, "SG should not be null here\n");
 533	if (sg && (walk->nents == 0)) {
 534		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
 535		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
 536		walk->sgl->len0 = cpu_to_be32(sgmin);
 537		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
 538		walk->nents++;
 539		len -= sgmin;
 540		walk->last_sg = sg;
 541		walk->last_sg_len = sgmin + skip_len;
 542		skip_len += sgmin;
 543		if (sg_dma_len(sg) == skip_len) {
 544			sg = sg_next(sg);
 545			skip_len = 0;
 546		}
 547	}
 548
 549	while (sg && len) {
 550		small = min(sg_dma_len(sg) - skip_len, len);
 551		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
 552		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
 553		walk->pair->addr[walk->pair_idx] =
 554			cpu_to_be64(sg_dma_address(sg) + skip_len);
 555		walk->pair_idx = !walk->pair_idx;
 556		walk->nents++;
 557		if (!walk->pair_idx)
 558			walk->pair++;
 559		len -= sgmin;
 560		skip_len += sgmin;
 561		walk->last_sg = sg;
 562		walk->last_sg_len = skip_len;
 563		if (sg_dma_len(sg) == skip_len) {
 564			sg = sg_next(sg);
 565			skip_len = 0;
 566		}
 567	}
 568}
 569
 570static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
 571{
 572	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 573	struct chcr_alg_template *chcr_crypto_alg =
 574		container_of(alg, struct chcr_alg_template, alg.skcipher);
 575
 576	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 577}
 578
 579static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
 580{
 581	struct adapter *adap = netdev2adap(dev);
 582	struct sge_uld_txq_info *txq_info =
 583		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
 584	struct sge_uld_txq *txq;
 585	int ret = 0;
 586
 587	local_bh_disable();
 588	txq = &txq_info->uldtxq[idx];
 589	spin_lock(&txq->sendq.lock);
 590	if (txq->full)
 591		ret = -1;
 592	spin_unlock(&txq->sendq.lock);
 593	local_bh_enable();
 594	return ret;
 595}
 596
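     /*
      * Copy the decrypt key material into the hardware key context: the full
      * reverse round key for CBC, otherwise the second half of the raw key
      * followed by the reverse round key.
      */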
 597static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
 598			       struct _key_ctx *key_ctx)
 599{
 600	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
 601		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
 602	} else {
 603		memcpy(key_ctx->key,
 604		       ablkctx->key + (ablkctx->enckey_len >> 1),
 605		       ablkctx->enckey_len >> 1);
 606		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
 607		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
 608	}
 609	return 0;
 610}
 611
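     /*
      * Return how many source bytes, starting @srcskip bytes into @src, still
      * fit in the remaining work-request @space when each scatterlist segment
      * is split into CHCR_SRC_SG_SIZE chunks.
      */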
 612static int chcr_hash_ent_in_wr(struct scatterlist *src,
 613			     unsigned int minsg,
 614			     unsigned int space,
 615			     unsigned int srcskip)
 616{
 617	int srclen = 0;
 618	int srcsg = minsg;
 619	int soffset = 0, sless;
 620
 621	if (sg_dma_len(src) == srcskip) {
 622		src = sg_next(src);
 623		srcskip = 0;
 624	}
 625	while (src && space > (sgl_ent_len[srcsg + 1])) {
 626		sless = min_t(unsigned int, sg_dma_len(src) - soffset -	srcskip,
 627							CHCR_SRC_SG_SIZE);
 628		srclen += sless;
 629		soffset += sless;
 630		srcsg++;
 631		if (sg_dma_len(src) == (soffset + srcskip)) {
 632			src = sg_next(src);
 633			soffset = 0;
 634			srcskip = 0;
 635		}
 636	}
 637	return srclen;
 638}
 639
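     /*
      * Like chcr_hash_ent_in_wr(), but walks the source and destination lists
      * together and returns the number of bytes for which both the source SGL
      * and the destination DSGL entries still fit in @space.
      */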
 640static int chcr_sg_ent_in_wr(struct scatterlist *src,
 641			     struct scatterlist *dst,
 642			     unsigned int minsg,
 643			     unsigned int space,
 644			     unsigned int srcskip,
 645			     unsigned int dstskip)
 646{
 647	int srclen = 0, dstlen = 0;
 648	int srcsg = minsg, dstsg = minsg;
 649	int offset = 0, soffset = 0, less, sless = 0;
 650
 651	if (sg_dma_len(src) == srcskip) {
 652		src = sg_next(src);
 653		srcskip = 0;
 654	}
 655	if (sg_dma_len(dst) == dstskip) {
 656		dst = sg_next(dst);
 657		dstskip = 0;
 658	}
 659
 660	while (src && dst &&
 661	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
 662		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
 663				CHCR_SRC_SG_SIZE);
 664		srclen += sless;
 665		srcsg++;
 666		offset = 0;
 667		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
 668		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
 669			if (srclen <= dstlen)
 670				break;
 671			less = min_t(unsigned int, sg_dma_len(dst) - offset -
 672				     dstskip, CHCR_DST_SG_SIZE);
 673			dstlen += less;
 674			offset += less;
 675			if ((offset + dstskip) == sg_dma_len(dst)) {
 676				dst = sg_next(dst);
 677				offset = 0;
 678			}
 679			dstsg++;
 680			dstskip = 0;
 681		}
 682		soffset += sless;
 683		if ((soffset + srcskip) == sg_dma_len(src)) {
 684			src = sg_next(src);
 685			srcskip = 0;
 686			soffset = 0;
 687		}
 688
 689	}
 690	return min(srclen, dstlen);
 691}
 692
 693static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
 694				struct skcipher_request *req,
 695				u8 *iv,
 696				unsigned short op_type)
 697{
 698	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 699	int err;
 700
 701	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
 702	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
 703				      req->base.complete, req->base.data);
 704	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
 705				   req->cryptlen, iv);
 706
 707	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
 708			crypto_skcipher_encrypt(&reqctx->fallback_req);
 709
 710	return err;
 711
 712}
 713
 714static inline int get_qidxs(struct crypto_async_request *req,
 715			    unsigned int *txqidx, unsigned int *rxqidx)
 716{
 717	struct crypto_tfm *tfm = req->tfm;
 718	int ret = 0;
 719
 720	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
 721	case CRYPTO_ALG_TYPE_AEAD:
 722	{
 723		struct aead_request *aead_req =
 724			container_of(req, struct aead_request, base);
 725		struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
 726		*txqidx = reqctx->txqidx;
 727		*rxqidx = reqctx->rxqidx;
 728		break;
 729	}
 730	case CRYPTO_ALG_TYPE_SKCIPHER:
 731	{
 732		struct skcipher_request *sk_req =
 733			container_of(req, struct skcipher_request, base);
 734		struct chcr_skcipher_req_ctx *reqctx =
 735			skcipher_request_ctx(sk_req);
 736		*txqidx = reqctx->txqidx;
 737		*rxqidx = reqctx->rxqidx;
 738		break;
 739	}
 740	case CRYPTO_ALG_TYPE_AHASH:
 741	{
 742		struct ahash_request *ahash_req =
 743			container_of(req, struct ahash_request, base);
 744		struct chcr_ahash_req_ctx *reqctx =
 745			ahash_request_ctx(ahash_req);
 746		*txqidx = reqctx->txqidx;
 747		*rxqidx = reqctx->rxqidx;
 748		break;
 749	}
 750	default:
 751		ret = -EINVAL;
 752		/* should never get here */
 753		BUG();
 754		break;
 755	}
 756	return ret;
 757}
 758
 759static inline void create_wreq(struct chcr_context *ctx,
 760			       struct chcr_wr *chcr_req,
 761			       struct crypto_async_request *req,
 762			       unsigned int imm,
 763			       int hash_sz,
 764			       unsigned int len16,
 765			       unsigned int sc_len,
 766			       unsigned int lcb)
 767{
 768	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 769	unsigned int tx_channel_id, rx_channel_id;
 770	unsigned int txqidx = 0, rxqidx = 0;
 771	unsigned int qid, fid;
 772
 773	get_qidxs(req, &txqidx, &rxqidx);
 774	qid = u_ctx->lldi.rxq_ids[rxqidx];
  775	fid = u_ctx->lldi.rxq_ids[0];
  776	tx_channel_id = txqidx / ctx->txq_perchan;
 777	rx_channel_id = rxqidx / ctx->rxq_perchan;
 778
 779
 780	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
 781	chcr_req->wreq.pld_size_hash_size =
 782		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
 783	chcr_req->wreq.len16_pkd =
 784		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
 785	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
 786	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
 787							    !!lcb, txqidx);
 788
 789	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
 790	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
 791				((sizeof(chcr_req->wreq)) >> 4)));
 792	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
 793	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
 794					   sizeof(chcr_req->key_ctx) + sc_len);
 795}
 796
 797/**
 798 *	create_cipher_wr - form the WR for cipher operations
  799 *	@wrparam: cipher work request parameters, carrying the skcipher
  800 *		  request (wrparam->req), the ingress qid on which the
  801 *		  response should be received (wrparam->qid) and the number
  802 *		  of bytes to process in this WR (wrparam->bytes)
 803 */
 804static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 805{
 806	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
  807	struct chcr_context *ctx = c_ctx(tfm);
  808	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 809	struct sk_buff *skb = NULL;
 810	struct chcr_wr *chcr_req;
 811	struct cpl_rx_phys_dsgl *phys_cpl;
 812	struct ulptx_sgl *ulptx;
 813	struct chcr_skcipher_req_ctx *reqctx =
 814		skcipher_request_ctx(wrparam->req);
 815	unsigned int temp = 0, transhdr_len, dst_size;
 816	int error;
 817	int nents;
 818	unsigned int kctx_len;
 819	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 820			GFP_KERNEL : GFP_ATOMIC;
 821	struct adapter *adap = padap(ctx->dev);
 822	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
  823
  824	nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
 825			      reqctx->dst_ofst);
 826	dst_size = get_space_for_phys_dsgl(nents);
 827	kctx_len = roundup(ablkctx->enckey_len, 16);
 828	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
 829	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
 830				  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
 831	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
 832				     (sgl_len(nents) * 8);
 833	transhdr_len += temp;
 834	transhdr_len = roundup(transhdr_len, 16);
 835	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
 836	if (!skb) {
 837		error = -ENOMEM;
 838		goto err;
 839	}
 840	chcr_req = __skb_put_zero(skb, transhdr_len);
 841	chcr_req->sec_cpl.op_ivinsrtofst =
 842			FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
 843
 844	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
 845	chcr_req->sec_cpl.aadstart_cipherstop_hi =
 846			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
 847
 848	chcr_req->sec_cpl.cipherstop_lo_authinsert =
 849			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
 850	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
 851							 ablkctx->ciph_mode,
 852							 0, 0, IV >> 1);
 853	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
 854							  0, 1, dst_size);
 855
 856	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
 857	if ((reqctx->op == CHCR_DECRYPT_OP) &&
 858	    (!(get_cryptoalg_subtype(tfm) ==
 859	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
 860	    (!(get_cryptoalg_subtype(tfm) ==
 861	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
 862		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
 863	} else {
 864		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
 865		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
 866			memcpy(chcr_req->key_ctx.key, ablkctx->key,
 867			       ablkctx->enckey_len);
 868		} else {
 869			memcpy(chcr_req->key_ctx.key, ablkctx->key +
 870			       (ablkctx->enckey_len >> 1),
 871			       ablkctx->enckey_len >> 1);
 872			memcpy(chcr_req->key_ctx.key +
 873			       (ablkctx->enckey_len >> 1),
 874			       ablkctx->key,
 875			       ablkctx->enckey_len >> 1);
 876		}
 877	}
 878	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
 879	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
 880	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
 881	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
 882
 883	atomic_inc(&adap->chcr_stats.cipher_rqst);
 884	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
 885		+ (reqctx->imm ? (wrparam->bytes) : 0);
 886	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
 887		    transhdr_len, temp,
 888			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
 889	reqctx->skb = skb;
 890
 891	if (reqctx->op && (ablkctx->ciph_mode ==
 892			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
 893		sg_pcopy_to_buffer(wrparam->req->src,
 894			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
 895			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
 896
 897	return skb;
 898err:
 899	return ERR_PTR(error);
 900}
 901
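     /* Map an AES key length to the hardware key-context cipher-key size code. */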
 902static inline int chcr_keyctx_ck_size(unsigned int keylen)
 903{
 904	int ck_size = 0;
 905
 906	if (keylen == AES_KEYSIZE_128)
 907		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
 908	else if (keylen == AES_KEYSIZE_192)
 909		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
 910	else if (keylen == AES_KEYSIZE_256)
 911		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
 912	else
 913		ck_size = 0;
 914
 915	return ck_size;
 916}
 917static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
 918				       const u8 *key,
 919				       unsigned int keylen)
 920{
 921	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 922
 923	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
 924				CRYPTO_TFM_REQ_MASK);
 925	crypto_skcipher_set_flags(ablkctx->sw_cipher,
 926				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
 927	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
 928}
 929
 930static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
 931			       const u8 *key,
 932			       unsigned int keylen)
 933{
 934	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 935	unsigned int ck_size, context_size;
 936	u16 alignment = 0;
 937	int err;
 938
 939	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 940	if (err)
 941		goto badkey_err;
 942
 943	ck_size = chcr_keyctx_ck_size(keylen);
 944	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
 945	memcpy(ablkctx->key, key, keylen);
 946	ablkctx->enckey_len = keylen;
 947	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
 948	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 949			keylen + alignment) >> 4;
 950
 951	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 952						0, 0, context_size);
 953	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
 954	return 0;
 955badkey_err:
 956	ablkctx->enckey_len = 0;
 957
 958	return err;
 959}
 960
 961static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
 962				   const u8 *key,
 963				   unsigned int keylen)
 964{
 965	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 966	unsigned int ck_size, context_size;
 967	u16 alignment = 0;
 968	int err;
 969
 970	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 971	if (err)
 972		goto badkey_err;
 973	ck_size = chcr_keyctx_ck_size(keylen);
 974	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
 975	memcpy(ablkctx->key, key, keylen);
 976	ablkctx->enckey_len = keylen;
 977	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 978			keylen + alignment) >> 4;
 979
 980	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 981						0, 0, context_size);
 982	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
 983
 984	return 0;
 985badkey_err:
 986	ablkctx->enckey_len = 0;
 987
 988	return err;
 989}
 990
 991static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
 992				   const u8 *key,
 993				   unsigned int keylen)
 994{
 995	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 996	unsigned int ck_size, context_size;
 997	u16 alignment = 0;
 998	int err;
 999
1000	if (keylen < CTR_RFC3686_NONCE_SIZE)
1001		return -EINVAL;
1002	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
1003	       CTR_RFC3686_NONCE_SIZE);
1004
1005	keylen -= CTR_RFC3686_NONCE_SIZE;
1006	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
1007	if (err)
1008		goto badkey_err;
1009
1010	ck_size = chcr_keyctx_ck_size(keylen);
1011	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
1012	memcpy(ablkctx->key, key, keylen);
1013	ablkctx->enckey_len = keylen;
1014	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
1015			keylen + alignment) >> 4;
1016
1017	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
1018						0, 0, context_size);
1019	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
1020
1021	return 0;
1022badkey_err:
1023	ablkctx->enckey_len = 0;
1024
1025	return err;
1026}
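
     /*
      * dstiv = srciv + add, treating the IV as a 128-bit big-endian counter
      * and propagating carries from the least-significant word upward.
      */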
1027static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
1028{
1029	unsigned int size = AES_BLOCK_SIZE;
1030	__be32 *b = (__be32 *)(dstiv + size);
1031	u32 c, prev;
1032
1033	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
1034	for (; size >= 4; size -= 4) {
1035		prev = be32_to_cpu(*--b);
1036		c = prev + add;
1037		*b = cpu_to_be32(c);
1038		if (prev < c)
1039			break;
1040		add = 1;
1041	}
1042
1043}
1044
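     /*
      * Clamp @bytes so that the 32-bit big-endian counter at the end of @iv
      * does not wrap within a single work request.
      */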
1045static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
1046{
1047	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
1048	u64 c;
1049	u32 temp = be32_to_cpu(*--b);
1050
1051	temp = ~temp;
 1052	c = (u64)temp + 1; // number of blocks that can be processed without overflow
1053	if ((bytes / AES_BLOCK_SIZE) >= c)
1054		bytes = c * AES_BLOCK_SIZE;
1055	return bytes;
1056}
1057
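     /*
      * Advance the XTS tweak past the blocks handled by the last WR: encrypt
      * the stored IV with the tweak key, apply one GF(2^128) doubling per
      * processed block (gf128mul_x8_ble() does eight doublings at a time),
      * and decrypt it back unless this is the final tweak.
      */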
1058static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
1059			     u32 isfinal)
1060{
1061	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1062	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1063	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1064	struct crypto_aes_ctx aes;
1065	int ret, i;
1066	u8 *key;
1067	unsigned int keylen;
1068	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1069	int round8 = round / 8;
1070
1071	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1072
1073	keylen = ablkctx->enckey_len / 2;
1074	key = ablkctx->key + keylen;
 1075	/* For a 192 bit key remove the padded zeroes which were
1076	 * added in chcr_xts_setkey
1077	 */
1078	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
1079			== CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
1080		ret = aes_expandkey(&aes, key, keylen - 8);
1081	else
1082		ret = aes_expandkey(&aes, key, keylen);
1083	if (ret)
1084		return ret;
1085	aes_encrypt(&aes, iv, iv);
1086	for (i = 0; i < round8; i++)
1087		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1088
1089	for (i = 0; i < (round % 8); i++)
1090		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1091
1092	if (!isfinal)
1093		aes_decrypt(&aes, iv, iv);
1094
1095	memzero_explicit(&aes, sizeof(aes));
1096	return 0;
1097}
1098
1099static int chcr_update_cipher_iv(struct skcipher_request *req,
1100				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1101{
1102	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1103	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1104	int subtype = get_cryptoalg_subtype(tfm);
1105	int ret = 0;
1106
1107	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1108		ctr_add_iv(iv, req->iv, (reqctx->processed /
1109			   AES_BLOCK_SIZE));
1110	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1111		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1112			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1113						AES_BLOCK_SIZE) + 1);
1114	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1115		ret = chcr_update_tweak(req, iv, 0);
1116	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1117		if (reqctx->op)
1118			/*Updated before sending last WR*/
1119			memcpy(iv, req->iv, AES_BLOCK_SIZE);
1120		else
1121			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1122	}
1123
1124	return ret;
1125
1126}
1127
 1128/* We need a separate function for the final IV because in RFC 3686 the
 1129 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 1130 * remains constant for subsequent update requests
1131 */
1132
1133static int chcr_final_cipher_iv(struct skcipher_request *req,
1134				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1135{
1136	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1137	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1138	int subtype = get_cryptoalg_subtype(tfm);
1139	int ret = 0;
1140
1141	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1142		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
1143						       AES_BLOCK_SIZE));
1144	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
1145		if (!reqctx->partial_req)
1146			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1147		else
1148			ret = chcr_update_tweak(req, iv, 1);
1149	}
1150	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1151		/*Already updated for Decrypt*/
1152		if (!reqctx->op)
1153			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1154
1155	}
1156	return ret;
1157
1158}
1159
1160static int chcr_handle_cipher_resp(struct skcipher_request *req,
1161				   unsigned char *input, int err)
1162{
1163	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1164	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1165	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1166	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1167	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1168	struct chcr_dev *dev = c_ctx(tfm)->dev;
1169	struct chcr_context *ctx = c_ctx(tfm);
1170	struct adapter *adap = padap(ctx->dev);
1171	struct cipher_wr_param wrparam;
1172	struct sk_buff *skb;
1173	int bytes;
1174
1175	if (err)
1176		goto unmap;
1177	if (req->cryptlen == reqctx->processed) {
1178		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1179				      req);
1180		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
1181		goto complete;
1182	}
1183
1184	if (!reqctx->imm) {
1185		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1186					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1187					  reqctx->src_ofst, reqctx->dst_ofst);
1188		if ((bytes + reqctx->processed) >= req->cryptlen)
1189			bytes  = req->cryptlen - reqctx->processed;
1190		else
1191			bytes = rounddown(bytes, 16);
1192	} else {
 1193		/* CTR mode counter overflow */
1194		bytes  = req->cryptlen - reqctx->processed;
1195	}
1196	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1197	if (err)
1198		goto unmap;
1199
1200	if (unlikely(bytes == 0)) {
1201		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1202				      req);
1203		memcpy(req->iv, reqctx->init_iv, IV);
1204		atomic_inc(&adap->chcr_stats.fallback);
1205		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
1206					   reqctx->op);
1207		goto complete;
1208	}
1209
1210	if (get_cryptoalg_subtype(tfm) ==
1211	    CRYPTO_ALG_SUB_TYPE_CTR)
1212		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1213	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
1214	wrparam.req = req;
1215	wrparam.bytes = bytes;
1216	skb = create_cipher_wr(&wrparam);
1217	if (IS_ERR(skb)) {
1218		pr_err("%s : Failed to form WR. No memory\n", __func__);
1219		err = PTR_ERR(skb);
1220		goto unmap;
1221	}
1222	skb->dev = u_ctx->lldi.ports[0];
1223	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1224	chcr_send_wr(skb);
1225	reqctx->last_req_len = bytes;
1226	reqctx->processed += bytes;
1227	if (get_cryptoalg_subtype(tfm) ==
1228		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
 1229			CRYPTO_TFM_REQ_MAY_SLEEP) {
1230		complete(&ctx->cbc_aes_aio_done);
1231	}
1232	return 0;
1233unmap:
1234	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1235complete:
1236	if (get_cryptoalg_subtype(tfm) ==
1237		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
 1238			CRYPTO_TFM_REQ_MAY_SLEEP) {
1239		complete(&ctx->cbc_aes_aio_done);
1240	}
1241	chcr_dec_wrcount(dev);
1242	req->base.complete(&req->base, err);
1243	return err;
1244}
1245
1246static int process_cipher(struct skcipher_request *req,
1247				  unsigned short qid,
1248				  struct sk_buff **skb,
1249				  unsigned short op_type)
1250{
1251	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1252	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1253	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1254	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1255	struct adapter *adap = padap(c_ctx(tfm)->dev);
1256	struct	cipher_wr_param wrparam;
1257	int bytes, err = -EINVAL;
1258	int subtype;
1259
1260	reqctx->processed = 0;
1261	reqctx->partial_req = 0;
1262	if (!req->iv)
1263		goto error;
1264	subtype = get_cryptoalg_subtype(tfm);
1265	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1266	    (req->cryptlen == 0) ||
1267	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
1268		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
1269			goto fallback;
1270		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
1271			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1272			goto fallback;
1273		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1274		       ablkctx->enckey_len, req->cryptlen, ivsize);
1275		goto error;
1276	}
1277
1278	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1279	if (err)
1280		goto error;
1281	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1282					    AES_MIN_KEY_SIZE +
1283					    sizeof(struct cpl_rx_phys_dsgl) +
1284					/*Min dsgl size*/
1285					    32))) {
1286		/* Can be sent as Imm*/
1287		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1288
1289		dnents = sg_nents_xlen(req->dst, req->cryptlen,
1290				       CHCR_DST_SG_SIZE, 0);
1291		phys_dsgl = get_space_for_phys_dsgl(dnents);
1292		kctx_len = roundup(ablkctx->enckey_len, 16);
1293		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1294		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
1295			SGE_MAX_WR_LEN;
1296		bytes = IV + req->cryptlen;
1297
1298	} else {
1299		reqctx->imm = 0;
1300	}
1301
1302	if (!reqctx->imm) {
1303		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1304					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1305					  0, 0);
1306		if ((bytes + reqctx->processed) >= req->cryptlen)
1307			bytes  = req->cryptlen - reqctx->processed;
1308		else
1309			bytes = rounddown(bytes, 16);
1310	} else {
1311		bytes = req->cryptlen;
1312	}
1313	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
1314		bytes = adjust_ctr_overflow(req->iv, bytes);
1315	}
1316	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1317		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1318		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1319				CTR_RFC3686_IV_SIZE);
1320
1321		/* initialize counter portion of counter block */
1322		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1323			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1324		memcpy(reqctx->init_iv, reqctx->iv, IV);
1325
1326	} else {
1327
1328		memcpy(reqctx->iv, req->iv, IV);
1329		memcpy(reqctx->init_iv, req->iv, IV);
1330	}
1331	if (unlikely(bytes == 0)) {
1332		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1333				      req);
1334fallback:       atomic_inc(&adap->chcr_stats.fallback);
1335		err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
1336					   subtype ==
1337					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
1338					   reqctx->iv : req->iv,
1339					   op_type);
1340		goto error;
1341	}
1342	reqctx->op = op_type;
1343	reqctx->srcsg = req->src;
1344	reqctx->dstsg = req->dst;
1345	reqctx->src_ofst = 0;
1346	reqctx->dst_ofst = 0;
1347	wrparam.qid = qid;
1348	wrparam.req = req;
1349	wrparam.bytes = bytes;
1350	*skb = create_cipher_wr(&wrparam);
1351	if (IS_ERR(*skb)) {
1352		err = PTR_ERR(*skb);
1353		goto unmap;
1354	}
1355	reqctx->processed = bytes;
1356	reqctx->last_req_len = bytes;
1357	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
1358
1359	return 0;
1360unmap:
1361	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1362error:
1363	return err;
1364}
1365
1366static int chcr_aes_encrypt(struct skcipher_request *req)
1367{
1368	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1369	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1370	struct chcr_dev *dev = c_ctx(tfm)->dev;
1371	struct sk_buff *skb = NULL;
1372	int err;
1373	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1374	struct chcr_context *ctx = c_ctx(tfm);
1375	unsigned int cpu;
1376
1377	cpu = get_cpu();
1378	reqctx->txqidx = cpu % ctx->ntxq;
1379	reqctx->rxqidx = cpu % ctx->nrxq;
1380	put_cpu();
1381
1382	err = chcr_inc_wrcount(dev);
1383	if (err)
1384		return -ENXIO;
1385	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1386						reqctx->txqidx) &&
1387		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1388			err = -ENOSPC;
1389			goto error;
1390	}
1391
1392	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1393			     &skb, CHCR_ENCRYPT_OP);
1394	if (err || !skb)
1395		return  err;
1396	skb->dev = u_ctx->lldi.ports[0];
1397	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1398	chcr_send_wr(skb);
1399	if (get_cryptoalg_subtype(tfm) ==
1400		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
 1401			CRYPTO_TFM_REQ_MAY_SLEEP) {
1402			reqctx->partial_req = 1;
1403			wait_for_completion(&ctx->cbc_aes_aio_done);
 1404	}
1405	return -EINPROGRESS;
1406error:
1407	chcr_dec_wrcount(dev);
1408	return err;
1409}
1410
1411static int chcr_aes_decrypt(struct skcipher_request *req)
1412{
1413	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1414	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1415	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1416	struct chcr_dev *dev = c_ctx(tfm)->dev;
1417	struct sk_buff *skb = NULL;
1418	int err;
1419	struct chcr_context *ctx = c_ctx(tfm);
1420	unsigned int cpu;
1421
1422	cpu = get_cpu();
1423	reqctx->txqidx = cpu % ctx->ntxq;
1424	reqctx->rxqidx = cpu % ctx->nrxq;
1425	put_cpu();
1426
1427	err = chcr_inc_wrcount(dev);
1428	if (err)
1429		return -ENXIO;
1430
1431	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1432						reqctx->txqidx) &&
1433		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
1434			return -ENOSPC;
1435	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1436			     &skb, CHCR_DECRYPT_OP);
1437	if (err || !skb)
1438		return err;
1439	skb->dev = u_ctx->lldi.ports[0];
1440	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1441	chcr_send_wr(skb);
1442	return -EINPROGRESS;
1443}
1444static int chcr_device_init(struct chcr_context *ctx)
1445{
1446	struct uld_ctx *u_ctx = NULL;
1447	int txq_perchan, ntxq;
1448	int err = 0, rxq_perchan;
1449
1450	if (!ctx->dev) {
1451		u_ctx = assign_chcr_device();
1452		if (!u_ctx) {
1453			err = -ENXIO;
1454			pr_err("chcr device assignment fails\n");
1455			goto out;
1456		}
1457		ctx->dev = &u_ctx->dev;
1458		ntxq = u_ctx->lldi.ntxq;
1459		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1460		txq_perchan = ntxq / u_ctx->lldi.nchan;
1461		ctx->ntxq = ntxq;
1462		ctx->nrxq = u_ctx->lldi.nrxq;
1463		ctx->rxq_perchan = rxq_perchan;
1464		ctx->txq_perchan = txq_perchan;
1465	}
1466out:
1467	return err;
1468}
1469
1470static int chcr_init_tfm(struct crypto_skcipher *tfm)
1471{
1472	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1473	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1474	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1475
1476	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
1477				CRYPTO_ALG_NEED_FALLBACK);
1478	if (IS_ERR(ablkctx->sw_cipher)) {
1479		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1480		return PTR_ERR(ablkctx->sw_cipher);
1481	}
1482	init_completion(&ctx->cbc_aes_aio_done);
1483	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1484					 crypto_skcipher_reqsize(ablkctx->sw_cipher));
1485
1486	return chcr_device_init(ctx);
1487}
1488
1489static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
1490{
1491	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1492	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1493	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1494
 1495	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
 1496	 * cannot be used as the fallback in chcr_handle_cipher_resp()
1497	 */
1498	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1499				CRYPTO_ALG_NEED_FALLBACK);
1500	if (IS_ERR(ablkctx->sw_cipher)) {
1501		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1502		return PTR_ERR(ablkctx->sw_cipher);
1503	}
1504	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1505				    crypto_skcipher_reqsize(ablkctx->sw_cipher));
1506	return chcr_device_init(ctx);
1507}
1508
1509
1510static void chcr_exit_tfm(struct crypto_skcipher *tfm)
1511{
1512	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1513	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1514
1515	crypto_free_skcipher(ablkctx->sw_cipher);
1516}
1517
1518static int get_alg_config(struct algo_param *params,
1519			  unsigned int auth_size)
1520{
1521	switch (auth_size) {
1522	case SHA1_DIGEST_SIZE:
1523		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1524		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1525		params->result_size = SHA1_DIGEST_SIZE;
1526		break;
1527	case SHA224_DIGEST_SIZE:
1528		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1529		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1530		params->result_size = SHA256_DIGEST_SIZE;
1531		break;
1532	case SHA256_DIGEST_SIZE:
1533		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1534		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1535		params->result_size = SHA256_DIGEST_SIZE;
1536		break;
1537	case SHA384_DIGEST_SIZE:
1538		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1539		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1540		params->result_size = SHA512_DIGEST_SIZE;
1541		break;
1542	case SHA512_DIGEST_SIZE:
1543		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1544		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1545		params->result_size = SHA512_DIGEST_SIZE;
1546		break;
1547	default:
1548		pr_err("ERROR, unsupported digest size\n");
1549		return -EINVAL;
1550	}
1551	return 0;
1552}
1553
1554static inline void chcr_free_shash(struct crypto_shash *base_hash)
1555{
1556		crypto_free_shash(base_hash);
1557}
1558
1559/**
1560 *	create_hash_wr - Create hash work request
 1561 *	@req: ahash request
 1562 */
1563static struct sk_buff *create_hash_wr(struct ahash_request *req,
1564				      struct hash_wr_param *param)
1565{
1566	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1567	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1568	struct chcr_context *ctx = h_ctx(tfm);
1569	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1570	struct sk_buff *skb = NULL;
1571	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1572	struct chcr_wr *chcr_req;
1573	struct ulptx_sgl *ulptx;
1574	unsigned int nents = 0, transhdr_len;
1575	unsigned int temp = 0;
1576	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1577		GFP_ATOMIC;
1578	struct adapter *adap = padap(h_ctx(tfm)->dev);
1579	int error = 0;
1580	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
 1581
 1582	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1583	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1584				param->sg_len) <= SGE_MAX_WR_LEN;
1585	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1586		      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1587	nents += param->bfr_len ? 1 : 0;
1588	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1589				param->sg_len, 16) : (sgl_len(nents) * 8);
1590	transhdr_len = roundup(transhdr_len, 16);
1591
1592	skb = alloc_skb(transhdr_len, flags);
1593	if (!skb)
1594		return ERR_PTR(-ENOMEM);
1595	chcr_req = __skb_put_zero(skb, transhdr_len);
1596
1597	chcr_req->sec_cpl.op_ivinsrtofst =
1598		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
1599
1600	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1601
1602	chcr_req->sec_cpl.aadstart_cipherstop_hi =
1603		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1604	chcr_req->sec_cpl.cipherstop_lo_authinsert =
1605		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1606	chcr_req->sec_cpl.seqno_numivs =
1607		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1608					 param->opad_needed, 0);
1609
1610	chcr_req->sec_cpl.ivgen_hdrlen =
1611		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1612
1613	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1614	       param->alg_prm.result_size);
1615
1616	if (param->opad_needed)
1617		memcpy(chcr_req->key_ctx.key +
1618		       ((param->alg_prm.result_size <= 32) ? 32 :
1619			CHCR_HASH_MAX_DIGEST_SIZE),
1620		       hmacctx->opad, param->alg_prm.result_size);
1621
1622	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1623					    param->alg_prm.mk_size, 0,
1624					    param->opad_needed,
1625					    ((param->kctx_len +
1626					     sizeof(chcr_req->key_ctx)) >> 4));
1627	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1628	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1629				     DUMMY_BYTES);
1630	if (param->bfr_len != 0) {
1631		req_ctx->hctx_wr.dma_addr =
1632			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1633				       param->bfr_len, DMA_TO_DEVICE);
1634		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
 1635				       req_ctx->hctx_wr.dma_addr)) {
1636			error = -ENOMEM;
1637			goto err;
1638		}
1639		req_ctx->hctx_wr.dma_len = param->bfr_len;
1640	} else {
1641		req_ctx->hctx_wr.dma_addr = 0;
1642	}
1643	chcr_add_hash_src_ent(req, ulptx, param);
 1644	/* Request up to max wr size */
1645	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1646				(param->sg_len + param->bfr_len) : 0);
1647	atomic_inc(&adap->chcr_stats.digest_rqst);
1648	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1649		    param->hash_size, transhdr_len,
1650		    temp,  0);
1651	req_ctx->hctx_wr.skb = skb;
1652	return skb;
1653err:
1654	kfree_skb(skb);
1655	return  ERR_PTR(error);
1656}
1657
1658static int chcr_ahash_update(struct ahash_request *req)
1659{
1660	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1661	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1662	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1663	struct chcr_context *ctx = h_ctx(rtfm);
1664	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1665	struct sk_buff *skb;
1666	u8 remainder = 0, bs;
1667	unsigned int nbytes = req->nbytes;
1668	struct hash_wr_param params;
1669	int error;
1670	unsigned int cpu;
1671
1672	cpu = get_cpu();
1673	req_ctx->txqidx = cpu % ctx->ntxq;
1674	req_ctx->rxqidx = cpu % ctx->nrxq;
1675	put_cpu();
1676
1677	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1678
1679	if (nbytes + req_ctx->reqlen >= bs) {
1680		remainder = (nbytes + req_ctx->reqlen) % bs;
1681		nbytes = nbytes + req_ctx->reqlen - remainder;
1682	} else {
1683		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1684				   + req_ctx->reqlen, nbytes, 0);
1685		req_ctx->reqlen += nbytes;
1686		return 0;
1687	}
1688	error = chcr_inc_wrcount(dev);
1689	if (error)
1690		return -ENXIO;
 1691	/* Detach state for CHCR means lldi or padap is freed. Increasing the
 1692	 * inflight count for dev guarantees that lldi and padap remain valid.
1693	 */
1694	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1695						req_ctx->txqidx) &&
1696		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1697			error = -ENOSPC;
1698			goto err;
1699	}
1700
1701	chcr_init_hctx_per_wr(req_ctx);
1702	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1703	if (error) {
1704		error = -ENOMEM;
1705		goto err;
1706	}
1707	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1708	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1709	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1710				     HASH_SPACE_LEFT(params.kctx_len), 0);
1711	if (params.sg_len > req->nbytes)
1712		params.sg_len = req->nbytes;
1713	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1714			req_ctx->reqlen;
1715	params.opad_needed = 0;
1716	params.more = 1;
1717	params.last = 0;
1718	params.bfr_len = req_ctx->reqlen;
1719	params.scmd1 = 0;
1720	req_ctx->hctx_wr.srcsg = req->src;
1721
1722	params.hash_size = params.alg_prm.result_size;
1723	req_ctx->data_len += params.sg_len + params.bfr_len;
1724	skb = create_hash_wr(req, &params);
1725	if (IS_ERR(skb)) {
1726		error = PTR_ERR(skb);
1727		goto unmap;
1728	}
1729
1730	req_ctx->hctx_wr.processed += params.sg_len;
1731	if (remainder) {
1732		/* Swap buffers */
1733		swap(req_ctx->reqbfr, req_ctx->skbfr);
1734		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1735				   req_ctx->reqbfr, remainder, req->nbytes -
1736				   remainder);
1737	}
1738	req_ctx->reqlen = remainder;
1739	skb->dev = u_ctx->lldi.ports[0];
1740	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1741	chcr_send_wr(skb);
1742	return -EINPROGRESS;
1743unmap:
1744	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1745err:
1746	chcr_dec_wrcount(dev);
1747	return error;
1748}
1749
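     /*
      * Build a final padding block: 0x80, zero fill, then the 64-bit message
      * bit length (@scmd1 << 3) in the last eight bytes of the block.
      */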
1750static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1751{
1752	memset(bfr_ptr, 0, bs);
1753	*bfr_ptr = 0x80;
1754	if (bs == 64)
1755		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
1756	else
1757		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
1758}
1759
1760static int chcr_ahash_final(struct ahash_request *req)
1761{
1762	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1763	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1764	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1765	struct hash_wr_param params;
1766	struct sk_buff *skb;
1767	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1768	struct chcr_context *ctx = h_ctx(rtfm);
1769	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1770	int error;
1771	unsigned int cpu;
1772
1773	cpu = get_cpu();
1774	req_ctx->txqidx = cpu % ctx->ntxq;
1775	req_ctx->rxqidx = cpu % ctx->nrxq;
1776	put_cpu();
1777
1778	error = chcr_inc_wrcount(dev);
1779	if (error)
1780		return -ENXIO;
1781
1782	chcr_init_hctx_per_wr(req_ctx);
1783	if (is_hmac(crypto_ahash_tfm(rtfm)))
1784		params.opad_needed = 1;
1785	else
1786		params.opad_needed = 0;
1787	params.sg_len = 0;
1788	req_ctx->hctx_wr.isfinal = 1;
1789	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1790	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1791	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1792		params.opad_needed = 1;
1793		params.kctx_len *= 2;
1794	} else {
1795		params.opad_needed = 0;
1796	}
1797
1798	req_ctx->hctx_wr.result = 1;
1799	params.bfr_len = req_ctx->reqlen;
1800	req_ctx->data_len += params.bfr_len + params.sg_len;
1801	req_ctx->hctx_wr.srcsg = req->src;
1802	if (req_ctx->reqlen == 0) {
1803		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1804		params.last = 0;
1805		params.more = 1;
1806		params.scmd1 = 0;
1807		params.bfr_len = bs;
1808
1809	} else {
1810		params.scmd1 = req_ctx->data_len;
1811		params.last = 1;
1812		params.more = 0;
1813	}
1814	params.hash_size = crypto_ahash_digestsize(rtfm);
1815	skb = create_hash_wr(req, &params);
1816	if (IS_ERR(skb)) {
1817		error = PTR_ERR(skb);
1818		goto err;
1819	}
1820	req_ctx->reqlen = 0;
1821	skb->dev = u_ctx->lldi.ports[0];
1822	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1823	chcr_send_wr(skb);
1824	return -EINPROGRESS;
1825err:
1826	chcr_dec_wrcount(dev);
1827	return error;
1828}
1829
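/*
 * chcr_ahash_finup() - hash the remaining data and produce the digest.
 * When the source does not fit into a single work request, an
 * intermediate (non-final) work request is issued and the transfer is
 * continued from the response handler.
 */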
1830static int chcr_ahash_finup(struct ahash_request *req)
1831{
1832	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1833	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1834	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1835	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1836	struct chcr_context *ctx = h_ctx(rtfm);
1837	struct sk_buff *skb;
1838	struct hash_wr_param params;
1839	u8  bs;
1840	int error;
1841	unsigned int cpu;
1842
1843	cpu = get_cpu();
1844	req_ctx->txqidx = cpu % ctx->ntxq;
1845	req_ctx->rxqidx = cpu % ctx->nrxq;
1846	put_cpu();
1847
1848	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1849	error = chcr_inc_wrcount(dev);
1850	if (error)
1851		return -ENXIO;
1852
1853	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1854						req_ctx->txqidx) &&
1855		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1856			error = -ENOSPC;
1857			goto err;
1858	}
1859	chcr_init_hctx_per_wr(req_ctx);
1860	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1861	if (error) {
1862		error = -ENOMEM;
1863		goto err;
1864	}
1865
1866	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1867	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1868	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1869		params.kctx_len *= 2;
1870		params.opad_needed = 1;
1871	} else {
1872		params.opad_needed = 0;
1873	}
1874
1875	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1876				    HASH_SPACE_LEFT(params.kctx_len), 0);
1877	if (params.sg_len < req->nbytes) {
1878		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1879			params.kctx_len /= 2;
1880			params.opad_needed = 0;
1881		}
1882		params.last = 0;
1883		params.more = 1;
1884		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1885					- req_ctx->reqlen;
1886		params.hash_size = params.alg_prm.result_size;
1887		params.scmd1 = 0;
1888	} else {
1889		params.last = 1;
1890		params.more = 0;
1891		params.sg_len = req->nbytes;
1892		params.hash_size = crypto_ahash_digestsize(rtfm);
1893		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1894				params.sg_len;
1895	}
1896	params.bfr_len = req_ctx->reqlen;
1897	req_ctx->data_len += params.bfr_len + params.sg_len;
1898	req_ctx->hctx_wr.result = 1;
1899	req_ctx->hctx_wr.srcsg = req->src;
1900	if ((req_ctx->reqlen + req->nbytes) == 0) {
1901		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1902		params.last = 0;
1903		params.more = 1;
1904		params.scmd1 = 0;
1905		params.bfr_len = bs;
1906	}
1907	skb = create_hash_wr(req, &params);
1908	if (IS_ERR(skb)) {
1909		error = PTR_ERR(skb);
1910		goto unmap;
1911	}
1912	req_ctx->reqlen = 0;
1913	req_ctx->hctx_wr.processed += params.sg_len;
1914	skb->dev = u_ctx->lldi.ports[0];
1915	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1916	chcr_send_wr(skb);
1917	return -EINPROGRESS;
1918unmap:
1919	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1920err:
1921	chcr_dec_wrcount(dev);
1922	return error;
1923}
1924
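/*
 * chcr_ahash_digest() - one-shot init + update + final. Reinitialises the
 * request state and hashes req->src, splitting the data across several
 * work requests when it does not fit into one.
 */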
1925static int chcr_ahash_digest(struct ahash_request *req)
1926{
1927	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1928	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1929	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1930	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1931	struct chcr_context *ctx = h_ctx(rtfm);
1932	struct sk_buff *skb;
1933	struct hash_wr_param params;
1934	u8  bs;
1935	int error;
1936	unsigned int cpu;
1937
1938	cpu = get_cpu();
1939	req_ctx->txqidx = cpu % ctx->ntxq;
1940	req_ctx->rxqidx = cpu % ctx->nrxq;
1941	put_cpu();
1942
1943	rtfm->init(req);
1944	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1945	error = chcr_inc_wrcount(dev);
1946	if (error)
1947		return -ENXIO;
1948
1949	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1950						req_ctx->txqidx) &&
1951		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1952			error = -ENOSPC;
1953			goto err;
1954	}
1955
1956	chcr_init_hctx_per_wr(req_ctx);
1957	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1958	if (error) {
1959		error = -ENOMEM;
1960		goto err;
1961	}
1962
1963	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1964	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1965	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1966		params.kctx_len *= 2;
1967		params.opad_needed = 1;
1968	} else {
1969		params.opad_needed = 0;
1970	}
1971	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1972				HASH_SPACE_LEFT(params.kctx_len), 0);
1973	if (params.sg_len < req->nbytes) {
1974		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1975			params.kctx_len /= 2;
1976			params.opad_needed = 0;
1977		}
1978		params.last = 0;
1979		params.more = 1;
1980		params.scmd1 = 0;
1981		params.sg_len = rounddown(params.sg_len, bs);
1982		params.hash_size = params.alg_prm.result_size;
1983	} else {
1984		params.sg_len = req->nbytes;
1985		params.hash_size = crypto_ahash_digestsize(rtfm);
1986		params.last = 1;
1987		params.more = 0;
1988		params.scmd1 = req->nbytes + req_ctx->data_len;
1989
1990	}
1991	params.bfr_len = 0;
1992	req_ctx->hctx_wr.result = 1;
1993	req_ctx->hctx_wr.srcsg = req->src;
1994	req_ctx->data_len += params.bfr_len + params.sg_len;
1995
1996	if (req->nbytes == 0) {
1997		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1998		params.more = 1;
1999		params.bfr_len = bs;
2000	}
2001
2002	skb = create_hash_wr(req, &params);
2003	if (IS_ERR(skb)) {
2004		error = PTR_ERR(skb);
2005		goto unmap;
2006	}
2007	req_ctx->hctx_wr.processed += params.sg_len;
2008	skb->dev = u_ctx->lldi.ports[0];
2009	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
2010	chcr_send_wr(skb);
2011	return -EINPROGRESS;
2012unmap:
2013	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2014err:
2015	chcr_dec_wrcount(dev);
2016	return error;
2017}
2018
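/*
 * chcr_ahash_continue() - issue the next work request for a hash whose
 * source data spans multiple work requests. Called from the response
 * handler until all of req->src has been processed.
 */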
2019static int chcr_ahash_continue(struct ahash_request *req)
2020{
2021	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2022	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2023	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
2024	struct chcr_context *ctx = h_ctx(rtfm);
2025	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2026	struct sk_buff *skb;
2027	struct hash_wr_param params;
2028	u8  bs;
2029	int error;
2030	unsigned int cpu;
2031
2032	cpu = get_cpu();
2033	reqctx->txqidx = cpu % ctx->ntxq;
2034	reqctx->rxqidx = cpu % ctx->nrxq;
2035	put_cpu();
2036
2037	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2038	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2039	params.kctx_len = roundup(params.alg_prm.result_size, 16);
2040	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2041		params.kctx_len *= 2;
2042		params.opad_needed = 1;
2043	} else {
2044		params.opad_needed = 0;
2045	}
2046	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2047					    HASH_SPACE_LEFT(params.kctx_len),
2048					    hctx_wr->src_ofst);
2049	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2050		params.sg_len = req->nbytes - hctx_wr->processed;
2051	if (!hctx_wr->result ||
2052	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2053		if (is_hmac(crypto_ahash_tfm(rtfm))) {
2054			params.kctx_len /= 2;
2055			params.opad_needed = 0;
2056		}
2057		params.last = 0;
2058		params.more = 1;
2059		params.sg_len = rounddown(params.sg_len, bs);
2060		params.hash_size = params.alg_prm.result_size;
2061		params.scmd1 = 0;
2062	} else {
2063		params.last = 1;
2064		params.more = 0;
2065		params.hash_size = crypto_ahash_digestsize(rtfm);
2066		params.scmd1 = reqctx->data_len + params.sg_len;
2067	}
2068	params.bfr_len = 0;
2069	reqctx->data_len += params.sg_len;
2070	skb = create_hash_wr(req, &params);
2071	if (IS_ERR(skb)) {
2072		error = PTR_ERR(skb);
2073		goto err;
2074	}
2075	hctx_wr->processed += params.sg_len;
2076	skb->dev = u_ctx->lldi.ports[0];
2077	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2078	chcr_send_wr(skb);
2079	return 0;
2080err:
2081	return error;
2082}
2083
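/*
 * chcr_handle_ahash_resp() - completion handler for hash work requests.
 * Copies the final digest or the intermediate partial hash out of the
 * CPL response, continues multi-WR transfers and completes the request.
 */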
2084static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2085					  unsigned char *input,
2086					  int err)
2087{
2088	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2089	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2090	int digestsize, updated_digestsize;
2091	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2092	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2093	struct chcr_dev *dev = h_ctx(tfm)->dev;
2094
2095	if (input == NULL)
2096		goto out;
2097	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2098	updated_digestsize = digestsize;
2099	if (digestsize == SHA224_DIGEST_SIZE)
2100		updated_digestsize = SHA256_DIGEST_SIZE;
2101	else if (digestsize == SHA384_DIGEST_SIZE)
2102		updated_digestsize = SHA512_DIGEST_SIZE;
2103
2104	if (hctx_wr->dma_addr) {
2105		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2106				 hctx_wr->dma_len, DMA_TO_DEVICE);
2107		hctx_wr->dma_addr = 0;
2108	}
2109	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2110				 req->nbytes)) {
2111		if (hctx_wr->result == 1) {
2112			hctx_wr->result = 0;
2113			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2114			       digestsize);
2115		} else {
2116			memcpy(reqctx->partial_hash,
2117			       input + sizeof(struct cpl_fw6_pld),
2118			       updated_digestsize);
2119
2120		}
2121		goto unmap;
2122	}
2123	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2124	       updated_digestsize);
2125
2126	err = chcr_ahash_continue(req);
2127	if (err)
2128		goto unmap;
2129	return;
2130unmap:
2131	if (hctx_wr->is_sg_map)
2132		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2133
2134
2135out:
2136	chcr_dec_wrcount(dev);
2137	req->base.complete(&req->base, err);
2138}
2139
2140/*
2141 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
2142 *	@req: crypto request
 *	@input: completion data returned by the hardware
 *	@err: completion status reported by the hardware
2143 */
2144int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2145			 int err)
2146{
2147	struct crypto_tfm *tfm = req->tfm;
2148	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2149	struct adapter *adap = padap(ctx->dev);
2150
2151	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2152	case CRYPTO_ALG_TYPE_AEAD:
2153		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2154		break;
2155
2156	case CRYPTO_ALG_TYPE_SKCIPHER:
2157		 chcr_handle_cipher_resp(skcipher_request_cast(req),
2158					       input, err);
2159		break;
2160	case CRYPTO_ALG_TYPE_AHASH:
2161		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2162	}
2163	atomic_inc(&adap->chcr_stats.complete);
2164	return err;
2165}
2166static int chcr_ahash_export(struct ahash_request *areq, void *out)
2167{
2168	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2169	struct chcr_ahash_req_ctx *state = out;
2170
2171	state->reqlen = req_ctx->reqlen;
2172	state->data_len = req_ctx->data_len;
2173	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2174	memcpy(state->partial_hash, req_ctx->partial_hash,
2175	       CHCR_HASH_MAX_DIGEST_SIZE);
2176	chcr_init_hctx_per_wr(state);
2177	return 0;
2178}
2179
2180static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2181{
2182	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2183	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2184
2185	req_ctx->reqlen = state->reqlen;
2186	req_ctx->data_len = state->data_len;
2187	req_ctx->reqbfr = req_ctx->bfr1;
2188	req_ctx->skbfr = req_ctx->bfr2;
2189	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2190	memcpy(req_ctx->partial_hash, state->partial_hash,
2191	       CHCR_HASH_MAX_DIGEST_SIZE);
2192	chcr_init_hctx_per_wr(req_ctx);
2193	return 0;
2194}
2195
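/*
 * chcr_ahash_setkey() - HMAC setkey. Keys longer than the block size are
 * first hashed down to the digest size; the ipad/opad blocks are then
 * derived and their partial hashes stored for the hardware key context.
 */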
2196static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2197			     unsigned int keylen)
2198{
2199	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2200	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2201	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2202	unsigned int i, err = 0, updated_digestsize;
2203
2204	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2205
2206	/* Use the key to calculate the ipad and opad. The ipad is sent with
2207	 * the first request's data and the opad with the final hash result;
2208	 * they are kept in hmacctx->ipad and hmacctx->opad respectively.
2209	 */
2210	shash->tfm = hmacctx->base_hash;
2211	if (keylen > bs) {
2212		err = crypto_shash_digest(shash, key, keylen,
2213					  hmacctx->ipad);
2214		if (err)
2215			goto out;
2216		keylen = digestsize;
2217	} else {
2218		memcpy(hmacctx->ipad, key, keylen);
2219	}
2220	memset(hmacctx->ipad + keylen, 0, bs - keylen);
2221	memcpy(hmacctx->opad, hmacctx->ipad, bs);
2222
2223	for (i = 0; i < bs / sizeof(int); i++) {
2224		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2225		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2226	}
2227
2228	updated_digestsize = digestsize;
2229	if (digestsize == SHA224_DIGEST_SIZE)
2230		updated_digestsize = SHA256_DIGEST_SIZE;
2231	else if (digestsize == SHA384_DIGEST_SIZE)
2232		updated_digestsize = SHA512_DIGEST_SIZE;
2233	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2234					hmacctx->ipad, digestsize);
2235	if (err)
2236		goto out;
2237	chcr_change_order(hmacctx->ipad, updated_digestsize);
2238
2239	err = chcr_compute_partial_hash(shash, hmacctx->opad,
2240					hmacctx->opad, digestsize);
2241	if (err)
2242		goto out;
2243	chcr_change_order(hmacctx->opad, updated_digestsize);
2244out:
2245	return err;
2246}
2247
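/*
 * chcr_aes_xts_setkey() - store the combined XTS key, program the
 * software fallback and build the key context header. 48 byte (AES-192)
 * keys are zero padded so that each key half is 16 byte aligned.
 */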
2248static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2249			       unsigned int key_len)
2250{
2251	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2252	unsigned short context_size = 0;
2253	int err;
2254
2255	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2256	if (err)
2257		goto badkey_err;
2258
2259	memcpy(ablkctx->key, key, key_len);
2260	ablkctx->enckey_len = key_len;
2261	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2262	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2263	/* Both XTS key halves must be aligned to a 16 byte boundary by zero
2264	 * padding, so each 24 byte (AES-192) key half gets 8 zero bytes.
2265	 */
2266	if (key_len == 48) {
2267		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
2268				+ 16) >> 4;
2269		memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
2270		memset(ablkctx->key + 24, 0, 8);
2271		memset(ablkctx->key + 56, 0, 8);
2272		ablkctx->enckey_len = 64;
2273		ablkctx->key_ctx_hdr =
2274			FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
2275					 CHCR_KEYCTX_NO_KEY, 1,
2276					 0, context_size);
2277	} else {
2278		ablkctx->key_ctx_hdr =
2279		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2280				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2281				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2282				 CHCR_KEYCTX_NO_KEY, 1,
2283				 0, context_size);
2284	}
2285	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2286	return 0;
2287badkey_err:
2288	ablkctx->enckey_len = 0;
2289
2290	return err;
2291}
2292
2293static int chcr_sha_init(struct ahash_request *areq)
2294{
2295	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2296	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2297	int digestsize =  crypto_ahash_digestsize(tfm);
2298
2299	req_ctx->data_len = 0;
2300	req_ctx->reqlen = 0;
2301	req_ctx->reqbfr = req_ctx->bfr1;
2302	req_ctx->skbfr = req_ctx->bfr2;
2303	copy_hash_init_values(req_ctx->partial_hash, digestsize);
2304
2305	return 0;
2306}
2307
2308static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2309{
2310	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2311				 sizeof(struct chcr_ahash_req_ctx));
2312	return chcr_device_init(crypto_tfm_ctx(tfm));
2313}
2314
2315static int chcr_hmac_init(struct ahash_request *areq)
2316{
2317	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2318	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2319	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2320	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2321	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2322
2323	chcr_sha_init(areq);
2324	req_ctx->data_len = bs;
2325	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2326		if (digestsize == SHA224_DIGEST_SIZE)
2327			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2328			       SHA256_DIGEST_SIZE);
2329		else if (digestsize == SHA384_DIGEST_SIZE)
2330			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2331			       SHA512_DIGEST_SIZE);
2332		else
2333			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2334			       digestsize);
2335	}
2336	return 0;
2337}
2338
2339static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2340{
2341	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2342	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2343	unsigned int digestsize =
2344		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2345
2346	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2347				 sizeof(struct chcr_ahash_req_ctx));
2348	hmacctx->base_hash = chcr_alloc_shash(digestsize);
2349	if (IS_ERR(hmacctx->base_hash))
2350		return PTR_ERR(hmacctx->base_hash);
2351	return chcr_device_init(crypto_tfm_ctx(tfm));
2352}
2353
2354static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2355{
2356	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2357	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2358
2359	if (hmacctx->base_hash) {
2360		chcr_free_shash(hmacctx->base_hash);
2361		hmacctx->base_hash = NULL;
2362	}
2363}
2364
2365inline void chcr_aead_common_exit(struct aead_request *req)
2366{
2367	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2368	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2369	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2370
2371	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2372}
2373
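/*
 * chcr_aead_common_init() - common AEAD request setup: sanity check the
 * key and cipher length, place the CCM B0 scratch pad after the IV when
 * needed, and DMA map the request buffers.
 */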
2374static int chcr_aead_common_init(struct aead_request *req)
2375{
2376	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2377	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2378	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2379	unsigned int authsize = crypto_aead_authsize(tfm);
2380	int error = -EINVAL;
2381
2382	/* validate key size */
2383	if (aeadctx->enckey_len == 0)
2384		goto err;
2385	if (reqctx->op && req->cryptlen < authsize)
2386		goto err;
2387	if (reqctx->b0_len)
2388		reqctx->scratch_pad = reqctx->iv + IV;
2389	else
2390		reqctx->scratch_pad = NULL;
2391
2392	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2393				  reqctx->op);
2394	if (error) {
2395		error = -ENOMEM;
2396		goto err;
2397	}
2398
2399	return 0;
2400err:
2401	return error;
2402}
2403
2404static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2405				   int aadmax, int wrlen,
2406				   unsigned short op_type)
2407{
2408	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2409
2410	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2411	    dst_nents > MAX_DSGL_ENT ||
2412	    (req->assoclen > aadmax) ||
2413	    (wrlen > SGE_MAX_WR_LEN))
2414		return 1;
2415	return 0;
2416}
2417
2418static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2419{
2420	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2421	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2422	struct aead_request *subreq = aead_request_ctx(req);
2423
2424	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2425	aead_request_set_callback(subreq, req->base.flags,
2426				  req->base.complete, req->base.data);
2427	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2428				 req->iv);
2429	aead_request_set_ad(subreq, req->assoclen);
2430	return op_type ? crypto_aead_decrypt(subreq) :
2431		crypto_aead_encrypt(subreq);
2432}
2433
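/*
 * create_authenc_wr() - build the work request for the authenc (cipher
 * plus hash) and null-cipher AEAD modes. The security CPL, key context,
 * destination DSGL, IV and source SGL are laid out in a single skb.
 */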
2434static struct sk_buff *create_authenc_wr(struct aead_request *req,
2435					 unsigned short qid,
2436					 int size)
2437{
2438	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2439	struct chcr_context *ctx = a_ctx(tfm);
2440	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2441	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2442	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2443	struct sk_buff *skb = NULL;
2444	struct chcr_wr *chcr_req;
2445	struct cpl_rx_phys_dsgl *phys_cpl;
2446	struct ulptx_sgl *ulptx;
2447	unsigned int transhdr_len;
2448	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2449	unsigned int   kctx_len = 0, dnents, snents;
2450	unsigned int  authsize = crypto_aead_authsize(tfm);
2451	int error = -EINVAL;
2452	u8 *ivptr;
2453	int null = 0;
2454	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2455		GFP_ATOMIC;
2456	struct adapter *adap = padap(ctx->dev);
2457	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2458
2459	if (req->cryptlen == 0)
2460		return NULL;
2461
2462	reqctx->b0_len = 0;
2463	error = chcr_aead_common_init(req);
2464	if (error)
2465		return ERR_PTR(error);
2466
2467	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2468		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2469		null = 1;
2470	}
2471	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2472		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2473	dnents += MIN_AUTH_SG; // For IV
2474	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2475			       CHCR_SRC_SG_SIZE, 0);
2476	dst_size = get_space_for_phys_dsgl(dnents);
2477	kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2478		- sizeof(chcr_req->key_ctx);
2479	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2480	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2481			SGE_MAX_WR_LEN;
2482	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2483			: (sgl_len(snents) * 8);
2484	transhdr_len += temp;
2485	transhdr_len = roundup(transhdr_len, 16);
2486
2487	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2488				    transhdr_len, reqctx->op)) {
2489		atomic_inc(&adap->chcr_stats.fallback);
2490		chcr_aead_common_exit(req);
2491		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2492	}
2493	skb = alloc_skb(transhdr_len, flags);
2494	if (!skb) {
2495		error = -ENOMEM;
2496		goto err;
2497	}
2498
2499	chcr_req = __skb_put_zero(skb, transhdr_len);
2500
2501	temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2502
2503	/*
2504	 * Input order is AAD, IV and payload, where the IV is included as
2505	 * part of the authenticated data. All other fields are filled
2506	 * according to the hardware spec.
2507	 */
2508	chcr_req->sec_cpl.op_ivinsrtofst =
2509				FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2510	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2511	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2512					null ? 0 : 1 + IV,
2513					null ? 0 : IV + req->assoclen,
2514					req->assoclen + IV + 1,
2515					(temp & 0x1F0) >> 4);
2516	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2517					temp & 0xF,
2518					null ? 0 : req->assoclen + IV + 1,
2519					temp, temp);
2520	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2521	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2522		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2523	else
2524		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2525	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2526					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2527					temp,
2528					actx->auth_mode, aeadctx->hmac_ctrl,
2529					IV >> 1);
2530	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2531					 0, 0, dst_size);
2532
2533	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2534	if (reqctx->op == CHCR_ENCRYPT_OP ||
2535		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2536		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2537		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2538		       aeadctx->enckey_len);
2539	else
2540		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2541		       aeadctx->enckey_len);
2542
2543	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2544	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2545	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2546	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2547	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2548	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2549	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2550		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2551		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2552				CTR_RFC3686_IV_SIZE);
2553		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2554			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2555	} else {
2556		memcpy(ivptr, req->iv, IV);
2557	}
2558	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2559	chcr_add_aead_src_ent(req, ulptx);
2560	atomic_inc(&adap->chcr_stats.cipher_rqst);
2561	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2562		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2563	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2564		   transhdr_len, temp, 0);
2565	reqctx->skb = skb;
2566
2567	return skb;
2568err:
2569	chcr_aead_common_exit(req);
2570
2571	return ERR_PTR(error);
2572}
2573
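/*
 * chcr_aead_dma_map() - DMA map the IV/B0 scratch buffer and the source
 * and destination scatterlists. In-place requests use a single
 * bidirectional mapping of the shared scatterlist.
 */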
2574int chcr_aead_dma_map(struct device *dev,
2575		      struct aead_request *req,
2576		      unsigned short op_type)
2577{
2578	int error;
2579	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2580	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2581	unsigned int authsize = crypto_aead_authsize(tfm);
2582	int src_len, dst_len;
2583
2584	/* Calculate and handle the src and dst sg lengths separately for
2585	 * in-place and out-of-place operations.
2586	 */
2587	if (req->src == req->dst) {
2588		src_len = req->assoclen + req->cryptlen + (op_type ?
2589							0 : authsize);
2590		dst_len = src_len;
2591	} else {
2592		src_len = req->assoclen + req->cryptlen;
2593		dst_len = req->assoclen + req->cryptlen + (op_type ?
2594							-authsize : authsize);
2595	}
2596
2597	if (!req->cryptlen || !src_len || !dst_len)
2598		return 0;
2599	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2600					DMA_BIDIRECTIONAL);
2601	if (dma_mapping_error(dev, reqctx->iv_dma))
2602		return -ENOMEM;
2603	if (reqctx->b0_len)
2604		reqctx->b0_dma = reqctx->iv_dma + IV;
2605	else
2606		reqctx->b0_dma = 0;
2607	if (req->src == req->dst) {
2608		error = dma_map_sg(dev, req->src,
2609				sg_nents_for_len(req->src, src_len),
2610					DMA_BIDIRECTIONAL);
2611		if (!error)
2612			goto err;
2613	} else {
2614		error = dma_map_sg(dev, req->src,
2615				   sg_nents_for_len(req->src, src_len),
2616				   DMA_TO_DEVICE);
2617		if (!error)
2618			goto err;
2619		error = dma_map_sg(dev, req->dst,
2620				   sg_nents_for_len(req->dst, dst_len),
2621				   DMA_FROM_DEVICE);
2622		if (!error) {
2623			dma_unmap_sg(dev, req->src,
2624				     sg_nents_for_len(req->src, src_len),
2625				     DMA_TO_DEVICE);
2626			goto err;
2627		}
2628	}
2629
2630	return 0;
2631err:
2632	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2633	return -ENOMEM;
2634}
2635
2636void chcr_aead_dma_unmap(struct device *dev,
2637			 struct aead_request *req,
2638			 unsigned short op_type)
2639{
2640	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2641	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2642	unsigned int authsize = crypto_aead_authsize(tfm);
2643	int src_len, dst_len;
2644
2645	/* Calculate and handle the src and dst sg lengths separately for
2646	 * in-place and out-of-place operations.
2647	 */
2648	if (req->src == req->dst) {
2649		src_len = req->assoclen + req->cryptlen + (op_type ?
2650							0 : authsize);
2651		dst_len = src_len;
2652	} else {
2653		src_len = req->assoclen + req->cryptlen;
2654		dst_len = req->assoclen + req->cryptlen + (op_type ?
2655						-authsize : authsize);
2656	}
2657
2658	if (!req->cryptlen || !src_len || !dst_len)
2659		return;
2660
2661	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2662					DMA_BIDIRECTIONAL);
2663	if (req->src == req->dst) {
2664		dma_unmap_sg(dev, req->src,
2665			     sg_nents_for_len(req->src, src_len),
2666			     DMA_BIDIRECTIONAL);
2667	} else {
2668		dma_unmap_sg(dev, req->src,
2669			     sg_nents_for_len(req->src, src_len),
2670			     DMA_TO_DEVICE);
2671		dma_unmap_sg(dev, req->dst,
2672			     sg_nents_for_len(req->dst, dst_len),
2673			     DMA_FROM_DEVICE);
2674	}
2675}
2676
2677void chcr_add_aead_src_ent(struct aead_request *req,
2678			   struct ulptx_sgl *ulptx)
2679{
2680	struct ulptx_walk ulp_walk;
2681	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2682
2683	if (reqctx->imm) {
2684		u8 *buf = (u8 *)ulptx;
2685
2686		if (reqctx->b0_len) {
2687			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2688			buf += reqctx->b0_len;
2689		}
2690		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2691				   buf, req->cryptlen + req->assoclen, 0);
2692	} else {
2693		ulptx_walk_init(&ulp_walk, ulptx);
2694		if (reqctx->b0_len)
2695			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2696					    reqctx->b0_dma);
2697		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2698				  req->assoclen,  0);
2699		ulptx_walk_end(&ulp_walk);
2700	}
2701}
2702
2703void chcr_add_aead_dst_ent(struct aead_request *req,
2704			   struct cpl_rx_phys_dsgl *phys_cpl,
2705			   unsigned short qid)
2706{
2707	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2708	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2709	struct dsgl_walk dsgl_walk;
2710	unsigned int authsize = crypto_aead_authsize(tfm);
2711	struct chcr_context *ctx = a_ctx(tfm);
2712	u32 temp;
2713	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2714
2715	dsgl_walk_init(&dsgl_walk, phys_cpl);
2716	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2717	temp = req->assoclen + req->cryptlen +
2718		(reqctx->op ? -authsize : authsize);
2719	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2720	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2721}
2722
2723void chcr_add_cipher_src_ent(struct skcipher_request *req,
2724			     void *ulptx,
2725			     struct  cipher_wr_param *wrparam)
2726{
2727	struct ulptx_walk ulp_walk;
2728	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2729	u8 *buf = ulptx;
2730
2731	memcpy(buf, reqctx->iv, IV);
2732	buf += IV;
2733	if (reqctx->imm) {
2734		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2735				   buf, wrparam->bytes, reqctx->processed);
2736	} else {
2737		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2738		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2739				  reqctx->src_ofst);
2740		reqctx->srcsg = ulp_walk.last_sg;
2741		reqctx->src_ofst = ulp_walk.last_sg_len;
2742		ulptx_walk_end(&ulp_walk);
2743	}
2744}
2745
2746void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2747			     struct cpl_rx_phys_dsgl *phys_cpl,
2748			     struct  cipher_wr_param *wrparam,
2749			     unsigned short qid)
2750{
2751	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2752	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2753	struct chcr_context *ctx = c_ctx(tfm);
2754	struct dsgl_walk dsgl_walk;
2755	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2756
2757	dsgl_walk_init(&dsgl_walk, phys_cpl);
2758	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2759			 reqctx->dst_ofst);
2760	reqctx->dstsg = dsgl_walk.last_sg;
2761	reqctx->dst_ofst = dsgl_walk.last_sg_len;
2762	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2763}
2764
2765void chcr_add_hash_src_ent(struct ahash_request *req,
2766			   struct ulptx_sgl *ulptx,
2767			   struct hash_wr_param *param)
2768{
2769	struct ulptx_walk ulp_walk;
2770	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2771
2772	if (reqctx->hctx_wr.imm) {
2773		u8 *buf = (u8 *)ulptx;
2774
2775		if (param->bfr_len) {
2776			memcpy(buf, reqctx->reqbfr, param->bfr_len);
2777			buf += param->bfr_len;
2778		}
2779
2780		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2781				   sg_nents(reqctx->hctx_wr.srcsg), buf,
2782				   param->sg_len, 0);
2783	} else {
2784		ulptx_walk_init(&ulp_walk, ulptx);
2785		if (param->bfr_len)
2786			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2787					    reqctx->hctx_wr.dma_addr);
2788		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2789				  param->sg_len, reqctx->hctx_wr.src_ofst);
2790		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2791		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2792		ulptx_walk_end(&ulp_walk);
2793	}
2794}
2795
2796int chcr_hash_dma_map(struct device *dev,
2797		      struct ahash_request *req)
2798{
2799	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2800	int error = 0;
2801
2802	if (!req->nbytes)
2803		return 0;
2804	error = dma_map_sg(dev, req->src, sg_nents(req->src),
2805			   DMA_TO_DEVICE);
2806	if (!error)
2807		return -ENOMEM;
2808	req_ctx->hctx_wr.is_sg_map = 1;
2809	return 0;
2810}
2811
2812void chcr_hash_dma_unmap(struct device *dev,
2813			 struct ahash_request *req)
2814{
2815	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2816
2817	if (!req->nbytes)
2818		return;
2819
2820	dma_unmap_sg(dev, req->src, sg_nents(req->src),
2821			   DMA_TO_DEVICE);
2822	req_ctx->hctx_wr.is_sg_map = 0;
2823
2824}
2825
2826int chcr_cipher_dma_map(struct device *dev,
2827			struct skcipher_request *req)
2828{
2829	int error;
2830
2831	if (req->src == req->dst) {
2832		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2833				   DMA_BIDIRECTIONAL);
2834		if (!error)
2835			goto err;
2836	} else {
2837		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2838				   DMA_TO_DEVICE);
2839		if (!error)
2840			goto err;
2841		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2842				   DMA_FROM_DEVICE);
2843		if (!error) {
2844			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2845				   DMA_TO_DEVICE);
2846			goto err;
2847		}
2848	}
2849
2850	return 0;
2851err:
2852	return -ENOMEM;
2853}
2854
2855void chcr_cipher_dma_unmap(struct device *dev,
2856			   struct skcipher_request *req)
2857{
2858	if (req->src == req->dst) {
2859		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2860				   DMA_BIDIRECTIONAL);
2861	} else {
2862		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2863				   DMA_TO_DEVICE);
2864		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2865				   DMA_FROM_DEVICE);
2866	}
2867}
2868
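/*
 * set_msg_len() - encode the CCM message length big-endian into the
 * csize byte length field of the B0 block; returns -EOVERFLOW when the
 * length does not fit into the field.
 */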
2869static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2870{
2871	__be32 data;
2872
2873	memset(block, 0, csize);
2874	block += csize;
2875
2876	if (csize >= 4)
2877		csize = 4;
2878	else if (msglen > (unsigned int)(1 << (8 * csize)))
2879		return -EOVERFLOW;
2880
2881	data = cpu_to_be32(msglen);
2882	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2883
2884	return 0;
2885}
2886
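/*
 * generate_b0() - construct the CCM B0 block in the scratch pad from the
 * formatted IV: flag byte (tag length and adata bit), nonce and message
 * length.
 */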
2887static int generate_b0(struct aead_request *req, u8 *ivptr,
2888			unsigned short op_type)
2889{
2890	unsigned int l, lp, m;
2891	int rc;
2892	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2893	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2894	u8 *b0 = reqctx->scratch_pad;
2895
2896	m = crypto_aead_authsize(aead);
2897
2898	memcpy(b0, ivptr, 16);
2899
2900	lp = b0[0];
2901	l = lp + 1;
2902
2903	/* set m, bits 3-5 */
2904	*b0 |= (8 * ((m - 2) / 2));
2905
2906	/* set adata, bit 6, if associated data is used */
2907	if (req->assoclen)
2908		*b0 |= 64;
2909	rc = set_msg_len(b0 + 16 - l,
2910			 (op_type == CHCR_DECRYPT_OP) ?
2911			 req->cryptlen - m : req->cryptlen, l);
2912
2913	return rc;
2914}
2915
2916static inline int crypto_ccm_check_iv(const u8 *iv)
2917{
2918	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2919	if (iv[0] < 1 || iv[0] > 7)
2920		return -EINVAL;
2921
2922	return 0;
2923}
2924
2925static int ccm_format_packet(struct aead_request *req,
2926			     u8 *ivptr,
2927			     unsigned int sub_type,
2928			     unsigned short op_type,
2929			     unsigned int assoclen)
2930{
2931	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2932	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2933	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2934	int rc = 0;
2935
2936	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2937		ivptr[0] = 3;
2938		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2939		memcpy(ivptr + 4, req->iv, 8);
2940		memset(ivptr + 12, 0, 4);
2941	} else {
2942		memcpy(ivptr, req->iv, 16);
2943	}
2944	if (assoclen)
2945		put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
2946
2947	rc = generate_b0(req, ivptr, op_type);
2948	/* zero the ctr value */
2949	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2950	return rc;
2951}
2952
2953static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2954				  unsigned int dst_size,
2955				  struct aead_request *req,
2956				  unsigned short op_type)
2957{
2958	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2959	struct chcr_context *ctx = a_ctx(tfm);
2960	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2961	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2962	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2963	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2964	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2965	unsigned int ccm_xtra;
2966	unsigned int tag_offset = 0, auth_offset = 0;
2967	unsigned int assoclen;
2968
2969	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2970		assoclen = req->assoclen - 8;
2971	else
2972		assoclen = req->assoclen;
2973	ccm_xtra = CCM_B0_SIZE +
2974		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2975
2976	auth_offset = req->cryptlen ?
2977		(req->assoclen + IV + 1 + ccm_xtra) : 0;
2978	if (op_type == CHCR_DECRYPT_OP) {
2979		if (crypto_aead_authsize(tfm) != req->cryptlen)
2980			tag_offset = crypto_aead_authsize(tfm);
2981		else
2982			auth_offset = 0;
2983	}
2984
2985	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2986	sec_cpl->pldlen =
2987		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2988	/* For CCM, B0 is always present, so the AAD always starts at 1 */
2989	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2990				1 + IV,	IV + assoclen + ccm_xtra,
2991				req->assoclen + IV + 1 + ccm_xtra, 0);
2992
2993	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2994					auth_offset, tag_offset,
2995					(op_type == CHCR_ENCRYPT_OP) ? 0 :
2996					crypto_aead_authsize(tfm));
2997	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2998					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2999					cipher_mode, mac_mode,
3000					aeadctx->hmac_ctrl, IV >> 1);
3001
3002	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
3003					0, dst_size);
3004}
3005
3006static int aead_ccm_validate_input(unsigned short op_type,
3007				   struct aead_request *req,
3008				   struct chcr_aead_ctx *aeadctx,
3009				   unsigned int sub_type)
3010{
3011	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
3012		if (crypto_ccm_check_iv(req->iv)) {
3013			pr_err("CCM: IV check fails\n");
3014			return -EINVAL;
3015		}
3016	} else {
3017		if (req->assoclen != 16 && req->assoclen != 20) {
3018			pr_err("RFC4309: Invalid AAD length %d\n",
3019			       req->assoclen);
3020			return -EINVAL;
3021		}
3022	}
3023	return 0;
3024}
3025
3026static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
3027					  unsigned short qid,
3028					  int size)
3029{
3030	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3031	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3032	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3033	struct sk_buff *skb = NULL;
3034	struct chcr_wr *chcr_req;
3035	struct cpl_rx_phys_dsgl *phys_cpl;
3036	struct ulptx_sgl *ulptx;
3037	unsigned int transhdr_len;
3038	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
3039	unsigned int sub_type, assoclen = req->assoclen;
3040	unsigned int authsize = crypto_aead_authsize(tfm);
3041	int error = -EINVAL;
3042	u8 *ivptr;
3043	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3044		GFP_ATOMIC;
3045	struct adapter *adap = padap(a_ctx(tfm)->dev);
3046
3047	sub_type = get_aead_subtype(tfm);
3048	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
3049		assoclen -= 8;
3050	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
3051	error = chcr_aead_common_init(req);
3052	if (error)
3053		return ERR_PTR(error);
3054
3055	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3056	if (error)
3057		goto err;
3058	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
3059			+ (reqctx->op ? -authsize : authsize),
3060			CHCR_DST_SG_SIZE, 0);
3061	dnents += MIN_CCM_SG; // For IV and B0
3062	dst_size = get_space_for_phys_dsgl(dnents);
3063	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3064			       CHCR_SRC_SG_SIZE, 0);
3065	snents += MIN_CCM_SG; //For B0
3066	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
3067	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3068	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
3069		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
3070	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
3071				     reqctx->b0_len, 16) :
3072		(sgl_len(snents) *  8);
3073	transhdr_len += temp;
3074	transhdr_len = roundup(transhdr_len, 16);
3075
3076	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
3077				reqctx->b0_len, transhdr_len, reqctx->op)) {
3078		atomic_inc(&adap->chcr_stats.fallback);
3079		chcr_aead_common_exit(req);
3080		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3081	}
3082	skb = alloc_skb(transhdr_len,  flags);
3083
3084	if (!skb) {
3085		error = -ENOMEM;
3086		goto err;
3087	}
3088
3089	chcr_req = __skb_put_zero(skb, transhdr_len);
3090
3091	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3092
3093	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3094	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3095	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3096			aeadctx->key, aeadctx->enckey_len);
3097
3098	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3099	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3100	ulptx = (struct ulptx_sgl *)(ivptr + IV);
3101	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3102	if (error)
3103		goto dstmap_fail;
3104	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3105	chcr_add_aead_src_ent(req, ulptx);
3106
3107	atomic_inc(&adap->chcr_stats.aead_rqst);
3108	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3109		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3110		reqctx->b0_len) : 0);
3111	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3112		    transhdr_len, temp, 0);
3113	reqctx->skb = skb;
3114
3115	return skb;
3116dstmap_fail:
3117	kfree_skb(skb);
3118err:
3119	chcr_aead_common_exit(req);
3120	return ERR_PTR(error);
3121}
3122
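/*
 * create_gcm_wr() - build the work request for GCM and RFC4106 requests.
 * The 16 byte IV is SALT | IV | 0x00000001 for RFC4106 and
 * IV | 0x00000001 otherwise; the hash key H follows the AES key in the
 * key context.
 */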
3123static struct sk_buff *create_gcm_wr(struct aead_request *req,
3124				     unsigned short qid,
3125				     int size)
3126{
3127	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3128	struct chcr_context *ctx = a_ctx(tfm);
3129	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3130	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3131	struct sk_buff *skb = NULL;
3132	struct chcr_wr *chcr_req;
3133	struct cpl_rx_phys_dsgl *phys_cpl;
3134	struct ulptx_sgl *ulptx;
3135	unsigned int transhdr_len, dnents = 0, snents;
3136	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3137	unsigned int authsize = crypto_aead_authsize(tfm);
3138	int error = -EINVAL;
3139	u8 *ivptr;
3140	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3141		GFP_ATOMIC;
3142	struct adapter *adap = padap(ctx->dev);
3143	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3144
3145	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3146		assoclen = req->assoclen - 8;
3147
3148	reqctx->b0_len = 0;
3149	error = chcr_aead_common_init(req);
3150	if (error)
3151		return ERR_PTR(error);
3152	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3153				(reqctx->op ? -authsize : authsize),
3154				CHCR_DST_SG_SIZE, 0);
3155	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3156			       CHCR_SRC_SG_SIZE, 0);
3157	dnents += MIN_GCM_SG; // For IV
3158	dst_size = get_space_for_phys_dsgl(dnents);
3159	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3160	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3161	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3162			SGE_MAX_WR_LEN;
3163	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3164		(sgl_len(snents) * 8);
3165	transhdr_len += temp;
3166	transhdr_len = roundup(transhdr_len, 16);
3167	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3168			    transhdr_len, reqctx->op)) {
3169
3170		atomic_inc(&adap->chcr_stats.fallback);
3171		chcr_aead_common_exit(req);
3172		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3173	}
3174	skb = alloc_skb(transhdr_len, flags);
3175	if (!skb) {
3176		error = -ENOMEM;
3177		goto err;
3178	}
3179
3180	chcr_req = __skb_put_zero(skb, transhdr_len);
3181
3182	//Offset of tag from end
3183	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3184	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3185						rx_channel_id, 2, 1);
3186	chcr_req->sec_cpl.pldlen =
3187		htonl(req->assoclen + IV + req->cryptlen);
3188	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3189					assoclen ? 1 + IV : 0,
3190					assoclen ? IV + assoclen : 0,
3191					req->assoclen + IV + 1, 0);
3192	chcr_req->sec_cpl.cipherstop_lo_authinsert =
3193			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3194						temp, temp);
3195	chcr_req->sec_cpl.seqno_numivs =
3196			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3197					CHCR_ENCRYPT_OP) ? 1 : 0,
3198					CHCR_SCMD_CIPHER_MODE_AES_GCM,
3199					CHCR_SCMD_AUTH_MODE_GHASH,
3200					aeadctx->hmac_ctrl, IV >> 1);
3201	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3202					0, 0, dst_size);
3203	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3204	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3205	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3206	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3207
3208	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3209	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3210	/* Prepare a 16 byte IV */
3211	/* SALT | IV | 0x00000001 */
3212	if (get_aead_subtype(tfm) ==
3213	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3214		memcpy(ivptr, aeadctx->salt, 4);
3215		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3216	} else {
3217		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3218	}
3219	put_unaligned_be32(0x01, &ivptr[12]);
3220	ulptx = (struct ulptx_sgl *)(ivptr + 16);
3221
3222	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3223	chcr_add_aead_src_ent(req, ulptx);
3224	atomic_inc(&adap->chcr_stats.aead_rqst);
3225	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3226		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3227	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3228		    transhdr_len, temp, reqctx->verify);
3229	reqctx->skb = skb;
3230	return skb;
3231
3232err:
3233	chcr_aead_common_exit(req);
3234	return ERR_PTR(error);
3235}
3236
3237
3238
3239static int chcr_aead_cra_init(struct crypto_aead *tfm)
3240{
3241	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3242	struct aead_alg *alg = crypto_aead_alg(tfm);
3243
3244	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3245					       CRYPTO_ALG_NEED_FALLBACK |
3246					       CRYPTO_ALG_ASYNC);
3247	if  (IS_ERR(aeadctx->sw_cipher))
3248		return PTR_ERR(aeadctx->sw_cipher);
3249	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3250				 sizeof(struct aead_request) +
3251				 crypto_aead_reqsize(aeadctx->sw_cipher)));
3252	return chcr_device_init(a_ctx(tfm));
3253}
3254
3255static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3256{
3257	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3258
3259	crypto_free_aead(aeadctx->sw_cipher);
3260}
3261
3262static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3263					unsigned int authsize)
3264{
3265	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3266
3267	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3268	aeadctx->mayverify = VERIFY_HW;
3269	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3270}
3271static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3272				    unsigned int authsize)
3273{
3274	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3275	u32 maxauth = crypto_aead_maxauthsize(tfm);
3276
3277	/* The SHA1 authsize used in IPsec is 12, not 10, i.e. maxauthsize / 2
3278	 * does not hold for SHA1, so the authsize == 12 check must come before
3279	 * the authsize == (maxauth >> 1) check.
3280	 */
3281	if (authsize == ICV_4) {
3282		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3283		aeadctx->mayverify = VERIFY_HW;
3284	} else if (authsize == ICV_6) {
3285		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3286		aeadctx->mayverify = VERIFY_HW;
3287	} else if (authsize == ICV_10) {
3288		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3289		aeadctx->mayverify = VERIFY_HW;
3290	} else if (authsize == ICV_12) {
3291		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3292		aeadctx->mayverify = VERIFY_HW;
3293	} else if (authsize == ICV_14) {
3294		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3295		aeadctx->mayverify = VERIFY_HW;
3296	} else if (authsize == (maxauth >> 1)) {
3297		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3298		aeadctx->mayverify = VERIFY_HW;
3299	} else if (authsize == maxauth) {
3300		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3301		aeadctx->mayverify = VERIFY_HW;
3302	} else {
3303		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3304		aeadctx->mayverify = VERIFY_SW;
3305	}
3306	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3307}
3308
3309
3310static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3311{
3312	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3313
3314	switch (authsize) {
3315	case ICV_4:
3316		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3317		aeadctx->mayverify = VERIFY_HW;
3318		break;
3319	case ICV_8:
3320		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3321		aeadctx->mayverify = VERIFY_HW;
3322		break;
3323	case ICV_12:
3324		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3325		aeadctx->mayverify = VERIFY_HW;
3326		break;
3327	case ICV_14:
3328		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3329		aeadctx->mayverify = VERIFY_HW;
3330		break;
3331	case ICV_16:
3332		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3333		aeadctx->mayverify = VERIFY_HW;
3334		break;
3335	case ICV_13:
3336	case ICV_15:
3337		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3338		aeadctx->mayverify = VERIFY_SW;
3339		break;
3340	default:
3341		return -EINVAL;
3342	}
3343	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3344}
3345
3346static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3347					  unsigned int authsize)
3348{
3349	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3350
3351	switch (authsize) {
3352	case ICV_8:
3353		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3354		aeadctx->mayverify = VERIFY_HW;
3355		break;
3356	case ICV_12:
3357		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3358		aeadctx->mayverify = VERIFY_HW;
3359		break;
3360	case ICV_16:
3361		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3362		aeadctx->mayverify = VERIFY_HW;
3363		break;
3364	default:
3365		return -EINVAL;
3366	}
3367	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3368}
3369
3370static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3371				unsigned int authsize)
3372{
3373	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3374
3375	switch (authsize) {
3376	case ICV_4:
3377		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3378		aeadctx->mayverify = VERIFY_HW;
3379		break;
3380	case ICV_6:
3381		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3382		aeadctx->mayverify = VERIFY_HW;
3383		break;
3384	case ICV_8:
3385		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3386		aeadctx->mayverify = VERIFY_HW;
3387		break;
3388	case ICV_10:
3389		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3390		aeadctx->mayverify = VERIFY_HW;
3391		break;
3392	case ICV_12:
3393		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3394		aeadctx->mayverify = VERIFY_HW;
3395		break;
3396	case ICV_14:
3397		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3398		aeadctx->mayverify = VERIFY_HW;
3399		break;
3400	case ICV_16:
3401		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3402		aeadctx->mayverify = VERIFY_HW;
3403		break;
3404	default:
3405		return -EINVAL;
3406	}
3407	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3408}
3409
3410static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3411				const u8 *key,
3412				unsigned int keylen)
3413{
3414	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3415	unsigned char ck_size, mk_size;
3416	int key_ctx_size = 0;
3417
3418	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3419	if (keylen == AES_KEYSIZE_128) {
3420		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3421		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3422	} else if (keylen == AES_KEYSIZE_192) {
3423		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3424		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3425	} else if (keylen == AES_KEYSIZE_256) {
3426		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3427		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3428	} else {
3429		aeadctx->enckey_len = 0;
3430		return	-EINVAL;
3431	}
3432	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3433						key_ctx_size >> 4);
3434	memcpy(aeadctx->key, key, keylen);
3435	aeadctx->enckey_len = keylen;
3436
3437	return 0;
3438}
3439
3440static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3441				const u8 *key,
3442				unsigned int keylen)
3443{
3444	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3445	int error;
3446
3447	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3448	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3449			      CRYPTO_TFM_REQ_MASK);
3450	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3451	if (error)
3452		return error;
3453	return chcr_ccm_common_setkey(aead, key, keylen);
3454}
3455
3456static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3457				    unsigned int keylen)
3458{
3459	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3460	int error;
3461
3462	if (keylen < 3) {
3463		aeadctx->enckey_len = 0;
3464		return	-EINVAL;
3465	}
3466	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3467	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3468			      CRYPTO_TFM_REQ_MASK);
3469	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3470	if (error)
3471		return error;
3472	keylen -= 3;
3473	memcpy(aeadctx->salt, key + keylen, 3);
3474	return chcr_ccm_common_setkey(aead, key, keylen);
3475}
3476
3477static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3478			   unsigned int keylen)
3479{
3480	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3481	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3482	unsigned int ck_size;
3483	int ret = 0, key_ctx_size = 0;
3484	struct crypto_aes_ctx aes;
3485
3486	aeadctx->enckey_len = 0;
3487	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3488	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3489			      & CRYPTO_TFM_REQ_MASK);
3490	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3491	if (ret)
3492		goto out;
3493
3494	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3495	    keylen > 3) {
3496		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3497		memcpy(aeadctx->salt, key + keylen, 4);
3498	}
3499	if (keylen == AES_KEYSIZE_128) {
3500		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3501	} else if (keylen == AES_KEYSIZE_192) {
3502		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3503	} else if (keylen == AES_KEYSIZE_256) {
3504		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3505	} else {
3506		pr_err("GCM: Invalid key length %d\n", keylen);
3507		ret = -EINVAL;
3508		goto out;
3509	}
3510
3511	memcpy(aeadctx->key, key, keylen);
3512	aeadctx->enckey_len = keylen;
3513	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3514		AEAD_H_SIZE;
3515	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3516						CHCR_KEYCTX_MAC_KEY_SIZE_128,
3517						0, 0,
3518						key_ctx_size >> 4);
3519	/* Calculate the GHASH hash key H = CIPH(K, 0 repeated 16 times).
3520	 * It will go into the key context.
3521	 */
3522	ret = aes_expandkey(&aes, key, keylen);
3523	if (ret) {
3524		aeadctx->enckey_len = 0;
3525		goto out;
3526	}
3527	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3528	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3529	memzero_explicit(&aes, sizeof(aes));
3530
3531out:
3532	return ret;
3533}
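
/*
 * Illustrative sketch (not part of the driver): the GHASH hash key that
 * chcr_gcm_setkey() places after the AES key in the key context is simply
 * H = CIPH(K, 0^16), computed with the generic AES library.  The helper
 * name below is hypothetical and only isolates that step.
 */
static inline int chcr_example_derive_ghash_h(const u8 *key, unsigned int keylen,
					      u8 ghash_h[AEAD_H_SIZE])
{
	struct crypto_aes_ctx aes;
	int ret;

	ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;
	/* Encrypt an all-zero block in place to obtain H */
	memset(ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, ghash_h, ghash_h);
	memzero_explicit(&aes, sizeof(aes));
	return 0;
}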
3534
3535static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3536				   unsigned int keylen)
3537{
3538	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3539	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3540	/* it contains both the auth and cipher keys */
3541	struct crypto_authenc_keys keys;
3542	unsigned int bs, subtype;
3543	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3544	int err = 0, i, key_ctx_len = 0;
3545	unsigned char ck_size = 0;
3546	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3547	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3548	struct algo_param param;
3549	int align;
3550	u8 *o_ptr = NULL;
3551
3552	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3553	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3554			      & CRYPTO_TFM_REQ_MASK);
3555	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3556	if (err)
3557		goto out;
3558
3559	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3560		goto out;
3561
3562	if (get_alg_config(&param, max_authsize)) {
3563		pr_err("Unsupported digest size\n");
3564		goto out;
3565	}
3566	subtype = get_aead_subtype(authenc);
3567	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3568		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3569		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3570			goto out;
3571		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3572		- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3573		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3574	}
3575	if (keys.enckeylen == AES_KEYSIZE_128) {
3576		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3577	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3578		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3579	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3580		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3581	} else {
3582		pr_err("Unsupported cipher key\n");
3583		goto out;
3584	}
3585
3586	/* Copy only the encryption key. We use the authkey to generate h(ipad)
3587	 * and h(opad), so the authkey is not needed again. authkeylen equals
3588	 * the hash digest size.
3589	 */
3590	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3591	aeadctx->enckey_len = keys.enckeylen;
3592	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3593		subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3594
3595		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3596			    aeadctx->enckey_len << 3);
3597	}
3598	base_hash  = chcr_alloc_shash(max_authsize);
3599	if (IS_ERR(base_hash)) {
3600		pr_err("Base driver cannot be loaded\n");
3601		goto out;
3602	}
3603	{
3604		SHASH_DESC_ON_STACK(shash, base_hash);
3605
3606		shash->tfm = base_hash;
3607		bs = crypto_shash_blocksize(base_hash);
3608		align = KEYCTX_ALIGN_PAD(max_authsize);
3609		o_ptr =  actx->h_iopad + param.result_size + align;
3610
3611		if (keys.authkeylen > bs) {
3612			err = crypto_shash_digest(shash, keys.authkey,
3613						  keys.authkeylen,
3614						  o_ptr);
3615			if (err) {
3616				pr_err("Hashing of the authentication key failed\n");
3617				goto out;
3618			}
3619			keys.authkeylen = max_authsize;
3620		} else
3621			memcpy(o_ptr, keys.authkey, keys.authkeylen);
3622
3623		/* Compute the ipad-digest */
3624		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3625		memcpy(pad, o_ptr, keys.authkeylen);
3626		for (i = 0; i < bs >> 2; i++)
3627			*((unsigned int *)pad + i) ^= IPAD_DATA;
3628
3629		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3630					      max_authsize))
3631			goto out;
3632		/* Compute the opad-digest */
3633		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3634		memcpy(pad, o_ptr, keys.authkeylen);
3635		for (i = 0; i < bs >> 2; i++)
3636			*((unsigned int *)pad + i) ^= OPAD_DATA;
3637
3638		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3639			goto out;
3640
3641		/* convert the ipad and opad digest to network order */
3642		chcr_change_order(actx->h_iopad, param.result_size);
3643		chcr_change_order(o_ptr, param.result_size);
3644		key_ctx_len = sizeof(struct _key_ctx) +
3645			roundup(keys.enckeylen, 16) +
3646			(param.result_size + align) * 2;
3647		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3648						0, 1, key_ctx_len >> 4);
3649		actx->auth_mode = param.auth_mode;
3650		chcr_free_shash(base_hash);
3651
3652		memzero_explicit(&keys, sizeof(keys));
3653		return 0;
3654	}
3655out:
3656	aeadctx->enckey_len = 0;
3657	memzero_explicit(&keys, sizeof(keys));
3658	if (!IS_ERR(base_hash))
3659		chcr_free_shash(base_hash);
3660	return -EINVAL;
3661}
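
/*
 * Illustrative sketch (not part of the driver): the HMAC precomputation
 * performed by chcr_authenc_setkey() above.  The (possibly pre-hashed)
 * authentication key is zero-padded to the hash block size and XORed with
 * the repeating ipad/opad pattern; one compression round over each padded
 * block then yields the partial digests the hardware continues from.
 * The helper name is hypothetical; authkeylen <= bs is assumed, as it is
 * at this point in the setkey path.
 */
static void chcr_example_hmac_pad(u8 *pad, const u8 *authkey,
				  unsigned int authkeylen, unsigned int bs,
				  unsigned int pattern)
{
	int i;

	memcpy(pad, authkey, authkeylen);
	memset(pad + authkeylen, 0, bs - authkeylen);
	for (i = 0; i < bs >> 2; i++)
		*((unsigned int *)pad + i) ^= pattern;	/* IPAD_DATA or OPAD_DATA */
}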
3662
3663static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3664					const u8 *key, unsigned int keylen)
3665{
3666	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3667	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3668	struct crypto_authenc_keys keys;
3669	int err;
3670	/* it contains both the auth and cipher keys */
3671	unsigned int subtype;
3672	int key_ctx_len = 0;
3673	unsigned char ck_size = 0;
3674
3675	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3676	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3677			      & CRYPTO_TFM_REQ_MASK);
3678	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3679	if (err)
3680		goto out;
3681
3682	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3683		goto out;
3684
3685	subtype = get_aead_subtype(authenc);
3686	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3687	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3688		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3689			goto out;
3690		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3691			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3692		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3693	}
3694	if (keys.enckeylen == AES_KEYSIZE_128) {
3695		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3696	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3697		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3698	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3699		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3700	} else {
3701		pr_err("Unsupported cipher key %d\n", keys.enckeylen);
3702		goto out;
3703	}
3704	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3705	aeadctx->enckey_len = keys.enckeylen;
3706	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3707	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3708		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3709				aeadctx->enckey_len << 3);
3710	}
3711	key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3712
3713	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3714						0, key_ctx_len >> 4);
3715	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3716	memzero_explicit(&keys, sizeof(keys));
3717	return 0;
3718out:
3719	aeadctx->enckey_len = 0;
3720	memzero_explicit(&keys, sizeof(keys));
3721	return -EINVAL;
3722}
3723
3724static int chcr_aead_op(struct aead_request *req,
3725			int size,
3726			create_wr_t create_wr_fn)
3727{
3728	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3729	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3730	struct chcr_context *ctx = a_ctx(tfm);
3731	struct uld_ctx *u_ctx = ULD_CTX(ctx);
3732	struct sk_buff *skb;
3733	struct chcr_dev *cdev;
3734
3735	cdev = a_ctx(tfm)->dev;
3736	if (!cdev) {
3737		pr_err("%s : No crypto device.\n", __func__);
3738		return -ENXIO;
3739	}
3740
3741	if (chcr_inc_wrcount(cdev)) {
3742	/* Detach state for CHCR means lldi or padap is freed. We cannot
3743	 * increment the WR count here, so fall back to software.
3744	 */
3745		return chcr_aead_fallback(req, reqctx->op);
3746	}
3747
3748	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3749					reqctx->txqidx) &&
3750		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
3751			chcr_dec_wrcount(cdev);
3752			return -ENOSPC;
3753	}
3754
3755	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3756	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
3757		pr_err("RFC4106: Invalid value of assoclen %d\n",
3758		       req->assoclen);
3759		return -EINVAL;
3760	}
3761
3762	/* Form a WR from req */
3763	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3764
3765	if (IS_ERR_OR_NULL(skb)) {
3766		chcr_dec_wrcount(cdev);
3767		return PTR_ERR_OR_ZERO(skb);
3768	}
3769
3770	skb->dev = u_ctx->lldi.ports[0];
3771	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
3772	chcr_send_wr(skb);
3773	return -EINPROGRESS;
3774}
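
/*
 * Illustrative usage sketch (not part of the driver): how a kernel consumer
 * would exercise these entry points through the generic AEAD API.
 * chcr_aead_op() returns -EINPROGRESS on successful submission, -ENOSPC when
 * the crypto queue is full and the request did not set
 * CRYPTO_TFM_REQ_MAY_BACKLOG, and falls back to the software cipher when the
 * device is detaching.  All buffers, sizes and the helper name below are
 * hypothetical.
 */
static int chcr_example_gcm_encrypt(struct scatterlist *src,
				    struct scatterlist *dst,
				    unsigned int cryptlen, unsigned int assoclen,
				    const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;
	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_tfm;
	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				  CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);
	/* crypto_wait_req() turns -EINPROGRESS/-EBUSY into the final status */
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}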
3775
3776static int chcr_aead_encrypt(struct aead_request *req)
3777{
3778	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3779	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3780	struct chcr_context *ctx = a_ctx(tfm);
3781	unsigned int cpu;
3782
3783	cpu = get_cpu();
3784	reqctx->txqidx = cpu % ctx->ntxq;
3785	reqctx->rxqidx = cpu % ctx->nrxq;
3786	put_cpu();
3787
3788	reqctx->verify = VERIFY_HW;
3789	reqctx->op = CHCR_ENCRYPT_OP;
3790
3791	switch (get_aead_subtype(tfm)) {
3792	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3793	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3794	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3795	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3796		return chcr_aead_op(req, 0, create_authenc_wr);
3797	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3798	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3799		return chcr_aead_op(req, 0, create_aead_ccm_wr);
3800	default:
3801		return chcr_aead_op(req, 0, create_gcm_wr);
3802	}
3803}
3804
3805static int chcr_aead_decrypt(struct aead_request *req)
3806{
3807	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3808	struct chcr_context *ctx = a_ctx(tfm);
3809	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3810	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3811	int size;
3812	unsigned int cpu;
3813
3814	cpu = get_cpu();
3815	reqctx->txqidx = cpu % ctx->ntxq;
3816	reqctx->rxqidx = cpu % ctx->nrxq;
3817	put_cpu();
3818
3819	if (aeadctx->mayverify == VERIFY_SW) {
3820		size = crypto_aead_maxauthsize(tfm);
3821		reqctx->verify = VERIFY_SW;
3822	} else {
3823		size = 0;
3824		reqctx->verify = VERIFY_HW;
3825	}
3826	reqctx->op = CHCR_DECRYPT_OP;
3827	switch (get_aead_subtype(tfm)) {
3828	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3829	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3830	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3831	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3832		return chcr_aead_op(req, size, create_authenc_wr);
3833	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3834	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3835		return chcr_aead_op(req, size, create_aead_ccm_wr);
3836	default:
3837		return chcr_aead_op(req, size, create_gcm_wr);
3838	}
3839}
3840
3841static struct chcr_alg_template driver_algs[] = {
3842	/* AES-CBC */
3843	{
3844		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3845		.is_registered = 0,
3846		.alg.skcipher = {
3847			.base.cra_name		= "cbc(aes)",
3848			.base.cra_driver_name	= "cbc-aes-chcr",
3849			.base.cra_blocksize	= AES_BLOCK_SIZE,
3850
3851			.init			= chcr_init_tfm,
3852			.exit			= chcr_exit_tfm,
3853			.min_keysize		= AES_MIN_KEY_SIZE,
3854			.max_keysize		= AES_MAX_KEY_SIZE,
3855			.ivsize			= AES_BLOCK_SIZE,
3856			.setkey			= chcr_aes_cbc_setkey,
3857			.encrypt		= chcr_aes_encrypt,
3858			.decrypt		= chcr_aes_decrypt,
3859			}
3860	},
3861	{
3862		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3863		.is_registered = 0,
3864		.alg.skcipher = {
3865			.base.cra_name		= "xts(aes)",
3866			.base.cra_driver_name	= "xts-aes-chcr",
3867			.base.cra_blocksize	= AES_BLOCK_SIZE,
3868
3869			.init			= chcr_init_tfm,
3870			.exit			= chcr_exit_tfm,
3871			.min_keysize		= 2 * AES_MIN_KEY_SIZE,
3872			.max_keysize		= 2 * AES_MAX_KEY_SIZE,
3873			.ivsize			= AES_BLOCK_SIZE,
3874			.setkey			= chcr_aes_xts_setkey,
3875			.encrypt		= chcr_aes_encrypt,
3876			.decrypt		= chcr_aes_decrypt,
3877			}
3878	},
3879	{
3880		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3881		.is_registered = 0,
3882		.alg.skcipher = {
3883			.base.cra_name		= "ctr(aes)",
3884			.base.cra_driver_name	= "ctr-aes-chcr",
3885			.base.cra_blocksize	= 1,
3886
3887			.init			= chcr_init_tfm,
3888			.exit			= chcr_exit_tfm,
3889			.min_keysize		= AES_MIN_KEY_SIZE,
3890			.max_keysize		= AES_MAX_KEY_SIZE,
3891			.ivsize			= AES_BLOCK_SIZE,
3892			.setkey			= chcr_aes_ctr_setkey,
3893			.encrypt		= chcr_aes_encrypt,
3894			.decrypt		= chcr_aes_decrypt,
3895		}
3896	},
3897	{
3898		.type = CRYPTO_ALG_TYPE_SKCIPHER |
3899			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3900		.is_registered = 0,
3901		.alg.skcipher = {
3902			.base.cra_name		= "rfc3686(ctr(aes))",
3903			.base.cra_driver_name	= "rfc3686-ctr-aes-chcr",
3904			.base.cra_blocksize	= 1,
3905
3906			.init			= chcr_rfc3686_init,
3907			.exit			= chcr_exit_tfm,
3908			.min_keysize		= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3909			.max_keysize		= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3910			.ivsize			= CTR_RFC3686_IV_SIZE,
3911			.setkey			= chcr_aes_rfc3686_setkey,
3912			.encrypt		= chcr_aes_encrypt,
3913			.decrypt		= chcr_aes_decrypt,
3914		}
3915	},
3916	/* SHA */
3917	{
3918		.type = CRYPTO_ALG_TYPE_AHASH,
3919		.is_registered = 0,
3920		.alg.hash = {
3921			.halg.digestsize = SHA1_DIGEST_SIZE,
3922			.halg.base = {
3923				.cra_name = "sha1",
3924				.cra_driver_name = "sha1-chcr",
3925				.cra_blocksize = SHA1_BLOCK_SIZE,
3926			}
3927		}
3928	},
3929	{
3930		.type = CRYPTO_ALG_TYPE_AHASH,
3931		.is_registered = 0,
3932		.alg.hash = {
3933			.halg.digestsize = SHA256_DIGEST_SIZE,
3934			.halg.base = {
3935				.cra_name = "sha256",
3936				.cra_driver_name = "sha256-chcr",
3937				.cra_blocksize = SHA256_BLOCK_SIZE,
3938			}
3939		}
3940	},
3941	{
3942		.type = CRYPTO_ALG_TYPE_AHASH,
3943		.is_registered = 0,
3944		.alg.hash = {
3945			.halg.digestsize = SHA224_DIGEST_SIZE,
3946			.halg.base = {
3947				.cra_name = "sha224",
3948				.cra_driver_name = "sha224-chcr",
3949				.cra_blocksize = SHA224_BLOCK_SIZE,
3950			}
3951		}
3952	},
3953	{
3954		.type = CRYPTO_ALG_TYPE_AHASH,
3955		.is_registered = 0,
3956		.alg.hash = {
3957			.halg.digestsize = SHA384_DIGEST_SIZE,
3958			.halg.base = {
3959				.cra_name = "sha384",
3960				.cra_driver_name = "sha384-chcr",
3961				.cra_blocksize = SHA384_BLOCK_SIZE,
3962			}
3963		}
3964	},
3965	{
3966		.type = CRYPTO_ALG_TYPE_AHASH,
3967		.is_registered = 0,
3968		.alg.hash = {
3969			.halg.digestsize = SHA512_DIGEST_SIZE,
3970			.halg.base = {
3971				.cra_name = "sha512",
3972				.cra_driver_name = "sha512-chcr",
3973				.cra_blocksize = SHA512_BLOCK_SIZE,
3974			}
3975		}
3976	},
3977	/* HMAC */
3978	{
3979		.type = CRYPTO_ALG_TYPE_HMAC,
3980		.is_registered = 0,
3981		.alg.hash = {
3982			.halg.digestsize = SHA1_DIGEST_SIZE,
3983			.halg.base = {
3984				.cra_name = "hmac(sha1)",
3985				.cra_driver_name = "hmac-sha1-chcr",
3986				.cra_blocksize = SHA1_BLOCK_SIZE,
3987			}
3988		}
3989	},
3990	{
3991		.type = CRYPTO_ALG_TYPE_HMAC,
3992		.is_registered = 0,
3993		.alg.hash = {
3994			.halg.digestsize = SHA224_DIGEST_SIZE,
3995			.halg.base = {
3996				.cra_name = "hmac(sha224)",
3997				.cra_driver_name = "hmac-sha224-chcr",
3998				.cra_blocksize = SHA224_BLOCK_SIZE,
3999			}
4000		}
4001	},
4002	{
4003		.type = CRYPTO_ALG_TYPE_HMAC,
4004		.is_registered = 0,
4005		.alg.hash = {
4006			.halg.digestsize = SHA256_DIGEST_SIZE,
4007			.halg.base = {
4008				.cra_name = "hmac(sha256)",
4009				.cra_driver_name = "hmac-sha256-chcr",
4010				.cra_blocksize = SHA256_BLOCK_SIZE,
4011			}
4012		}
4013	},
4014	{
4015		.type = CRYPTO_ALG_TYPE_HMAC,
4016		.is_registered = 0,
4017		.alg.hash = {
4018			.halg.digestsize = SHA384_DIGEST_SIZE,
4019			.halg.base = {
4020				.cra_name = "hmac(sha384)",
4021				.cra_driver_name = "hmac-sha384-chcr",
4022				.cra_blocksize = SHA384_BLOCK_SIZE,
4023			}
4024		}
4025	},
4026	{
4027		.type = CRYPTO_ALG_TYPE_HMAC,
4028		.is_registered = 0,
4029		.alg.hash = {
4030			.halg.digestsize = SHA512_DIGEST_SIZE,
4031			.halg.base = {
4032				.cra_name = "hmac(sha512)",
4033				.cra_driver_name = "hmac-sha512-chcr",
4034				.cra_blocksize = SHA512_BLOCK_SIZE,
4035			}
4036		}
4037	},
4038	/* Add AEAD Algorithms */
4039	{
4040		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
4041		.is_registered = 0,
4042		.alg.aead = {
4043			.base = {
4044				.cra_name = "gcm(aes)",
4045				.cra_driver_name = "gcm-aes-chcr",
4046				.cra_blocksize	= 1,
4047				.cra_priority = CHCR_AEAD_PRIORITY,
4048				.cra_ctxsize =	sizeof(struct chcr_context) +
4049						sizeof(struct chcr_aead_ctx) +
4050						sizeof(struct chcr_gcm_ctx),
4051			},
4052			.ivsize = GCM_AES_IV_SIZE,
4053			.maxauthsize = GHASH_DIGEST_SIZE,
4054			.setkey = chcr_gcm_setkey,
4055			.setauthsize = chcr_gcm_setauthsize,
4056		}
4057	},
4058	{
4059		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
4060		.is_registered = 0,
4061		.alg.aead = {
4062			.base = {
4063				.cra_name = "rfc4106(gcm(aes))",
4064				.cra_driver_name = "rfc4106-gcm-aes-chcr",
4065				.cra_blocksize	 = 1,
4066				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4067				.cra_ctxsize =	sizeof(struct chcr_context) +
4068						sizeof(struct chcr_aead_ctx) +
4069						sizeof(struct chcr_gcm_ctx),
4070
4071			},
4072			.ivsize = GCM_RFC4106_IV_SIZE,
4073			.maxauthsize	= GHASH_DIGEST_SIZE,
4074			.setkey = chcr_gcm_setkey,
4075			.setauthsize	= chcr_4106_4309_setauthsize,
4076		}
4077	},
4078	{
4079		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
4080		.is_registered = 0,
4081		.alg.aead = {
4082			.base = {
4083				.cra_name = "ccm(aes)",
4084				.cra_driver_name = "ccm-aes-chcr",
4085				.cra_blocksize	 = 1,
4086				.cra_priority = CHCR_AEAD_PRIORITY,
4087				.cra_ctxsize =	sizeof(struct chcr_context) +
4088						sizeof(struct chcr_aead_ctx),
4089
4090			},
4091			.ivsize = AES_BLOCK_SIZE,
4092			.maxauthsize	= GHASH_DIGEST_SIZE,
4093			.setkey = chcr_aead_ccm_setkey,
4094			.setauthsize	= chcr_ccm_setauthsize,
4095		}
4096	},
4097	{
4098		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
4099		.is_registered = 0,
4100		.alg.aead = {
4101			.base = {
4102				.cra_name = "rfc4309(ccm(aes))",
4103				.cra_driver_name = "rfc4309-ccm-aes-chcr",
4104				.cra_blocksize	 = 1,
4105				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4106				.cra_ctxsize =	sizeof(struct chcr_context) +
4107						sizeof(struct chcr_aead_ctx),
4108
4109			},
4110			.ivsize = 8,
4111			.maxauthsize	= GHASH_DIGEST_SIZE,
4112			.setkey = chcr_aead_rfc4309_setkey,
4113			.setauthsize = chcr_4106_4309_setauthsize,
4114		}
4115	},
4116	{
4117		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4118		.is_registered = 0,
4119		.alg.aead = {
4120			.base = {
4121				.cra_name = "authenc(hmac(sha1),cbc(aes))",
4122				.cra_driver_name =
4123					"authenc-hmac-sha1-cbc-aes-chcr",
4124				.cra_blocksize	 = AES_BLOCK_SIZE,
4125				.cra_priority = CHCR_AEAD_PRIORITY,
4126				.cra_ctxsize =	sizeof(struct chcr_context) +
4127						sizeof(struct chcr_aead_ctx) +
4128						sizeof(struct chcr_authenc_ctx),
4129
4130			},
4131			.ivsize = AES_BLOCK_SIZE,
4132			.maxauthsize = SHA1_DIGEST_SIZE,
4133			.setkey = chcr_authenc_setkey,
4134			.setauthsize = chcr_authenc_setauthsize,
4135		}
4136	},
4137	{
4138		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4139		.is_registered = 0,
4140		.alg.aead = {
4141			.base = {
4142
4143				.cra_name = "authenc(hmac(sha256),cbc(aes))",
4144				.cra_driver_name =
4145					"authenc-hmac-sha256-cbc-aes-chcr",
4146				.cra_blocksize	 = AES_BLOCK_SIZE,
4147				.cra_priority = CHCR_AEAD_PRIORITY,
4148				.cra_ctxsize =	sizeof(struct chcr_context) +
4149						sizeof(struct chcr_aead_ctx) +
4150						sizeof(struct chcr_authenc_ctx),
4151
4152			},
4153			.ivsize = AES_BLOCK_SIZE,
4154			.maxauthsize	= SHA256_DIGEST_SIZE,
4155			.setkey = chcr_authenc_setkey,
4156			.setauthsize = chcr_authenc_setauthsize,
4157		}
4158	},
4159	{
4160		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4161		.is_registered = 0,
4162		.alg.aead = {
4163			.base = {
4164				.cra_name = "authenc(hmac(sha224),cbc(aes))",
4165				.cra_driver_name =
4166					"authenc-hmac-sha224-cbc-aes-chcr",
4167				.cra_blocksize	 = AES_BLOCK_SIZE,
4168				.cra_priority = CHCR_AEAD_PRIORITY,
4169				.cra_ctxsize =	sizeof(struct chcr_context) +
4170						sizeof(struct chcr_aead_ctx) +
4171						sizeof(struct chcr_authenc_ctx),
4172			},
4173			.ivsize = AES_BLOCK_SIZE,
4174			.maxauthsize = SHA224_DIGEST_SIZE,
4175			.setkey = chcr_authenc_setkey,
4176			.setauthsize = chcr_authenc_setauthsize,
4177		}
4178	},
4179	{
4180		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4181		.is_registered = 0,
4182		.alg.aead = {
4183			.base = {
4184				.cra_name = "authenc(hmac(sha384),cbc(aes))",
4185				.cra_driver_name =
4186					"authenc-hmac-sha384-cbc-aes-chcr",
4187				.cra_blocksize	 = AES_BLOCK_SIZE,
4188				.cra_priority = CHCR_AEAD_PRIORITY,
4189				.cra_ctxsize =	sizeof(struct chcr_context) +
4190						sizeof(struct chcr_aead_ctx) +
4191						sizeof(struct chcr_authenc_ctx),
4192
4193			},
4194			.ivsize = AES_BLOCK_SIZE,
4195			.maxauthsize = SHA384_DIGEST_SIZE,
4196			.setkey = chcr_authenc_setkey,
4197			.setauthsize = chcr_authenc_setauthsize,
4198		}
4199	},
4200	{
4201		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4202		.is_registered = 0,
4203		.alg.aead = {
4204			.base = {
4205				.cra_name = "authenc(hmac(sha512),cbc(aes))",
4206				.cra_driver_name =
4207					"authenc-hmac-sha512-cbc-aes-chcr",
4208				.cra_blocksize	 = AES_BLOCK_SIZE,
4209				.cra_priority = CHCR_AEAD_PRIORITY,
4210				.cra_ctxsize =	sizeof(struct chcr_context) +
4211						sizeof(struct chcr_aead_ctx) +
4212						sizeof(struct chcr_authenc_ctx),
4213
4214			},
4215			.ivsize = AES_BLOCK_SIZE,
4216			.maxauthsize = SHA512_DIGEST_SIZE,
4217			.setkey = chcr_authenc_setkey,
4218			.setauthsize = chcr_authenc_setauthsize,
4219		}
4220	},
4221	{
4222		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4223		.is_registered = 0,
4224		.alg.aead = {
4225			.base = {
4226				.cra_name = "authenc(digest_null,cbc(aes))",
4227				.cra_driver_name =
4228					"authenc-digest_null-cbc-aes-chcr",
4229				.cra_blocksize	 = AES_BLOCK_SIZE,
4230				.cra_priority = CHCR_AEAD_PRIORITY,
4231				.cra_ctxsize =	sizeof(struct chcr_context) +
4232						sizeof(struct chcr_aead_ctx) +
4233						sizeof(struct chcr_authenc_ctx),
4234
4235			},
4236			.ivsize  = AES_BLOCK_SIZE,
4237			.maxauthsize = 0,
4238			.setkey  = chcr_aead_digest_null_setkey,
4239			.setauthsize = chcr_authenc_null_setauthsize,
4240		}
4241	},
4242	{
4243		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4244		.is_registered = 0,
4245		.alg.aead = {
4246			.base = {
4247				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4248				.cra_driver_name =
4249				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4250				.cra_blocksize	 = 1,
4251				.cra_priority = CHCR_AEAD_PRIORITY,
4252				.cra_ctxsize =	sizeof(struct chcr_context) +
4253						sizeof(struct chcr_aead_ctx) +
4254						sizeof(struct chcr_authenc_ctx),
4255
4256			},
4257			.ivsize = CTR_RFC3686_IV_SIZE,
4258			.maxauthsize = SHA1_DIGEST_SIZE,
4259			.setkey = chcr_authenc_setkey,
4260			.setauthsize = chcr_authenc_setauthsize,
4261		}
4262	},
4263	{
4264		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4265		.is_registered = 0,
4266		.alg.aead = {
4267			.base = {
4268
4269				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4270				.cra_driver_name =
4271				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4272				.cra_blocksize	 = 1,
4273				.cra_priority = CHCR_AEAD_PRIORITY,
4274				.cra_ctxsize =	sizeof(struct chcr_context) +
4275						sizeof(struct chcr_aead_ctx) +
4276						sizeof(struct chcr_authenc_ctx),
4277
4278			},
4279			.ivsize = CTR_RFC3686_IV_SIZE,
4280			.maxauthsize	= SHA256_DIGEST_SIZE,
4281			.setkey = chcr_authenc_setkey,
4282			.setauthsize = chcr_authenc_setauthsize,
4283		}
4284	},
4285	{
4286		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4287		.is_registered = 0,
4288		.alg.aead = {
4289			.base = {
4290				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4291				.cra_driver_name =
4292				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4293				.cra_blocksize	 = 1,
4294				.cra_priority = CHCR_AEAD_PRIORITY,
4295				.cra_ctxsize =	sizeof(struct chcr_context) +
4296						sizeof(struct chcr_aead_ctx) +
4297						sizeof(struct chcr_authenc_ctx),
4298			},
4299			.ivsize = CTR_RFC3686_IV_SIZE,
4300			.maxauthsize = SHA224_DIGEST_SIZE,
4301			.setkey = chcr_authenc_setkey,
4302			.setauthsize = chcr_authenc_setauthsize,
4303		}
4304	},
4305	{
4306		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4307		.is_registered = 0,
4308		.alg.aead = {
4309			.base = {
4310				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4311				.cra_driver_name =
4312				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4313				.cra_blocksize	 = 1,
4314				.cra_priority = CHCR_AEAD_PRIORITY,
4315				.cra_ctxsize =	sizeof(struct chcr_context) +
4316						sizeof(struct chcr_aead_ctx) +
4317						sizeof(struct chcr_authenc_ctx),
4318
4319			},
4320			.ivsize = CTR_RFC3686_IV_SIZE,
4321			.maxauthsize = SHA384_DIGEST_SIZE,
4322			.setkey = chcr_authenc_setkey,
4323			.setauthsize = chcr_authenc_setauthsize,
4324		}
4325	},
4326	{
4327		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4328		.is_registered = 0,
4329		.alg.aead = {
4330			.base = {
4331				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4332				.cra_driver_name =
4333				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4334				.cra_blocksize	 = 1,
4335				.cra_priority = CHCR_AEAD_PRIORITY,
4336				.cra_ctxsize =	sizeof(struct chcr_context) +
4337						sizeof(struct chcr_aead_ctx) +
4338						sizeof(struct chcr_authenc_ctx),
4339
4340			},
4341			.ivsize = CTR_RFC3686_IV_SIZE,
4342			.maxauthsize = SHA512_DIGEST_SIZE,
4343			.setkey = chcr_authenc_setkey,
4344			.setauthsize = chcr_authenc_setauthsize,
4345		}
4346	},
4347	{
4348		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4349		.is_registered = 0,
4350		.alg.aead = {
4351			.base = {
4352				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4353				.cra_driver_name =
4354				"authenc-digest_null-rfc3686-ctr-aes-chcr",
4355				.cra_blocksize	 = 1,
4356				.cra_priority = CHCR_AEAD_PRIORITY,
4357				.cra_ctxsize =	sizeof(struct chcr_context) +
4358						sizeof(struct chcr_aead_ctx) +
4359						sizeof(struct chcr_authenc_ctx),
4360
4361			},
4362			.ivsize  = CTR_RFC3686_IV_SIZE,
4363			.maxauthsize = 0,
4364			.setkey  = chcr_aead_digest_null_setkey,
4365			.setauthsize = chcr_authenc_null_setauthsize,
4366		}
4367	},
4368};
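
/*
 * Illustrative sketch (not part of the driver): the table above is keyed by
 * generic cra_name, so a consumer simply asks for e.g. "gcm(aes)" and the
 * crypto API picks the highest-priority registered implementation, which is
 * "gcm-aes-chcr" when this driver wins the priority comparison.  The helper
 * below is hypothetical and only reports which implementation was selected.
 */
static void chcr_example_report_gcm_impl(void)
{
	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return;
	pr_info("gcm(aes) is backed by %s\n",
		crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)));
	crypto_free_aead(tfm);
}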
4369
4370/*
4371 *	chcr_unregister_alg - Deregister crypto algorithms with
4372 *	the kernel crypto framework.
4373 */
4374static int chcr_unregister_alg(void)
4375{
4376	int i;
4377
4378	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4379		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4380		case CRYPTO_ALG_TYPE_SKCIPHER:
4381			if (driver_algs[i].is_registered && refcount_read(
4382			    &driver_algs[i].alg.skcipher.base.cra_refcnt)
4383			    == 1) {
4384				crypto_unregister_skcipher(
4385						&driver_algs[i].alg.skcipher);
4386				driver_algs[i].is_registered = 0;
4387			}
4388			break;
4389		case CRYPTO_ALG_TYPE_AEAD:
4390			if (driver_algs[i].is_registered && refcount_read(
4391			    &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
4392				crypto_unregister_aead(
4393						&driver_algs[i].alg.aead);
4394				driver_algs[i].is_registered = 0;
4395			}
4396			break;
4397		case CRYPTO_ALG_TYPE_AHASH:
4398			if (driver_algs[i].is_registered && refcount_read(
4399			    &driver_algs[i].alg.hash.halg.base.cra_refcnt)
4400			    == 1) {
4401				crypto_unregister_ahash(
4402						&driver_algs[i].alg.hash);
4403				driver_algs[i].is_registered = 0;
4404			}
4405			break;
4406		}
4407	}
4408	return 0;
4409}
4410
4411#define SZ_AHASH_CTX sizeof(struct chcr_context)
4412#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4413#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4414
4415/*
4416 *	chcr_register_alg - Register crypto algorithms with the kernel crypto framework.
4417 */
4418static int chcr_register_alg(void)
4419{
4420	struct crypto_alg ai;
4421	struct ahash_alg *a_hash;
4422	int err = 0, i;
4423	char *name = NULL;
4424
4425	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4426		if (driver_algs[i].is_registered)
4427			continue;
4428		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4429		case CRYPTO_ALG_TYPE_SKCIPHER:
4430			driver_algs[i].alg.skcipher.base.cra_priority =
4431				CHCR_CRA_PRIORITY;
4432			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
4433			driver_algs[i].alg.skcipher.base.cra_flags =
4434				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
4435				CRYPTO_ALG_ALLOCATES_MEMORY |
4436				CRYPTO_ALG_NEED_FALLBACK;
4437			driver_algs[i].alg.skcipher.base.cra_ctxsize =
4438				sizeof(struct chcr_context) +
4439				sizeof(struct ablk_ctx);
4440			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
4441
4442			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
4443			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
4444			break;
4445		case CRYPTO_ALG_TYPE_AEAD:
4446			driver_algs[i].alg.aead.base.cra_flags =
4447				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
4448				CRYPTO_ALG_ALLOCATES_MEMORY;
4449			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4450			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4451			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4452			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4453			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4454			err = crypto_register_aead(&driver_algs[i].alg.aead);
4455			name = driver_algs[i].alg.aead.base.cra_driver_name;
4456			break;
4457		case CRYPTO_ALG_TYPE_AHASH:
4458			a_hash = &driver_algs[i].alg.hash;
4459			a_hash->update = chcr_ahash_update;
4460			a_hash->final = chcr_ahash_final;
4461			a_hash->finup = chcr_ahash_finup;
4462			a_hash->digest = chcr_ahash_digest;
4463			a_hash->export = chcr_ahash_export;
4464			a_hash->import = chcr_ahash_import;
4465			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4466			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4467			a_hash->halg.base.cra_module = THIS_MODULE;
4468			a_hash->halg.base.cra_flags =
4469				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4470			a_hash->halg.base.cra_alignmask = 0;
4471			a_hash->halg.base.cra_exit = NULL;
4472
4473			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4474				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4475				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4476				a_hash->init = chcr_hmac_init;
4477				a_hash->setkey = chcr_ahash_setkey;
4478				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4479			} else {
4480				a_hash->init = chcr_sha_init;
4481				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4482				a_hash->halg.base.cra_init = chcr_sha_cra_init;
4483			}
4484			err = crypto_register_ahash(&driver_algs[i].alg.hash);
4485			ai = driver_algs[i].alg.hash.halg.base;
4486			name = ai.cra_driver_name;
4487			break;
4488		}
4489		if (err) {
4490			pr_err("%s : Algorithm registration failed\n", name);
4491			goto register_err;
4492		} else {
4493			driver_algs[i].is_registered = 1;
4494		}
4495	}
4496	return 0;
4497
4498register_err:
4499	chcr_unregister_alg();
4500	return err;
4501}
4502
4503/*
4504 *	start_crypto - Register the crypto algorithms.
4505 *	This should be called once when the first device comes up. After this,
4506 *	the kernel will start calling the driver APIs for crypto operations.
4507 */
4508int start_crypto(void)
4509{
4510	return chcr_register_alg();
4511}
4512
4513/*
4514 *	stop_crypto - Deregister all the crypto algorithms with kernel.
4515 *	This should be called once when the last device goes down. After this,
4516 *	the kernel will not call the driver APIs for crypto operations.
4517 */
4518int stop_crypto(void)
4519{
4520	chcr_unregister_alg();
4521	return 0;
4522}
v6.2
   1/*
   2 * This file is part of the Chelsio T6 Crypto driver for Linux.
   3 *
   4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 *
  34 * Written and Maintained by:
  35 *	Manoj Malviya (manojmalviya@chelsio.com)
  36 *	Atul Gupta (atul.gupta@chelsio.com)
  37 *	Jitendra Lulla (jlulla@chelsio.com)
  38 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
  39 *	Harsh Jain (harsh@chelsio.com)
  40 */
  41
  42#define pr_fmt(fmt) "chcr:" fmt
  43
  44#include <linux/kernel.h>
  45#include <linux/module.h>
  46#include <linux/crypto.h>
  47#include <linux/skbuff.h>
  48#include <linux/rtnetlink.h>
  49#include <linux/highmem.h>
  50#include <linux/scatterlist.h>
  51
  52#include <crypto/aes.h>
  53#include <crypto/algapi.h>
  54#include <crypto/hash.h>
  55#include <crypto/gcm.h>
  56#include <crypto/sha1.h>
  57#include <crypto/sha2.h>
  58#include <crypto/authenc.h>
  59#include <crypto/ctr.h>
  60#include <crypto/gf128mul.h>
  61#include <crypto/internal/aead.h>
  62#include <crypto/null.h>
  63#include <crypto/internal/skcipher.h>
  64#include <crypto/aead.h>
  65#include <crypto/scatterwalk.h>
  66#include <crypto/internal/hash.h>
  67
  68#include "t4fw_api.h"
  69#include "t4_msg.h"
  70#include "chcr_core.h"
  71#include "chcr_algo.h"
  72#include "chcr_crypto.h"
  73
  74#define IV AES_BLOCK_SIZE
  75
  76static unsigned int sgl_ent_len[] = {
  77	0, 0, 16, 24, 40, 48, 64, 72, 88,
  78	96, 112, 120, 136, 144, 160, 168, 184,
  79	192, 208, 216, 232, 240, 256, 264, 280,
  80	288, 304, 312, 328, 336, 352, 360, 376
  81};
  82
  83static unsigned int dsgl_ent_len[] = {
  84	0, 32, 32, 48, 48, 64, 64, 80, 80,
  85	112, 112, 128, 128, 144, 144, 160, 160,
  86	192, 192, 208, 208, 224, 224, 240, 240,
  87	272, 272, 288, 288, 304, 304, 320, 320
  88};
  89
  90static u32 round_constant[11] = {
  91	0x01000000, 0x02000000, 0x04000000, 0x08000000,
  92	0x10000000, 0x20000000, 0x40000000, 0x80000000,
  93	0x1B000000, 0x36000000, 0x6C000000
  94};
  95
  96static int chcr_handle_cipher_resp(struct skcipher_request *req,
  97				   unsigned char *input, int err);
  98
  99static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
 100{
 101	return &ctx->crypto_ctx->aeadctx;
 102}
 103
 104static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
 105{
 106	return &ctx->crypto_ctx->ablkctx;
 107}
 108
 109static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
 110{
 111	return &ctx->crypto_ctx->hmacctx;
 112}
 113
 114static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
 115{
 116	return gctx->ctx->gcm;
 117}
 118
 119static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
 120{
 121	return gctx->ctx->authenc;
 122}
 123
 124static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
 125{
 126	return container_of(ctx->dev, struct uld_ctx, dev);
 127}
 128
 129static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
 130{
 131	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
 132}
 133
 134static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
 135			 unsigned int entlen,
 136			 unsigned int skip)
 137{
 138	int nents = 0;
 139	unsigned int less;
 140	unsigned int skip_len = 0;
 141
 142	while (sg && skip) {
 143		if (sg_dma_len(sg) <= skip) {
 144			skip -= sg_dma_len(sg);
 145			skip_len = 0;
 146			sg = sg_next(sg);
 147		} else {
 148			skip_len = skip;
 149			skip = 0;
 150		}
 151	}
 152
 153	while (sg && reqlen) {
 154		less = min(reqlen, sg_dma_len(sg) - skip_len);
 155		nents += DIV_ROUND_UP(less, entlen);
 156		reqlen -= less;
 157		skip_len = 0;
 158		sg = sg_next(sg);
 159	}
 160	return nents;
 161}
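
/*
 * Worked example (hypothetical sizes): with entlen of 2048 bytes and a
 * request covering two DMA segments of 5000 and 3000 bytes, the walk
 * contributes DIV_ROUND_UP(5000, 2048) + DIV_ROUND_UP(3000, 2048)
 * = 3 + 2 = 5 hardware SG entries; any leading skip is first consumed
 * from the front of the scatterlist before counting begins.
 */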
 162
 163static inline int get_aead_subtype(struct crypto_aead *aead)
 164{
 165	struct aead_alg *alg = crypto_aead_alg(aead);
 166	struct chcr_alg_template *chcr_crypto_alg =
 167		container_of(alg, struct chcr_alg_template, alg.aead);
 168	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 169}
 170
 171void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
 172{
 173	u8 temp[SHA512_DIGEST_SIZE];
 174	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 175	int authsize = crypto_aead_authsize(tfm);
 176	struct cpl_fw6_pld *fw6_pld;
 177	int cmp = 0;
 178
 179	fw6_pld = (struct cpl_fw6_pld *)input;
 180	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
 181	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
 182		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
 183	} else {
 184
 185		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
 186				authsize, req->assoclen +
 187				req->cryptlen - authsize);
 188		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
 189	}
 190	if (cmp)
 191		*err = -EBADMSG;
 192	else
 193		*err = 0;
 194}
 195
 196static int chcr_inc_wrcount(struct chcr_dev *dev)
 197{
 198	if (dev->state == CHCR_DETACH)
 199		return 1;
 200	atomic_inc(&dev->inflight);
 201	return 0;
 202}
 203
 204static inline void chcr_dec_wrcount(struct chcr_dev *dev)
 205{
 206	atomic_dec(&dev->inflight);
 207}
 208
 209static inline int chcr_handle_aead_resp(struct aead_request *req,
 210					 unsigned char *input,
 211					 int err)
 212{
 213	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
 214	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 215	struct chcr_dev *dev = a_ctx(tfm)->dev;
 216
 217	chcr_aead_common_exit(req);
 218	if (reqctx->verify == VERIFY_SW) {
 219		chcr_verify_tag(req, input, &err);
 220		reqctx->verify = VERIFY_HW;
 221	}
 222	chcr_dec_wrcount(dev);
 223	req->base.complete(&req->base, err);
 224
 225	return err;
 226}
 227
 228static void get_aes_decrypt_key(unsigned char *dec_key,
 229				       const unsigned char *key,
 230				       unsigned int keylength)
 231{
 232	u32 temp;
 233	u32 w_ring[MAX_NK];
 234	int i, j, k;
 235	u8  nr, nk;
 236
 237	switch (keylength) {
 238	case AES_KEYLENGTH_128BIT:
 239		nk = KEYLENGTH_4BYTES;
 240		nr = NUMBER_OF_ROUNDS_10;
 241		break;
 242	case AES_KEYLENGTH_192BIT:
 243		nk = KEYLENGTH_6BYTES;
 244		nr = NUMBER_OF_ROUNDS_12;
 245		break;
 246	case AES_KEYLENGTH_256BIT:
 247		nk = KEYLENGTH_8BYTES;
 248		nr = NUMBER_OF_ROUNDS_14;
 249		break;
 250	default:
 251		return;
 252	}
 253	for (i = 0; i < nk; i++)
 254		w_ring[i] = get_unaligned_be32(&key[i * 4]);
 255
 256	i = 0;
 257	temp = w_ring[nk - 1];
 258	while (i + nk < (nr + 1) * 4) {
 259		if (!(i % nk)) {
 260			/* RotWord(temp) */
 261			temp = (temp << 8) | (temp >> 24);
 262			temp = aes_ks_subword(temp);
 263			temp ^= round_constant[i / nk];
 264		} else if (nk == 8 && (i % 4 == 0)) {
 265			temp = aes_ks_subword(temp);
 266		}
 267		w_ring[i % nk] ^= temp;
 268		temp = w_ring[i % nk];
 269		i++;
 270	}
 271	i--;
 272	for (k = 0, j = i % nk; k < nk; k++) {
 273		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
 274		j--;
 275		if (j < 0)
 276			j += nk;
 277	}
 278}
 279
 280static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
 281{
 282	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
 283
 284	switch (ds) {
 285	case SHA1_DIGEST_SIZE:
 286		base_hash = crypto_alloc_shash("sha1", 0, 0);
 287		break;
 288	case SHA224_DIGEST_SIZE:
 289		base_hash = crypto_alloc_shash("sha224", 0, 0);
 290		break;
 291	case SHA256_DIGEST_SIZE:
 292		base_hash = crypto_alloc_shash("sha256", 0, 0);
 293		break;
 294	case SHA384_DIGEST_SIZE:
 295		base_hash = crypto_alloc_shash("sha384", 0, 0);
 296		break;
 297	case SHA512_DIGEST_SIZE:
 298		base_hash = crypto_alloc_shash("sha512", 0, 0);
 299		break;
 300	}
 301
 302	return base_hash;
 303}
 304
 305static int chcr_compute_partial_hash(struct shash_desc *desc,
 306				     char *iopad, char *result_hash,
 307				     int digest_size)
 308{
 309	struct sha1_state sha1_st;
 310	struct sha256_state sha256_st;
 311	struct sha512_state sha512_st;
 312	int error;
 313
 314	if (digest_size == SHA1_DIGEST_SIZE) {
 315		error = crypto_shash_init(desc) ?:
 316			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
 317			crypto_shash_export(desc, (void *)&sha1_st);
 318		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
 319	} else if (digest_size == SHA224_DIGEST_SIZE) {
 320		error = crypto_shash_init(desc) ?:
 321			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
 322			crypto_shash_export(desc, (void *)&sha256_st);
 323		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
 324
 325	} else if (digest_size == SHA256_DIGEST_SIZE) {
 326		error = crypto_shash_init(desc) ?:
 327			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
 328			crypto_shash_export(desc, (void *)&sha256_st);
 329		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
 330
 331	} else if (digest_size == SHA384_DIGEST_SIZE) {
 332		error = crypto_shash_init(desc) ?:
 333			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
 334			crypto_shash_export(desc, (void *)&sha512_st);
 335		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
 336
 337	} else if (digest_size == SHA512_DIGEST_SIZE) {
 338		error = crypto_shash_init(desc) ?:
 339			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
 340			crypto_shash_export(desc, (void *)&sha512_st);
 341		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
 342	} else {
 343		error = -EINVAL;
 344		pr_err("Unknown digest size %d\n", digest_size);
 345	}
 346	return error;
 347}
 348
 349static void chcr_change_order(char *buf, int ds)
 350{
 351	int i;
 352
 353	if (ds == SHA512_DIGEST_SIZE) {
 354		for (i = 0; i < (ds / sizeof(u64)); i++)
 355			*((__be64 *)buf + i) =
 356				cpu_to_be64(*((u64 *)buf + i));
 357	} else {
 358		for (i = 0; i < (ds / sizeof(u32)); i++)
 359			*((__be32 *)buf + i) =
 360				cpu_to_be32(*((u32 *)buf + i));
 361	}
 362}
 363
 364static inline int is_hmac(struct crypto_tfm *tfm)
 365{
 366	struct crypto_alg *alg = tfm->__crt_alg;
 367	struct chcr_alg_template *chcr_crypto_alg =
 368		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
 369			     alg.hash);
 370	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
 371		return 1;
 372	return 0;
 373}
 374
 375static inline void dsgl_walk_init(struct dsgl_walk *walk,
 376				   struct cpl_rx_phys_dsgl *dsgl)
 377{
 378	walk->dsgl = dsgl;
 379	walk->nents = 0;
 380	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 381}
 382
 383static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
 384				 int pci_chan_id)
 385{
 386	struct cpl_rx_phys_dsgl *phys_cpl;
 387
 388	phys_cpl = walk->dsgl;
 389
 390	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
 391				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
 392	phys_cpl->pcirlxorder_to_noofsgentr =
 393		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
 394		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
 395		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
 396		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
 397		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
 398		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
 399	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
 400	phys_cpl->rss_hdr_int.qid = htons(qid);
 401	phys_cpl->rss_hdr_int.hash_val = 0;
 402	phys_cpl->rss_hdr_int.channel = pci_chan_id;
 403}
 404
 405static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
 406					size_t size,
 407					dma_addr_t addr)
 408{
 409	int j;
 410
 411	if (!size)
 412		return;
 413	j = walk->nents;
 414	walk->to->len[j % 8] = htons(size);
 415	walk->to->addr[j % 8] = cpu_to_be64(addr);
 416	j++;
 417	if ((j % 8) == 0)
 418		walk->to++;
 419	walk->nents = j;
 420}
 421
 422static void  dsgl_walk_add_sg(struct dsgl_walk *walk,
 423			   struct scatterlist *sg,
 424			      unsigned int slen,
 425			      unsigned int skip)
 426{
 427	int skip_len = 0;
 428	unsigned int left_size = slen, len = 0;
 429	unsigned int j = walk->nents;
 430	int offset, ent_len;
 431
 432	if (!slen)
 433		return;
 434	while (sg && skip) {
 435		if (sg_dma_len(sg) <= skip) {
 436			skip -= sg_dma_len(sg);
 437			skip_len = 0;
 438			sg = sg_next(sg);
 439		} else {
 440			skip_len = skip;
 441			skip = 0;
 442		}
 443	}
 444
 445	while (left_size && sg) {
 446		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
 447		offset = 0;
 448		while (len) {
 449			ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
 450			walk->to->len[j % 8] = htons(ent_len);
 451			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
 452						      offset + skip_len);
 453			offset += ent_len;
 454			len -= ent_len;
 455			j++;
 456			if ((j % 8) == 0)
 457				walk->to++;
 458		}
 459		walk->last_sg = sg;
 460		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
 461					  skip_len) + skip_len;
 462		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
 463		skip_len = 0;
 464		sg = sg_next(sg);
 465	}
 466	walk->nents = j;
 467}
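
/*
 * Illustrative usage sketch (not part of the driver): a destination
 * descriptor is built with the init/add/end walk in sequence.  The helper
 * name and parameters below are hypothetical.
 */
static void chcr_example_build_dst_dsgl(struct cpl_rx_phys_dsgl *phys_cpl,
					struct scatterlist *dst,
					unsigned int len, unsigned int skip,
					unsigned short qid, int pci_chan_id)
{
	struct dsgl_walk dsgl_walk;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_sg(&dsgl_walk, dst, len, skip);
	dsgl_walk_end(&dsgl_walk, qid, pci_chan_id);
}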
 468
 469static inline void ulptx_walk_init(struct ulptx_walk *walk,
 470				   struct ulptx_sgl *ulp)
 471{
 472	walk->sgl = ulp;
 473	walk->nents = 0;
 474	walk->pair_idx = 0;
 475	walk->pair = ulp->sge;
 476	walk->last_sg = NULL;
 477	walk->last_sg_len = 0;
 478}
 479
 480static inline void ulptx_walk_end(struct ulptx_walk *walk)
 481{
 482	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
 483			      ULPTX_NSGE_V(walk->nents));
 484}
 485
 486
 487static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
 488					size_t size,
 489					dma_addr_t addr)
 490{
 491	if (!size)
 492		return;
 493
 494	if (walk->nents == 0) {
 495		walk->sgl->len0 = cpu_to_be32(size);
 496		walk->sgl->addr0 = cpu_to_be64(addr);
 497	} else {
 498		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
 499		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
 500		walk->pair_idx = !walk->pair_idx;
 501		if (!walk->pair_idx)
 502			walk->pair++;
 503	}
 504	walk->nents++;
 505}
 506
 507static void  ulptx_walk_add_sg(struct ulptx_walk *walk,
 508					struct scatterlist *sg,
 509			       unsigned int len,
 510			       unsigned int skip)
 511{
 512	int small;
 513	int skip_len = 0;
 514	unsigned int sgmin;
 515
 516	if (!len)
 517		return;
 518	while (sg && skip) {
 519		if (sg_dma_len(sg) <= skip) {
 520			skip -= sg_dma_len(sg);
 521			skip_len = 0;
 522			sg = sg_next(sg);
 523		} else {
 524			skip_len = skip;
 525			skip = 0;
 526		}
 527	}
 528	WARN(!sg, "SG should not be null here\n");
 529	if (sg && (walk->nents == 0)) {
 530		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
 531		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
 532		walk->sgl->len0 = cpu_to_be32(sgmin);
 533		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
 534		walk->nents++;
 535		len -= sgmin;
 536		walk->last_sg = sg;
 537		walk->last_sg_len = sgmin + skip_len;
 538		skip_len += sgmin;
 539		if (sg_dma_len(sg) == skip_len) {
 540			sg = sg_next(sg);
 541			skip_len = 0;
 542		}
 543	}
 544
 545	while (sg && len) {
 546		small = min(sg_dma_len(sg) - skip_len, len);
 547		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
 548		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
 549		walk->pair->addr[walk->pair_idx] =
 550			cpu_to_be64(sg_dma_address(sg) + skip_len);
 551		walk->pair_idx = !walk->pair_idx;
 552		walk->nents++;
 553		if (!walk->pair_idx)
 554			walk->pair++;
 555		len -= sgmin;
 556		skip_len += sgmin;
 557		walk->last_sg = sg;
 558		walk->last_sg_len = skip_len;
 559		if (sg_dma_len(sg) == skip_len) {
 560			sg = sg_next(sg);
 561			skip_len = 0;
 562		}
 563	}
 564}
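
/*
 * Illustrative usage sketch (not part of the driver): the source ULPTX SGL
 * follows the same init/add/end pattern as the dsgl walk above.  The helper
 * name and parameters are hypothetical.
 */
static void chcr_example_build_src_ulptx(struct ulptx_sgl *ulp,
					 struct scatterlist *src,
					 unsigned int len, unsigned int skip)
{
	struct ulptx_walk ulp_walk;

	ulptx_walk_init(&ulp_walk, ulp);
	ulptx_walk_add_sg(&ulp_walk, src, len, skip);
	ulptx_walk_end(&ulp_walk);
}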
 565
 566static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
 567{
 568	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 569	struct chcr_alg_template *chcr_crypto_alg =
 570		container_of(alg, struct chcr_alg_template, alg.skcipher);
 571
 572	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 573}
 574
 575static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
 576{
 577	struct adapter *adap = netdev2adap(dev);
 578	struct sge_uld_txq_info *txq_info =
 579		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
 580	struct sge_uld_txq *txq;
 581	int ret = 0;
 582
 583	local_bh_disable();
 584	txq = &txq_info->uldtxq[idx];
 585	spin_lock(&txq->sendq.lock);
 586	if (txq->full)
 587		ret = -1;
 588	spin_unlock(&txq->sendq.lock);
 589	local_bh_enable();
 590	return ret;
 591}
 592
 593static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
 594			       struct _key_ctx *key_ctx)
 595{
 596	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
 597		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
 598	} else {
 599		memcpy(key_ctx->key,
 600		       ablkctx->key + (ablkctx->enckey_len >> 1),
 601		       ablkctx->enckey_len >> 1);
 602		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
 603		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
 604	}
 605	return 0;
 606}
 607
 608static int chcr_hash_ent_in_wr(struct scatterlist *src,
 609			     unsigned int minsg,
 610			     unsigned int space,
 611			     unsigned int srcskip)
 612{
 613	int srclen = 0;
 614	int srcsg = minsg;
 615	int soffset = 0, sless;
 616
 617	if (sg_dma_len(src) == srcskip) {
 618		src = sg_next(src);
 619		srcskip = 0;
 620	}
 621	while (src && space > (sgl_ent_len[srcsg + 1])) {
 622		sless = min_t(unsigned int, sg_dma_len(src) - soffset -	srcskip,
 623							CHCR_SRC_SG_SIZE);
 624		srclen += sless;
 625		soffset += sless;
 626		srcsg++;
 627		if (sg_dma_len(src) == (soffset + srcskip)) {
 628			src = sg_next(src);
 629			soffset = 0;
 630			srcskip = 0;
 631		}
 632	}
 633	return srclen;
 634}
 635
 636static int chcr_sg_ent_in_wr(struct scatterlist *src,
 637			     struct scatterlist *dst,
 638			     unsigned int minsg,
 639			     unsigned int space,
 640			     unsigned int srcskip,
 641			     unsigned int dstskip)
 642{
 643	int srclen = 0, dstlen = 0;
 644	int srcsg = minsg, dstsg = minsg;
 645	int offset = 0, soffset = 0, less, sless = 0;
 646
 647	if (sg_dma_len(src) == srcskip) {
 648		src = sg_next(src);
 649		srcskip = 0;
 650	}
 651	if (sg_dma_len(dst) == dstskip) {
 652		dst = sg_next(dst);
 653		dstskip = 0;
 654	}
 655
 656	while (src && dst &&
 657	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
 658		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
 659				CHCR_SRC_SG_SIZE);
 660		srclen += sless;
 661		srcsg++;
 662		offset = 0;
 663		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
 664		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
 665			if (srclen <= dstlen)
 666				break;
 667			less = min_t(unsigned int, sg_dma_len(dst) - offset -
 668				     dstskip, CHCR_DST_SG_SIZE);
 669			dstlen += less;
 670			offset += less;
 671			if ((offset + dstskip) == sg_dma_len(dst)) {
 672				dst = sg_next(dst);
 673				offset = 0;
 674			}
 675			dstsg++;
 676			dstskip = 0;
 677		}
 678		soffset += sless;
 679		if ((soffset + srcskip) == sg_dma_len(src)) {
 680			src = sg_next(src);
 681			srcskip = 0;
 682			soffset = 0;
 683		}
 684
 685	}
 686	return min(srclen, dstlen);
 687}
 688
 689static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
 690				struct skcipher_request *req,
 691				u8 *iv,
 692				unsigned short op_type)
 693{
 694	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 695	int err;
 696
 697	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
 698	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
 699				      req->base.complete, req->base.data);
 700	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
 701				   req->cryptlen, iv);
 702
 703	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
 704			crypto_skcipher_encrypt(&reqctx->fallback_req);
 705
 706	return err;
 707
 708}
 709
 710static inline int get_qidxs(struct crypto_async_request *req,
 711			    unsigned int *txqidx, unsigned int *rxqidx)
 712{
 713	struct crypto_tfm *tfm = req->tfm;
 714	int ret = 0;
 715
 716	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
 717	case CRYPTO_ALG_TYPE_AEAD:
 718	{
 719		struct aead_request *aead_req =
 720			container_of(req, struct aead_request, base);
 721		struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(aead_req);
 722		*txqidx = reqctx->txqidx;
 723		*rxqidx = reqctx->rxqidx;
 724		break;
 725	}
 726	case CRYPTO_ALG_TYPE_SKCIPHER:
 727	{
 728		struct skcipher_request *sk_req =
 729			container_of(req, struct skcipher_request, base);
 730		struct chcr_skcipher_req_ctx *reqctx =
 731			skcipher_request_ctx(sk_req);
 732		*txqidx = reqctx->txqidx;
 733		*rxqidx = reqctx->rxqidx;
 734		break;
 735	}
 736	case CRYPTO_ALG_TYPE_AHASH:
 737	{
 738		struct ahash_request *ahash_req =
 739			container_of(req, struct ahash_request, base);
 740		struct chcr_ahash_req_ctx *reqctx =
 741			ahash_request_ctx(ahash_req);
 742		*txqidx = reqctx->txqidx;
 743		*rxqidx = reqctx->rxqidx;
 744		break;
 745	}
 746	default:
 747		ret = -EINVAL;
 748		/* should never get here */
 749		BUG();
 750		break;
 751	}
 752	return ret;
 753}
 754
 755static inline void create_wreq(struct chcr_context *ctx,
 756			       struct chcr_wr *chcr_req,
 757			       struct crypto_async_request *req,
 758			       unsigned int imm,
 759			       int hash_sz,
 760			       unsigned int len16,
 761			       unsigned int sc_len,
 762			       unsigned int lcb)
 763{
 764	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 765	unsigned int tx_channel_id, rx_channel_id;
 766	unsigned int txqidx = 0, rxqidx = 0;
 767	unsigned int qid, fid, portno;
 768
 769	get_qidxs(req, &txqidx, &rxqidx);
 770	qid = u_ctx->lldi.rxq_ids[rxqidx];
 771	fid = u_ctx->lldi.rxq_ids[0];
 772	portno = rxqidx / ctx->rxq_perchan;
 773	tx_channel_id = txqidx / ctx->txq_perchan;
 774	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
 775
 776
 777	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
 778	chcr_req->wreq.pld_size_hash_size =
 779		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
 780	chcr_req->wreq.len16_pkd =
 781		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
 782	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
 783	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
 784							    !!lcb, txqidx);
 785
 786	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
 787	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
 788				((sizeof(chcr_req->wreq)) >> 4)));
 789	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
 790	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
 791					   sizeof(chcr_req->key_ctx) + sc_len);
 792}
 793
 794/**
 795 *	create_cipher_wr - form the WR for cipher operations
 796 *	@wrparam: Container for create_cipher_wr()'s parameters
 797 */
 798static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 799{
 800	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
 801	struct chcr_context *ctx = c_ctx(tfm);
 802	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 803	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 804	struct sk_buff *skb = NULL;
 805	struct chcr_wr *chcr_req;
 806	struct cpl_rx_phys_dsgl *phys_cpl;
 807	struct ulptx_sgl *ulptx;
 808	struct chcr_skcipher_req_ctx *reqctx =
 809		skcipher_request_ctx(wrparam->req);
 810	unsigned int temp = 0, transhdr_len, dst_size;
 811	int error;
 812	int nents;
 813	unsigned int kctx_len;
 814	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 815			GFP_KERNEL : GFP_ATOMIC;
 816	struct adapter *adap = padap(ctx->dev);
 817	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 818
 819	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
 820	nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
 821			      reqctx->dst_ofst);
 822	dst_size = get_space_for_phys_dsgl(nents);
 823	kctx_len = roundup(ablkctx->enckey_len, 16);
 824	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
 825	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
 826				  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
 827	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
 828				     (sgl_len(nents) * 8);
 829	transhdr_len += temp;
 830	transhdr_len = roundup(transhdr_len, 16);
 831	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
 832	if (!skb) {
 833		error = -ENOMEM;
 834		goto err;
 835	}
 836	chcr_req = __skb_put_zero(skb, transhdr_len);
 837	chcr_req->sec_cpl.op_ivinsrtofst =
 838			FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
 839
 840	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
 841	chcr_req->sec_cpl.aadstart_cipherstop_hi =
 842			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
 843
 844	chcr_req->sec_cpl.cipherstop_lo_authinsert =
 845			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
 846	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
 847							 ablkctx->ciph_mode,
 848							 0, 0, IV >> 1);
 849	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
 850							  0, 1, dst_size);
 851
 852	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
 853	if ((reqctx->op == CHCR_DECRYPT_OP) &&
 854	    (!(get_cryptoalg_subtype(tfm) ==
 855	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
 856	    (!(get_cryptoalg_subtype(tfm) ==
 857	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
 858		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
 859	} else {
 860		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
 861		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
 862			memcpy(chcr_req->key_ctx.key, ablkctx->key,
 863			       ablkctx->enckey_len);
 864		} else {
 865			memcpy(chcr_req->key_ctx.key, ablkctx->key +
 866			       (ablkctx->enckey_len >> 1),
 867			       ablkctx->enckey_len >> 1);
 868			memcpy(chcr_req->key_ctx.key +
 869			       (ablkctx->enckey_len >> 1),
 870			       ablkctx->key,
 871			       ablkctx->enckey_len >> 1);
 872		}
 873	}
 874	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
 875	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
 876	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
 877	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
 878
 879	atomic_inc(&adap->chcr_stats.cipher_rqst);
 880	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
 881		+ (reqctx->imm ? (wrparam->bytes) : 0);
 882	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
 883		    transhdr_len, temp,
 884			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
 885	reqctx->skb = skb;
 886
 887	if (reqctx->op && (ablkctx->ciph_mode ==
 888			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
 889		sg_pcopy_to_buffer(wrparam->req->src,
 890			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
 891			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
 892
 893	return skb;
 894err:
 895	return ERR_PTR(error);
 896}
 897
 898static inline int chcr_keyctx_ck_size(unsigned int keylen)
 899{
 900	int ck_size = 0;
 901
 902	if (keylen == AES_KEYSIZE_128)
 903		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
 904	else if (keylen == AES_KEYSIZE_192)
 905		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
 906	else if (keylen == AES_KEYSIZE_256)
 907		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
 908	else
 909		ck_size = 0;
 910
 911	return ck_size;
 912}
 913static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
 914				       const u8 *key,
 915				       unsigned int keylen)
 916{
 917	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 918
 919	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
 920				CRYPTO_TFM_REQ_MASK);
 921	crypto_skcipher_set_flags(ablkctx->sw_cipher,
 922				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
 923	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
 924}
 925
 926static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
 927			       const u8 *key,
 928			       unsigned int keylen)
 929{
 930	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 931	unsigned int ck_size, context_size;
 932	u16 alignment = 0;
 933	int err;
 934
 935	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 936	if (err)
 937		goto badkey_err;
 938
 939	ck_size = chcr_keyctx_ck_size(keylen);
 940	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
 941	memcpy(ablkctx->key, key, keylen);
 942	ablkctx->enckey_len = keylen;
 943	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
 944	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 945			keylen + alignment) >> 4;
 946
 947	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 948						0, 0, context_size);
 949	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
 950	return 0;
 951badkey_err:
 952	ablkctx->enckey_len = 0;
 953
 954	return err;
 955}
 956
 957static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
 958				   const u8 *key,
 959				   unsigned int keylen)
 960{
 961	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 962	unsigned int ck_size, context_size;
 963	u16 alignment = 0;
 964	int err;
 965
 966	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 967	if (err)
 968		goto badkey_err;
 969	ck_size = chcr_keyctx_ck_size(keylen);
 970	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
 971	memcpy(ablkctx->key, key, keylen);
 972	ablkctx->enckey_len = keylen;
 973	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 974			keylen + alignment) >> 4;
 975
 976	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 977						0, 0, context_size);
 978	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
 979
 980	return 0;
 981badkey_err:
 982	ablkctx->enckey_len = 0;
 983
 984	return err;
 985}
 986
 987static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
 988				   const u8 *key,
 989				   unsigned int keylen)
 990{
 991	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 992	unsigned int ck_size, context_size;
 993	u16 alignment = 0;
 994	int err;
 995
 996	if (keylen < CTR_RFC3686_NONCE_SIZE)
 997		return -EINVAL;
 998	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
 999	       CTR_RFC3686_NONCE_SIZE);
1000
1001	keylen -= CTR_RFC3686_NONCE_SIZE;
1002	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
1003	if (err)
1004		goto badkey_err;
1005
1006	ck_size = chcr_keyctx_ck_size(keylen);
1007	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
1008	memcpy(ablkctx->key, key, keylen);
1009	ablkctx->enckey_len = keylen;
1010	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
1011			keylen + alignment) >> 4;
1012
1013	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
1014						0, 0, context_size);
1015	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
1016
1017	return 0;
1018badkey_err:
1019	ablkctx->enckey_len = 0;
1020
1021	return err;
1022}
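    /* Copy srciv to dstiv and add 'add' to the 128-bit big-endian counter
     * in dstiv, propagating the carry across its 32-bit words.
     */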
1023static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
1024{
1025	unsigned int size = AES_BLOCK_SIZE;
1026	__be32 *b = (__be32 *)(dstiv + size);
1027	u32 c, prev;
1028
1029	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
1030	for (; size >= 4; size -= 4) {
1031		prev = be32_to_cpu(*--b);
1032		c = prev + add;
1033		*b = cpu_to_be32(c);
1034		if (prev < c)
1035			break;
1036		add = 1;
1037	}
1038
1039}
1040
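    /* Clamp 'bytes' so that no more AES blocks are processed than the low
     * 32-bit word of the counter in 'iv' can count before wrapping.
     */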
1041static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
1042{
1043	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
1044	u64 c;
1045	u32 temp = be32_to_cpu(*--b);
1046
1047	temp = ~temp;
1048	c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
1049	if ((bytes / AES_BLOCK_SIZE) >= c)
1050		bytes = c * AES_BLOCK_SIZE;
1051	return bytes;
1052}
1053
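    /* Recompute the XTS tweak for the next work request: encrypt the saved
     * IV with the tweak half of the key, multiply by x in GF(2^128) once
     * per block of the previous request (x^8 per group of eight), and for
     * non-final requests decrypt the result back into IV form.
     */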
1054static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
1055			     u32 isfinal)
1056{
1057	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1058	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1059	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1060	struct crypto_aes_ctx aes;
1061	int ret, i;
1062	u8 *key;
1063	unsigned int keylen;
1064	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1065	int round8 = round / 8;
1066
1067	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1068
1069	keylen = ablkctx->enckey_len / 2;
1070	key = ablkctx->key + keylen;
1071	/* For a 192-bit key, remove the padding zeroes that were
1072	 * added in chcr_aes_xts_setkey.
1073	 */
1074	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
1075			== CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
1076		ret = aes_expandkey(&aes, key, keylen - 8);
1077	else
1078		ret = aes_expandkey(&aes, key, keylen);
1079	if (ret)
1080		return ret;
1081	aes_encrypt(&aes, iv, iv);
1082	for (i = 0; i < round8; i++)
1083		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1084
1085	for (i = 0; i < (round % 8); i++)
1086		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1087
1088	if (!isfinal)
1089		aes_decrypt(&aes, iv, iv);
1090
1091	memzero_explicit(&aes, sizeof(aes));
1092	return 0;
1093}
1094
1095static int chcr_update_cipher_iv(struct skcipher_request *req,
1096				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1097{
1098	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1099	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1100	int subtype = get_cryptoalg_subtype(tfm);
1101	int ret = 0;
1102
1103	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1104		ctr_add_iv(iv, req->iv, (reqctx->processed /
1105			   AES_BLOCK_SIZE));
1106	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1107		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1108			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1109						AES_BLOCK_SIZE) + 1);
1110	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1111		ret = chcr_update_tweak(req, iv, 0);
1112	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1113		if (reqctx->op)
1114			/*Updated before sending last WR*/
1115			memcpy(iv, req->iv, AES_BLOCK_SIZE);
1116		else
1117			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1118	}
1119
1120	return ret;
1121
1122}
1123
1124/* We need a separate function for the final IV because in RFC3686 the
1125 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
1126 * remains constant for subsequent update requests.
1127 */
1128
1129static int chcr_final_cipher_iv(struct skcipher_request *req,
1130				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1131{
1132	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1133	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1134	int subtype = get_cryptoalg_subtype(tfm);
1135	int ret = 0;
1136
1137	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1138		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
1139						       AES_BLOCK_SIZE));
1140	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
1141		if (!reqctx->partial_req)
1142			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1143		else
1144			ret = chcr_update_tweak(req, iv, 1);
1145	}
1146	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1147		/*Already updated for Decrypt*/
1148		if (!reqctx->op)
1149			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1150
1151	}
1152	return ret;
1153
1154}
1155
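    /* Completion handler for cipher work requests: when the whole request
     * has been processed, compute the final IV and complete the request;
     * otherwise update the IV and send another work request for the
     * remaining bytes, or punt to the software fallback if nothing more
     * fits in a work request.
     */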
1156static int chcr_handle_cipher_resp(struct skcipher_request *req,
1157				   unsigned char *input, int err)
1158{
1159	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1160	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1161	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1162	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1163	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1164	struct chcr_dev *dev = c_ctx(tfm)->dev;
1165	struct chcr_context *ctx = c_ctx(tfm);
1166	struct adapter *adap = padap(ctx->dev);
1167	struct cipher_wr_param wrparam;
1168	struct sk_buff *skb;
1169	int bytes;
1170
1171	if (err)
1172		goto unmap;
1173	if (req->cryptlen == reqctx->processed) {
1174		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1175				      req);
1176		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
1177		goto complete;
1178	}
1179
1180	if (!reqctx->imm) {
1181		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1182					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1183					  reqctx->src_ofst, reqctx->dst_ofst);
1184		if ((bytes + reqctx->processed) >= req->cryptlen)
1185			bytes  = req->cryptlen - reqctx->processed;
1186		else
1187			bytes = rounddown(bytes, 16);
1188	} else {
1189		/* CTR mode counter overflow */
1190		bytes  = req->cryptlen - reqctx->processed;
1191	}
1192	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1193	if (err)
1194		goto unmap;
1195
1196	if (unlikely(bytes == 0)) {
1197		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1198				      req);
1199		memcpy(req->iv, reqctx->init_iv, IV);
1200		atomic_inc(&adap->chcr_stats.fallback);
1201		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
1202					   reqctx->op);
1203		goto complete;
1204	}
1205
1206	if (get_cryptoalg_subtype(tfm) ==
1207	    CRYPTO_ALG_SUB_TYPE_CTR)
1208		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1209	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
1210	wrparam.req = req;
1211	wrparam.bytes = bytes;
1212	skb = create_cipher_wr(&wrparam);
1213	if (IS_ERR(skb)) {
1214		pr_err("%s : Failed to form WR. No memory\n", __func__);
1215		err = PTR_ERR(skb);
1216		goto unmap;
1217	}
1218	skb->dev = u_ctx->lldi.ports[0];
1219	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1220	chcr_send_wr(skb);
1221	reqctx->last_req_len = bytes;
1222	reqctx->processed += bytes;
1223	if (get_cryptoalg_subtype(tfm) ==
1224		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1225			CRYPTO_TFM_REQ_MAY_SLEEP) {
1226		complete(&ctx->cbc_aes_aio_done);
1227	}
1228	return 0;
1229unmap:
1230	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1231complete:
1232	if (get_cryptoalg_subtype(tfm) ==
1233		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1234			CRYPTO_TFM_REQ_MAY_SLEEP) {
1235		complete(&ctx->cbc_aes_aio_done);
1236	}
1237	chcr_dec_wrcount(dev);
1238	req->base.complete(&req->base, err);
1239	return err;
1240}
1241
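    /* Validate the request, map it for DMA, decide whether the payload can
     * go as immediate data, set up the per-request IV and build the first
     * cipher work request; requests the hardware cannot handle are punted
     * to the software fallback.
     */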
1242static int process_cipher(struct skcipher_request *req,
1243				  unsigned short qid,
1244				  struct sk_buff **skb,
1245				  unsigned short op_type)
1246{
1247	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1248	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1249	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1250	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1251	struct adapter *adap = padap(c_ctx(tfm)->dev);
1252	struct	cipher_wr_param wrparam;
1253	int bytes, err = -EINVAL;
1254	int subtype;
1255
1256	reqctx->processed = 0;
1257	reqctx->partial_req = 0;
1258	if (!req->iv)
1259		goto error;
1260	subtype = get_cryptoalg_subtype(tfm);
1261	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1262	    (req->cryptlen == 0) ||
1263	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
1264		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
1265			goto fallback;
1266		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
1267			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1268			goto fallback;
1269		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1270		       ablkctx->enckey_len, req->cryptlen, ivsize);
1271		goto error;
1272	}
1273
1274	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1275	if (err)
1276		goto error;
1277	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1278					    AES_MIN_KEY_SIZE +
1279					    sizeof(struct cpl_rx_phys_dsgl) +
1280					/* Min dsgl size */
1281					    32))) {
1282		/* Can be sent as immediate */
1283		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1284
1285		dnents = sg_nents_xlen(req->dst, req->cryptlen,
1286				       CHCR_DST_SG_SIZE, 0);
1287		phys_dsgl = get_space_for_phys_dsgl(dnents);
1288		kctx_len = roundup(ablkctx->enckey_len, 16);
1289		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1290		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
1291			SGE_MAX_WR_LEN;
1292		bytes = IV + req->cryptlen;
1293
1294	} else {
1295		reqctx->imm = 0;
1296	}
1297
1298	if (!reqctx->imm) {
1299		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1300					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1301					  0, 0);
1302		if ((bytes + reqctx->processed) >= req->cryptlen)
1303			bytes  = req->cryptlen - reqctx->processed;
1304		else
1305			bytes = rounddown(bytes, 16);
1306	} else {
1307		bytes = req->cryptlen;
1308	}
1309	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
1310		bytes = adjust_ctr_overflow(req->iv, bytes);
1311	}
1312	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1313		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1314		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1315				CTR_RFC3686_IV_SIZE);
1316
1317		/* initialize counter portion of counter block */
1318		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1319			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1320		memcpy(reqctx->init_iv, reqctx->iv, IV);
1321
1322	} else {
1323
1324		memcpy(reqctx->iv, req->iv, IV);
1325		memcpy(reqctx->init_iv, req->iv, IV);
1326	}
1327	if (unlikely(bytes == 0)) {
1328		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1329				      req);
1330fallback:       atomic_inc(&adap->chcr_stats.fallback);
1331		err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
1332					   subtype ==
1333					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
1334					   reqctx->iv : req->iv,
1335					   op_type);
1336		goto error;
1337	}
1338	reqctx->op = op_type;
1339	reqctx->srcsg = req->src;
1340	reqctx->dstsg = req->dst;
1341	reqctx->src_ofst = 0;
1342	reqctx->dst_ofst = 0;
1343	wrparam.qid = qid;
1344	wrparam.req = req;
1345	wrparam.bytes = bytes;
1346	*skb = create_cipher_wr(&wrparam);
1347	if (IS_ERR(*skb)) {
1348		err = PTR_ERR(*skb);
1349		goto unmap;
1350	}
1351	reqctx->processed = bytes;
1352	reqctx->last_req_len = bytes;
1353	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
1354
1355	return 0;
1356unmap:
1357	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1358error:
1359	return err;
1360}
1361
1362static int chcr_aes_encrypt(struct skcipher_request *req)
1363{
1364	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1365	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1366	struct chcr_dev *dev = c_ctx(tfm)->dev;
1367	struct sk_buff *skb = NULL;
1368	int err;
1369	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1370	struct chcr_context *ctx = c_ctx(tfm);
1371	unsigned int cpu;
1372
1373	cpu = get_cpu();
1374	reqctx->txqidx = cpu % ctx->ntxq;
1375	reqctx->rxqidx = cpu % ctx->nrxq;
1376	put_cpu();
1377
1378	err = chcr_inc_wrcount(dev);
1379	if (err)
1380		return -ENXIO;
1381	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1382						reqctx->txqidx) &&
1383		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1384			err = -ENOSPC;
1385			goto error;
1386	}
1387
1388	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1389			     &skb, CHCR_ENCRYPT_OP);
1390	if (err || !skb)
1391		return  err;
1392	skb->dev = u_ctx->lldi.ports[0];
1393	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1394	chcr_send_wr(skb);
1395	if (get_cryptoalg_subtype(tfm) ==
1396		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1397			CRYPTO_TFM_REQ_MAY_SLEEP) {
1398			reqctx->partial_req = 1;
1399			wait_for_completion(&ctx->cbc_aes_aio_done);
1400	}
1401	return -EINPROGRESS;
1402error:
1403	chcr_dec_wrcount(dev);
1404	return err;
1405}
1406
1407static int chcr_aes_decrypt(struct skcipher_request *req)
1408{
1409	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1410	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1411	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1412	struct chcr_dev *dev = c_ctx(tfm)->dev;
1413	struct sk_buff *skb = NULL;
1414	int err;
1415	struct chcr_context *ctx = c_ctx(tfm);
1416	unsigned int cpu;
1417
1418	cpu = get_cpu();
1419	reqctx->txqidx = cpu % ctx->ntxq;
1420	reqctx->rxqidx = cpu % ctx->nrxq;
1421	put_cpu();
1422
1423	err = chcr_inc_wrcount(dev);
1424	if (err)
1425		return -ENXIO;
1426
1427	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1428						reqctx->txqidx) &&
1429		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
1430			return -ENOSPC;
1431	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1432			     &skb, CHCR_DECRYPT_OP);
1433	if (err || !skb)
1434		return err;
1435	skb->dev = u_ctx->lldi.ports[0];
1436	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1437	chcr_send_wr(skb);
1438	return -EINPROGRESS;
1439}
1440static int chcr_device_init(struct chcr_context *ctx)
1441{
1442	struct uld_ctx *u_ctx = NULL;
1443	int txq_perchan, ntxq;
1444	int err = 0, rxq_perchan;
1445
1446	if (!ctx->dev) {
1447		u_ctx = assign_chcr_device();
1448		if (!u_ctx) {
1449			err = -ENXIO;
1450			pr_err("chcr device assignment fails\n");
1451			goto out;
1452		}
1453		ctx->dev = &u_ctx->dev;
1454		ntxq = u_ctx->lldi.ntxq;
1455		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1456		txq_perchan = ntxq / u_ctx->lldi.nchan;
1457		ctx->ntxq = ntxq;
1458		ctx->nrxq = u_ctx->lldi.nrxq;
1459		ctx->rxq_perchan = rxq_perchan;
1460		ctx->txq_perchan = txq_perchan;
1461	}
1462out:
1463	return err;
1464}
1465
1466static int chcr_init_tfm(struct crypto_skcipher *tfm)
1467{
1468	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1469	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1470	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1471
1472	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
1473				CRYPTO_ALG_NEED_FALLBACK);
1474	if (IS_ERR(ablkctx->sw_cipher)) {
1475		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1476		return PTR_ERR(ablkctx->sw_cipher);
1477	}
1478	init_completion(&ctx->cbc_aes_aio_done);
1479	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1480					 crypto_skcipher_reqsize(ablkctx->sw_cipher));
1481
1482	return chcr_device_init(ctx);
1483}
1484
1485static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
1486{
1487	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1488	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1489	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1490
1491	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1492	 * cannot be used as the fallback in chcr_handle_cipher_resp().
1493	 */
1494	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1495				CRYPTO_ALG_NEED_FALLBACK);
1496	if (IS_ERR(ablkctx->sw_cipher)) {
1497		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1498		return PTR_ERR(ablkctx->sw_cipher);
1499	}
1500	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1501				    crypto_skcipher_reqsize(ablkctx->sw_cipher));
1502	return chcr_device_init(ctx);
1503}
1504
1505
1506static void chcr_exit_tfm(struct crypto_skcipher *tfm)
1507{
1508	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1509	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1510
1511	crypto_free_skcipher(ablkctx->sw_cipher);
1512}
1513
1514static int get_alg_config(struct algo_param *params,
1515			  unsigned int auth_size)
1516{
1517	switch (auth_size) {
1518	case SHA1_DIGEST_SIZE:
1519		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1520		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1521		params->result_size = SHA1_DIGEST_SIZE;
1522		break;
1523	case SHA224_DIGEST_SIZE:
1524		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1525		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1526		params->result_size = SHA256_DIGEST_SIZE;
1527		break;
1528	case SHA256_DIGEST_SIZE:
1529		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1530		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1531		params->result_size = SHA256_DIGEST_SIZE;
1532		break;
1533	case SHA384_DIGEST_SIZE:
1534		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1535		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1536		params->result_size = SHA512_DIGEST_SIZE;
1537		break;
1538	case SHA512_DIGEST_SIZE:
1539		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1540		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1541		params->result_size = SHA512_DIGEST_SIZE;
1542		break;
1543	default:
1544		pr_err("ERROR, unsupported digest size\n");
1545		return -EINVAL;
1546	}
1547	return 0;
1548}
1549
1550static inline void chcr_free_shash(struct crypto_shash *base_hash)
1551{
1552		crypto_free_shash(base_hash);
1553}
1554
1555/**
1556 *	create_hash_wr - Create hash work request
1557 *	@req: Hash request base
1558 *	@param: Container for create_hash_wr()'s parameters
1559 */
1560static struct sk_buff *create_hash_wr(struct ahash_request *req,
1561				      struct hash_wr_param *param)
1562{
1563	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1564	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1565	struct chcr_context *ctx = h_ctx(tfm);
1566	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1567	struct sk_buff *skb = NULL;
1568	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1569	struct chcr_wr *chcr_req;
1570	struct ulptx_sgl *ulptx;
1571	unsigned int nents = 0, transhdr_len;
1572	unsigned int temp = 0;
1573	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1574		GFP_ATOMIC;
1575	struct adapter *adap = padap(h_ctx(tfm)->dev);
1576	int error = 0;
1577	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
1578
1579	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
1580	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1581	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1582				param->sg_len) <= SGE_MAX_WR_LEN;
1583	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1584		      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1585	nents += param->bfr_len ? 1 : 0;
1586	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1587				param->sg_len, 16) : (sgl_len(nents) * 8);
1588	transhdr_len = roundup(transhdr_len, 16);
1589
1590	skb = alloc_skb(transhdr_len, flags);
1591	if (!skb)
1592		return ERR_PTR(-ENOMEM);
1593	chcr_req = __skb_put_zero(skb, transhdr_len);
1594
1595	chcr_req->sec_cpl.op_ivinsrtofst =
1596		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
1597
1598	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1599
1600	chcr_req->sec_cpl.aadstart_cipherstop_hi =
1601		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1602	chcr_req->sec_cpl.cipherstop_lo_authinsert =
1603		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1604	chcr_req->sec_cpl.seqno_numivs =
1605		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1606					 param->opad_needed, 0);
1607
1608	chcr_req->sec_cpl.ivgen_hdrlen =
1609		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1610
1611	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1612	       param->alg_prm.result_size);
1613
1614	if (param->opad_needed)
1615		memcpy(chcr_req->key_ctx.key +
1616		       ((param->alg_prm.result_size <= 32) ? 32 :
1617			CHCR_HASH_MAX_DIGEST_SIZE),
1618		       hmacctx->opad, param->alg_prm.result_size);
1619
1620	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1621					    param->alg_prm.mk_size, 0,
1622					    param->opad_needed,
1623					    ((param->kctx_len +
1624					     sizeof(chcr_req->key_ctx)) >> 4));
1625	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1626	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1627				     DUMMY_BYTES);
1628	if (param->bfr_len != 0) {
1629		req_ctx->hctx_wr.dma_addr =
1630			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1631				       param->bfr_len, DMA_TO_DEVICE);
1632		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1633				       req_ctx->hctx_wr.dma_addr)) {
1634			error = -ENOMEM;
1635			goto err;
1636		}
1637		req_ctx->hctx_wr.dma_len = param->bfr_len;
1638	} else {
1639		req_ctx->hctx_wr.dma_addr = 0;
1640	}
1641	chcr_add_hash_src_ent(req, ulptx, param);
1642	/* Request up to max WR size */
1643	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1644				(param->sg_len + param->bfr_len) : 0);
1645	atomic_inc(&adap->chcr_stats.digest_rqst);
1646	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1647		    param->hash_size, transhdr_len,
1648		    temp,  0);
1649	req_ctx->hctx_wr.skb = skb;
1650	return skb;
1651err:
1652	kfree_skb(skb);
1653	return  ERR_PTR(error);
1654}
1655
1656static int chcr_ahash_update(struct ahash_request *req)
1657{
1658	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1659	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1660	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1661	struct chcr_context *ctx = h_ctx(rtfm);
1662	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1663	struct sk_buff *skb;
1664	u8 remainder = 0, bs;
1665	unsigned int nbytes = req->nbytes;
1666	struct hash_wr_param params;
1667	int error;
1668	unsigned int cpu;
1669
1670	cpu = get_cpu();
1671	req_ctx->txqidx = cpu % ctx->ntxq;
1672	req_ctx->rxqidx = cpu % ctx->nrxq;
1673	put_cpu();
1674
1675	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1676
1677	if (nbytes + req_ctx->reqlen >= bs) {
1678		remainder = (nbytes + req_ctx->reqlen) % bs;
1679		nbytes = nbytes + req_ctx->reqlen - remainder;
1680	} else {
1681		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1682				   + req_ctx->reqlen, nbytes, 0);
1683		req_ctx->reqlen += nbytes;
1684		return 0;
1685	}
1686	error = chcr_inc_wrcount(dev);
1687	if (error)
1688		return -ENXIO;
1689	/* Detach state for CHCR means lldi or padap is freed. Increasing the
1690	 * inflight count for dev guarantees that lldi and padap remain valid.
1691	 */
1692	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1693						req_ctx->txqidx) &&
1694		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1695			error = -ENOSPC;
1696			goto err;
1697	}
1698
1699	chcr_init_hctx_per_wr(req_ctx);
1700	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1701	if (error) {
1702		error = -ENOMEM;
1703		goto err;
1704	}
1705	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1706	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1707	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1708				     HASH_SPACE_LEFT(params.kctx_len), 0);
1709	if (params.sg_len > req->nbytes)
1710		params.sg_len = req->nbytes;
1711	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1712			req_ctx->reqlen;
1713	params.opad_needed = 0;
1714	params.more = 1;
1715	params.last = 0;
1716	params.bfr_len = req_ctx->reqlen;
1717	params.scmd1 = 0;
1718	req_ctx->hctx_wr.srcsg = req->src;
1719
1720	params.hash_size = params.alg_prm.result_size;
1721	req_ctx->data_len += params.sg_len + params.bfr_len;
1722	skb = create_hash_wr(req, &params);
1723	if (IS_ERR(skb)) {
1724		error = PTR_ERR(skb);
1725		goto unmap;
1726	}
1727
1728	req_ctx->hctx_wr.processed += params.sg_len;
1729	if (remainder) {
1730		/* Swap buffers */
1731		swap(req_ctx->reqbfr, req_ctx->skbfr);
1732		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1733				   req_ctx->reqbfr, remainder, req->nbytes -
1734				   remainder);
1735	}
1736	req_ctx->reqlen = remainder;
1737	skb->dev = u_ctx->lldi.ports[0];
1738	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1739	chcr_send_wr(skb);
1740	return -EINPROGRESS;
1741unmap:
1742	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1743err:
1744	chcr_dec_wrcount(dev);
1745	return error;
1746}
1747
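    /* Build the final SHA padding block: 0x80 followed by zeroes, with the
     * total message length in bits (scmd1 << 3) in the last 8 bytes of the
     * 64- or 128-byte block.
     */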
1748static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1749{
1750	memset(bfr_ptr, 0, bs);
1751	*bfr_ptr = 0x80;
1752	if (bs == 64)
1753		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
1754	else
1755		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
1756}
1757
1758static int chcr_ahash_final(struct ahash_request *req)
1759{
1760	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1761	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1762	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1763	struct hash_wr_param params;
1764	struct sk_buff *skb;
1765	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1766	struct chcr_context *ctx = h_ctx(rtfm);
1767	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1768	int error;
1769	unsigned int cpu;
1770
1771	cpu = get_cpu();
1772	req_ctx->txqidx = cpu % ctx->ntxq;
1773	req_ctx->rxqidx = cpu % ctx->nrxq;
1774	put_cpu();
1775
1776	error = chcr_inc_wrcount(dev);
1777	if (error)
1778		return -ENXIO;
1779
1780	chcr_init_hctx_per_wr(req_ctx);
1781	if (is_hmac(crypto_ahash_tfm(rtfm)))
1782		params.opad_needed = 1;
1783	else
1784		params.opad_needed = 0;
1785	params.sg_len = 0;
1786	req_ctx->hctx_wr.isfinal = 1;
1787	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1788	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1789	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1790		params.opad_needed = 1;
1791		params.kctx_len *= 2;
1792	} else {
1793		params.opad_needed = 0;
1794	}
1795
1796	req_ctx->hctx_wr.result = 1;
1797	params.bfr_len = req_ctx->reqlen;
1798	req_ctx->data_len += params.bfr_len + params.sg_len;
1799	req_ctx->hctx_wr.srcsg = req->src;
1800	if (req_ctx->reqlen == 0) {
1801		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1802		params.last = 0;
1803		params.more = 1;
1804		params.scmd1 = 0;
1805		params.bfr_len = bs;
1806
1807	} else {
1808		params.scmd1 = req_ctx->data_len;
1809		params.last = 1;
1810		params.more = 0;
1811	}
1812	params.hash_size = crypto_ahash_digestsize(rtfm);
1813	skb = create_hash_wr(req, &params);
1814	if (IS_ERR(skb)) {
1815		error = PTR_ERR(skb);
1816		goto err;
1817	}
1818	req_ctx->reqlen = 0;
1819	skb->dev = u_ctx->lldi.ports[0];
1820	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1821	chcr_send_wr(skb);
1822	return -EINPROGRESS;
1823err:
1824	chcr_dec_wrcount(dev);
1825	return error;
1826}
1827
1828static int chcr_ahash_finup(struct ahash_request *req)
1829{
1830	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1831	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1832	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1833	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1834	struct chcr_context *ctx = h_ctx(rtfm);
1835	struct sk_buff *skb;
1836	struct hash_wr_param params;
1837	u8  bs;
1838	int error;
1839	unsigned int cpu;
1840
1841	cpu = get_cpu();
1842	req_ctx->txqidx = cpu % ctx->ntxq;
1843	req_ctx->rxqidx = cpu % ctx->nrxq;
1844	put_cpu();
1845
1846	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1847	error = chcr_inc_wrcount(dev);
1848	if (error)
1849		return -ENXIO;
1850
1851	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1852						req_ctx->txqidx) &&
1853		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1854			error = -ENOSPC;
1855			goto err;
1856	}
1857	chcr_init_hctx_per_wr(req_ctx);
1858	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1859	if (error) {
1860		error = -ENOMEM;
1861		goto err;
1862	}
1863
1864	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1865	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1866	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1867		params.kctx_len *= 2;
1868		params.opad_needed = 1;
1869	} else {
1870		params.opad_needed = 0;
1871	}
1872
1873	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1874				    HASH_SPACE_LEFT(params.kctx_len), 0);
1875	if (params.sg_len < req->nbytes) {
1876		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1877			params.kctx_len /= 2;
1878			params.opad_needed = 0;
1879		}
1880		params.last = 0;
1881		params.more = 1;
1882		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1883					- req_ctx->reqlen;
1884		params.hash_size = params.alg_prm.result_size;
1885		params.scmd1 = 0;
1886	} else {
1887		params.last = 1;
1888		params.more = 0;
1889		params.sg_len = req->nbytes;
1890		params.hash_size = crypto_ahash_digestsize(rtfm);
1891		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1892				params.sg_len;
1893	}
1894	params.bfr_len = req_ctx->reqlen;
1895	req_ctx->data_len += params.bfr_len + params.sg_len;
1896	req_ctx->hctx_wr.result = 1;
1897	req_ctx->hctx_wr.srcsg = req->src;
1898	if ((req_ctx->reqlen + req->nbytes) == 0) {
1899		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1900		params.last = 0;
1901		params.more = 1;
1902		params.scmd1 = 0;
1903		params.bfr_len = bs;
1904	}
1905	skb = create_hash_wr(req, &params);
1906	if (IS_ERR(skb)) {
1907		error = PTR_ERR(skb);
1908		goto unmap;
1909	}
1910	req_ctx->reqlen = 0;
1911	req_ctx->hctx_wr.processed += params.sg_len;
1912	skb->dev = u_ctx->lldi.ports[0];
1913	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1914	chcr_send_wr(skb);
1915	return -EINPROGRESS;
1916unmap:
1917	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1918err:
1919	chcr_dec_wrcount(dev);
1920	return error;
1921}
1922
1923static int chcr_ahash_digest(struct ahash_request *req)
1924{
1925	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1926	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1927	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1928	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1929	struct chcr_context *ctx = h_ctx(rtfm);
1930	struct sk_buff *skb;
1931	struct hash_wr_param params;
1932	u8  bs;
1933	int error;
1934	unsigned int cpu;
1935
1936	cpu = get_cpu();
1937	req_ctx->txqidx = cpu % ctx->ntxq;
1938	req_ctx->rxqidx = cpu % ctx->nrxq;
1939	put_cpu();
1940
1941	rtfm->init(req);
1942	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1943	error = chcr_inc_wrcount(dev);
1944	if (error)
1945		return -ENXIO;
1946
1947	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1948						req_ctx->txqidx) &&
1949		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1950			error = -ENOSPC;
1951			goto err;
1952	}
1953
1954	chcr_init_hctx_per_wr(req_ctx);
1955	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1956	if (error) {
1957		error = -ENOMEM;
1958		goto err;
1959	}
1960
1961	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1962	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1963	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1964		params.kctx_len *= 2;
1965		params.opad_needed = 1;
1966	} else {
1967		params.opad_needed = 0;
1968	}
1969	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1970				HASH_SPACE_LEFT(params.kctx_len), 0);
1971	if (params.sg_len < req->nbytes) {
1972		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1973			params.kctx_len /= 2;
1974			params.opad_needed = 0;
1975		}
1976		params.last = 0;
1977		params.more = 1;
1978		params.scmd1 = 0;
1979		params.sg_len = rounddown(params.sg_len, bs);
1980		params.hash_size = params.alg_prm.result_size;
1981	} else {
1982		params.sg_len = req->nbytes;
1983		params.hash_size = crypto_ahash_digestsize(rtfm);
1984		params.last = 1;
1985		params.more = 0;
1986		params.scmd1 = req->nbytes + req_ctx->data_len;
1987
1988	}
1989	params.bfr_len = 0;
1990	req_ctx->hctx_wr.result = 1;
1991	req_ctx->hctx_wr.srcsg = req->src;
1992	req_ctx->data_len += params.bfr_len + params.sg_len;
1993
1994	if (req->nbytes == 0) {
1995		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1996		params.more = 1;
1997		params.bfr_len = bs;
1998	}
1999
2000	skb = create_hash_wr(req, &params);
2001	if (IS_ERR(skb)) {
2002		error = PTR_ERR(skb);
2003		goto unmap;
2004	}
2005	req_ctx->hctx_wr.processed += params.sg_len;
2006	skb->dev = u_ctx->lldi.ports[0];
2007	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
2008	chcr_send_wr(skb);
2009	return -EINPROGRESS;
2010unmap:
2011	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2012err:
2013	chcr_dec_wrcount(dev);
2014	return error;
2015}
2016
2017static int chcr_ahash_continue(struct ahash_request *req)
2018{
2019	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2020	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2021	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
2022	struct chcr_context *ctx = h_ctx(rtfm);
2023	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2024	struct sk_buff *skb;
2025	struct hash_wr_param params;
2026	u8  bs;
2027	int error;
2028	unsigned int cpu;
2029
2030	cpu = get_cpu();
2031	reqctx->txqidx = cpu % ctx->ntxq;
2032	reqctx->rxqidx = cpu % ctx->nrxq;
2033	put_cpu();
2034
2035	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2036	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2037	params.kctx_len = roundup(params.alg_prm.result_size, 16);
2038	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2039		params.kctx_len *= 2;
2040		params.opad_needed = 1;
2041	} else {
2042		params.opad_needed = 0;
2043	}
2044	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2045					    HASH_SPACE_LEFT(params.kctx_len),
2046					    hctx_wr->src_ofst);
2047	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2048		params.sg_len = req->nbytes - hctx_wr->processed;
2049	if (!hctx_wr->result ||
2050	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2051		if (is_hmac(crypto_ahash_tfm(rtfm))) {
2052			params.kctx_len /= 2;
2053			params.opad_needed = 0;
2054		}
2055		params.last = 0;
2056		params.more = 1;
2057		params.sg_len = rounddown(params.sg_len, bs);
2058		params.hash_size = params.alg_prm.result_size;
2059		params.scmd1 = 0;
2060	} else {
2061		params.last = 1;
2062		params.more = 0;
2063		params.hash_size = crypto_ahash_digestsize(rtfm);
2064		params.scmd1 = reqctx->data_len + params.sg_len;
2065	}
2066	params.bfr_len = 0;
2067	reqctx->data_len += params.sg_len;
2068	skb = create_hash_wr(req, &params);
2069	if (IS_ERR(skb)) {
2070		error = PTR_ERR(skb);
2071		goto err;
2072	}
2073	hctx_wr->processed += params.sg_len;
2074	skb->dev = u_ctx->lldi.ports[0];
2075	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2076	chcr_send_wr(skb);
2077	return 0;
2078err:
2079	return error;
2080}
2081
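    /* Completion handler for hash work requests: copy out the final digest
     * or the partial hash, and issue the next work request if more source
     * data remains for this request.
     */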
2082static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2083					  unsigned char *input,
2084					  int err)
2085{
2086	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2087	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2088	int digestsize, updated_digestsize;
2089	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2090	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2091	struct chcr_dev *dev = h_ctx(tfm)->dev;
2092
2093	if (input == NULL)
2094		goto out;
2095	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2096	updated_digestsize = digestsize;
2097	if (digestsize == SHA224_DIGEST_SIZE)
2098		updated_digestsize = SHA256_DIGEST_SIZE;
2099	else if (digestsize == SHA384_DIGEST_SIZE)
2100		updated_digestsize = SHA512_DIGEST_SIZE;
2101
2102	if (hctx_wr->dma_addr) {
2103		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2104				 hctx_wr->dma_len, DMA_TO_DEVICE);
2105		hctx_wr->dma_addr = 0;
2106	}
2107	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2108				 req->nbytes)) {
2109		if (hctx_wr->result == 1) {
2110			hctx_wr->result = 0;
2111			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2112			       digestsize);
2113		} else {
2114			memcpy(reqctx->partial_hash,
2115			       input + sizeof(struct cpl_fw6_pld),
2116			       updated_digestsize);
2117
2118		}
2119		goto unmap;
2120	}
2121	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2122	       updated_digestsize);
2123
2124	err = chcr_ahash_continue(req);
2125	if (err)
2126		goto unmap;
2127	return;
2128unmap:
2129	if (hctx_wr->is_sg_map)
2130		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2131
2132
2133out:
2134	chcr_dec_wrcount(dev);
2135	req->base.complete(&req->base, err);
2136}
2137
2138/*
2139 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
2140 *	@req: crypto request
2141 */
2142int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2143			 int err)
2144{
2145	struct crypto_tfm *tfm = req->tfm;
2146	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2147	struct adapter *adap = padap(ctx->dev);
2148
2149	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2150	case CRYPTO_ALG_TYPE_AEAD:
2151		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2152		break;
2153
2154	case CRYPTO_ALG_TYPE_SKCIPHER:
2155		chcr_handle_cipher_resp(skcipher_request_cast(req),
2156					       input, err);
2157		break;
2158	case CRYPTO_ALG_TYPE_AHASH:
2159		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2160	}
2161	atomic_inc(&adap->chcr_stats.complete);
2162	return err;
2163}
2164static int chcr_ahash_export(struct ahash_request *areq, void *out)
2165{
2166	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2167	struct chcr_ahash_req_ctx *state = out;
2168
2169	state->reqlen = req_ctx->reqlen;
2170	state->data_len = req_ctx->data_len;
2171	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2172	memcpy(state->partial_hash, req_ctx->partial_hash,
2173	       CHCR_HASH_MAX_DIGEST_SIZE);
2174	chcr_init_hctx_per_wr(state);
2175	return 0;
2176}
2177
2178static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2179{
2180	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2181	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2182
2183	req_ctx->reqlen = state->reqlen;
2184	req_ctx->data_len = state->data_len;
2185	req_ctx->reqbfr = req_ctx->bfr1;
2186	req_ctx->skbfr = req_ctx->bfr2;
2187	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2188	memcpy(req_ctx->partial_hash, state->partial_hash,
2189	       CHCR_HASH_MAX_DIGEST_SIZE);
2190	chcr_init_hctx_per_wr(req_ctx);
2191	return 0;
2192}
2193
2194static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2195			     unsigned int keylen)
2196{
2197	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2198	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2199	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2200	unsigned int i, err = 0, updated_digestsize;
2201
2202	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2203
2204	/* Use the key to calculate the ipad and opad. The ipad will be sent with
2205	 * the first request's data; the opad will be sent with the final hash
2206	 * result. The ipad is kept in hmacctx->ipad, the opad in hmacctx->opad.
2207	 */
2208	shash->tfm = hmacctx->base_hash;
2209	if (keylen > bs) {
2210		err = crypto_shash_digest(shash, key, keylen,
2211					  hmacctx->ipad);
2212		if (err)
2213			goto out;
2214		keylen = digestsize;
2215	} else {
2216		memcpy(hmacctx->ipad, key, keylen);
2217	}
2218	memset(hmacctx->ipad + keylen, 0, bs - keylen);
2219	memcpy(hmacctx->opad, hmacctx->ipad, bs);
2220
2221	for (i = 0; i < bs / sizeof(int); i++) {
2222		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2223		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2224	}
2225
2226	updated_digestsize = digestsize;
2227	if (digestsize == SHA224_DIGEST_SIZE)
2228		updated_digestsize = SHA256_DIGEST_SIZE;
2229	else if (digestsize == SHA384_DIGEST_SIZE)
2230		updated_digestsize = SHA512_DIGEST_SIZE;
2231	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2232					hmacctx->ipad, digestsize);
2233	if (err)
2234		goto out;
2235	chcr_change_order(hmacctx->ipad, updated_digestsize);
2236
2237	err = chcr_compute_partial_hash(shash, hmacctx->opad,
2238					hmacctx->opad, digestsize);
2239	if (err)
2240		goto out;
2241	chcr_change_order(hmacctx->opad, updated_digestsize);
2242out:
2243	return err;
2244}
2245
2246static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2247			       unsigned int key_len)
2248{
2249	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2250	unsigned short context_size = 0;
2251	int err;
2252
2253	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2254	if (err)
2255		goto badkey_err;
2256
2257	memcpy(ablkctx->key, key, key_len);
2258	ablkctx->enckey_len = key_len;
2259	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2260	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2261	/* Both keys for XTS must be aligned to a 16-byte boundary by padding
2262	 * with zeros, so each 24-byte key half gets 8 bytes of zero padding.
2263	 */
2264	if (key_len == 48) {
2265		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
2266				+ 16) >> 4;
2267		memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
2268		memset(ablkctx->key + 24, 0, 8);
2269		memset(ablkctx->key + 56, 0, 8);
2270		ablkctx->enckey_len = 64;
2271		ablkctx->key_ctx_hdr =
2272			FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
2273					 CHCR_KEYCTX_NO_KEY, 1,
2274					 0, context_size);
2275	} else {
2276		ablkctx->key_ctx_hdr =
2277		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2278				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2279				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2280				 CHCR_KEYCTX_NO_KEY, 1,
2281				 0, context_size);
2282	}
2283	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2284	return 0;
2285badkey_err:
2286	ablkctx->enckey_len = 0;
2287
2288	return err;
2289}
2290
2291static int chcr_sha_init(struct ahash_request *areq)
2292{
2293	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2294	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2295	int digestsize =  crypto_ahash_digestsize(tfm);
2296
2297	req_ctx->data_len = 0;
2298	req_ctx->reqlen = 0;
2299	req_ctx->reqbfr = req_ctx->bfr1;
2300	req_ctx->skbfr = req_ctx->bfr2;
2301	copy_hash_init_values(req_ctx->partial_hash, digestsize);
2302
2303	return 0;
2304}
2305
2306static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2307{
2308	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2309				 sizeof(struct chcr_ahash_req_ctx));
2310	return chcr_device_init(crypto_tfm_ctx(tfm));
2311}
2312
2313static int chcr_hmac_init(struct ahash_request *areq)
2314{
2315	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2316	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2317	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2318	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2319	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2320
2321	chcr_sha_init(areq);
2322	req_ctx->data_len = bs;
2323	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2324		if (digestsize == SHA224_DIGEST_SIZE)
2325			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2326			       SHA256_DIGEST_SIZE);
2327		else if (digestsize == SHA384_DIGEST_SIZE)
2328			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2329			       SHA512_DIGEST_SIZE);
2330		else
2331			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2332			       digestsize);
2333	}
2334	return 0;
2335}
2336
2337static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2338{
2339	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2340	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2341	unsigned int digestsize =
2342		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2343
2344	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2345				 sizeof(struct chcr_ahash_req_ctx));
2346	hmacctx->base_hash = chcr_alloc_shash(digestsize);
2347	if (IS_ERR(hmacctx->base_hash))
2348		return PTR_ERR(hmacctx->base_hash);
2349	return chcr_device_init(crypto_tfm_ctx(tfm));
2350}
2351
2352static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2353{
2354	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2355	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2356
2357	if (hmacctx->base_hash) {
2358		chcr_free_shash(hmacctx->base_hash);
2359		hmacctx->base_hash = NULL;
2360	}
2361}
2362
2363inline void chcr_aead_common_exit(struct aead_request *req)
2364{
2365	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2366	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2367	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2368
2369	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2370}
2371
2372static int chcr_aead_common_init(struct aead_request *req)
2373{
2374	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2375	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2376	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2377	unsigned int authsize = crypto_aead_authsize(tfm);
2378	int error = -EINVAL;
2379
2380	/* validate key size */
2381	if (aeadctx->enckey_len == 0)
2382		goto err;
2383	if (reqctx->op && req->cryptlen < authsize)
2384		goto err;
2385	if (reqctx->b0_len)
2386		reqctx->scratch_pad = reqctx->iv + IV;
2387	else
2388		reqctx->scratch_pad = NULL;
2389
2390	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2391				  reqctx->op);
2392	if (error) {
2393		error = -ENOMEM;
2394		goto err;
2395	}
2396
2397	return 0;
2398err:
2399	return error;
2400}
2401
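    /* Decide whether the request must be handed to the software fallback:
     * zero-length payload, too many destination SG entries, AAD larger
     * than the hardware limit, or a work request exceeding SGE_MAX_WR_LEN.
     */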
2402static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2403				   int aadmax, int wrlen,
2404				   unsigned short op_type)
2405{
2406	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2407
2408	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2409	    dst_nents > MAX_DSGL_ENT ||
2410	    (req->assoclen > aadmax) ||
2411	    (wrlen > SGE_MAX_WR_LEN))
2412		return 1;
2413	return 0;
2414}
2415
2416static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2417{
2418	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2419	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2420	struct aead_request *subreq = aead_request_ctx_dma(req);
2421
2422	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2423	aead_request_set_callback(subreq, req->base.flags,
2424				  req->base.complete, req->base.data);
2425	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2426				 req->iv);
2427	aead_request_set_ad(subreq, req->assoclen);
2428	return op_type ? crypto_aead_decrypt(subreq) :
2429		crypto_aead_encrypt(subreq);
2430}
2431
2432static struct sk_buff *create_authenc_wr(struct aead_request *req,
2433					 unsigned short qid,
2434					 int size)
2435{
2436	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2437	struct chcr_context *ctx = a_ctx(tfm);
2438	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2439	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2440	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2441	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2442	struct sk_buff *skb = NULL;
2443	struct chcr_wr *chcr_req;
2444	struct cpl_rx_phys_dsgl *phys_cpl;
2445	struct ulptx_sgl *ulptx;
2446	unsigned int transhdr_len;
2447	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2448	unsigned int   kctx_len = 0, dnents, snents;
2449	unsigned int  authsize = crypto_aead_authsize(tfm);
2450	int error = -EINVAL;
2451	u8 *ivptr;
2452	int null = 0;
2453	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2454		GFP_ATOMIC;
2455	struct adapter *adap = padap(ctx->dev);
2456	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2457
2458	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2459	if (req->cryptlen == 0)
2460		return NULL;
2461
2462	reqctx->b0_len = 0;
2463	error = chcr_aead_common_init(req);
2464	if (error)
2465		return ERR_PTR(error);
2466
2467	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2468		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2469		null = 1;
2470	}
2471	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2472		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2473	dnents += MIN_AUTH_SG; // For IV
2474	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2475			       CHCR_SRC_SG_SIZE, 0);
2476	dst_size = get_space_for_phys_dsgl(dnents);
2477	kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2478		- sizeof(chcr_req->key_ctx);
2479	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2480	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2481			SGE_MAX_WR_LEN;
2482	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2483			: (sgl_len(snents) * 8);
2484	transhdr_len += temp;
2485	transhdr_len = roundup(transhdr_len, 16);
2486
2487	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2488				    transhdr_len, reqctx->op)) {
2489		atomic_inc(&adap->chcr_stats.fallback);
2490		chcr_aead_common_exit(req);
2491		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2492	}
2493	skb = alloc_skb(transhdr_len, flags);
2494	if (!skb) {
2495		error = -ENOMEM;
2496		goto err;
2497	}
2498
2499	chcr_req = __skb_put_zero(skb, transhdr_len);
2500
2501	temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2502
2503	/*
2504	 * Input order is AAD, IV and Payload, where the IV should be
2505	 * included as part of the authdata. All other fields should be
2506	 * filled according to the hardware spec.
2507	 */
2508	chcr_req->sec_cpl.op_ivinsrtofst =
2509				FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2510	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2511	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2512					null ? 0 : 1 + IV,
2513					null ? 0 : IV + req->assoclen,
2514					req->assoclen + IV + 1,
2515					(temp & 0x1F0) >> 4);
2516	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2517					temp & 0xF,
2518					null ? 0 : req->assoclen + IV + 1,
2519					temp, temp);
2520	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2521	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2522		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2523	else
2524		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2525	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2526					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2527					temp,
2528					actx->auth_mode, aeadctx->hmac_ctrl,
2529					IV >> 1);
2530	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2531					 0, 0, dst_size);
2532
2533	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2534	if (reqctx->op == CHCR_ENCRYPT_OP ||
2535		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2536		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2537		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2538		       aeadctx->enckey_len);
2539	else
2540		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2541		       aeadctx->enckey_len);
2542
2543	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2544	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2545	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2546	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2547	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2548	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2549	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2550		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2551		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2552				CTR_RFC3686_IV_SIZE);
2553		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2554			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2555	} else {
2556		memcpy(ivptr, req->iv, IV);
2557	}
2558	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2559	chcr_add_aead_src_ent(req, ulptx);
2560	atomic_inc(&adap->chcr_stats.cipher_rqst);
2561	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2562		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2563	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2564		   transhdr_len, temp, 0);
2565	reqctx->skb = skb;
2566
2567	return skb;
2568err:
2569	chcr_aead_common_exit(req);
2570
2571	return ERR_PTR(error);
2572}
2573
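/*
 * DMA-map the IV/B0 scratch buffer and the source/destination scatterlists.
 * In-place requests map a single list bidirectionally; out-of-place requests
 * map src and dst separately and undo the src mapping if dst mapping fails.
 */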
2574int chcr_aead_dma_map(struct device *dev,
2575		      struct aead_request *req,
2576		      unsigned short op_type)
2577{
2578	int error;
2579	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2580	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2581	unsigned int authsize = crypto_aead_authsize(tfm);
2582	int src_len, dst_len;
2583
2584	/* calculate and handle src and dst sg lengths separately
2585	 * for in-place and out-of-place operations
2586	 */
2587	if (req->src == req->dst) {
2588		src_len = req->assoclen + req->cryptlen + (op_type ?
2589							0 : authsize);
2590		dst_len = src_len;
2591	} else {
2592		src_len = req->assoclen + req->cryptlen;
2593		dst_len = req->assoclen + req->cryptlen + (op_type ?
2594							-authsize : authsize);
2595	}
2596
2597	if (!req->cryptlen || !src_len || !dst_len)
2598		return 0;
2599	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2600					DMA_BIDIRECTIONAL);
2601	if (dma_mapping_error(dev, reqctx->iv_dma))
2602		return -ENOMEM;
2603	if (reqctx->b0_len)
2604		reqctx->b0_dma = reqctx->iv_dma + IV;
2605	else
2606		reqctx->b0_dma = 0;
2607	if (req->src == req->dst) {
2608		error = dma_map_sg(dev, req->src,
2609				sg_nents_for_len(req->src, src_len),
2610					DMA_BIDIRECTIONAL);
2611		if (!error)
2612			goto err;
2613	} else {
2614		error = dma_map_sg(dev, req->src,
2615				   sg_nents_for_len(req->src, src_len),
2616				   DMA_TO_DEVICE);
2617		if (!error)
2618			goto err;
2619		error = dma_map_sg(dev, req->dst,
2620				   sg_nents_for_len(req->dst, dst_len),
2621				   DMA_FROM_DEVICE);
2622		if (!error) {
2623			dma_unmap_sg(dev, req->src,
2624				     sg_nents_for_len(req->src, src_len),
2625				     DMA_TO_DEVICE);
2626			goto err;
2627		}
2628	}
2629
2630	return 0;
2631err:
2632	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2633	return -ENOMEM;
2634}
2635
2636void chcr_aead_dma_unmap(struct device *dev,
2637			 struct aead_request *req,
2638			 unsigned short op_type)
2639{
2640	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2641	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2642	unsigned int authsize = crypto_aead_authsize(tfm);
2643	int src_len, dst_len;
2644
2645	/* calculate and handle src and dst sg lengths separately
2646	 * for in-place and out-of-place operations
2647	 */
2648	if (req->src == req->dst) {
2649		src_len = req->assoclen + req->cryptlen + (op_type ?
2650							0 : authsize);
2651		dst_len = src_len;
2652	} else {
2653		src_len = req->assoclen + req->cryptlen;
2654		dst_len = req->assoclen + req->cryptlen + (op_type ?
2655						-authsize : authsize);
2656	}
2657
2658	if (!req->cryptlen || !src_len || !dst_len)
2659		return;
2660
2661	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2662					DMA_BIDIRECTIONAL);
2663	if (req->src == req->dst) {
2664		dma_unmap_sg(dev, req->src,
2665			     sg_nents_for_len(req->src, src_len),
2666			     DMA_BIDIRECTIONAL);
2667	} else {
2668		dma_unmap_sg(dev, req->src,
2669			     sg_nents_for_len(req->src, src_len),
2670			     DMA_TO_DEVICE);
2671		dma_unmap_sg(dev, req->dst,
2672			     sg_nents_for_len(req->dst, dst_len),
2673			     DMA_FROM_DEVICE);
2674	}
2675}
2676
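/*
 * Emit the source side of the work request: for immediate-data requests copy
 * B0 (if any) and AAD + payload directly into the WR, otherwise build a
 * ULPTX SGL referencing the DMA-mapped B0 and the source scatterlist.
 */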
2677void chcr_add_aead_src_ent(struct aead_request *req,
2678			   struct ulptx_sgl *ulptx)
2679{
2680	struct ulptx_walk ulp_walk;
2681	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2682
2683	if (reqctx->imm) {
2684		u8 *buf = (u8 *)ulptx;
2685
2686		if (reqctx->b0_len) {
2687			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2688			buf += reqctx->b0_len;
2689		}
2690		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2691				   buf, req->cryptlen + req->assoclen, 0);
2692	} else {
2693		ulptx_walk_init(&ulp_walk, ulptx);
2694		if (reqctx->b0_len)
2695			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2696					    reqctx->b0_dma);
2697		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2698				  req->assoclen,  0);
2699		ulptx_walk_end(&ulp_walk);
2700	}
2701}
2702
2703void chcr_add_aead_dst_ent(struct aead_request *req,
2704			   struct cpl_rx_phys_dsgl *phys_cpl,
2705			   unsigned short qid)
2706{
2707	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2708	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2709	struct dsgl_walk dsgl_walk;
2710	unsigned int authsize = crypto_aead_authsize(tfm);
2711	struct chcr_context *ctx = a_ctx(tfm);
2712	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2713	u32 temp;
2714	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2715
2716	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2717	dsgl_walk_init(&dsgl_walk, phys_cpl);
2718	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2719	temp = req->assoclen + req->cryptlen +
2720		(reqctx->op ? -authsize : authsize);
2721	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2722	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2723}
2724
2725void chcr_add_cipher_src_ent(struct skcipher_request *req,
2726			     void *ulptx,
2727			     struct  cipher_wr_param *wrparam)
2728{
2729	struct ulptx_walk ulp_walk;
2730	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2731	u8 *buf = ulptx;
2732
2733	memcpy(buf, reqctx->iv, IV);
2734	buf += IV;
2735	if (reqctx->imm) {
2736		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2737				   buf, wrparam->bytes, reqctx->processed);
2738	} else {
2739		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2740		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2741				  reqctx->src_ofst);
2742		reqctx->srcsg = ulp_walk.last_sg;
2743		reqctx->src_ofst = ulp_walk.last_sg_len;
2744		ulptx_walk_end(&ulp_walk);
2745	}
2746}
2747
2748void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2749			     struct cpl_rx_phys_dsgl *phys_cpl,
2750			     struct  cipher_wr_param *wrparam,
2751			     unsigned short qid)
2752{
2753	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2754	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2755	struct chcr_context *ctx = c_ctx(tfm);
2756	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2757	struct dsgl_walk dsgl_walk;
2758	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2759
2760	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2761	dsgl_walk_init(&dsgl_walk, phys_cpl);
2762	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2763			 reqctx->dst_ofst);
2764	reqctx->dstsg = dsgl_walk.last_sg;
2765	reqctx->dst_ofst = dsgl_walk.last_sg_len;
2766	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2767}
2768
2769void chcr_add_hash_src_ent(struct ahash_request *req,
2770			   struct ulptx_sgl *ulptx,
2771			   struct hash_wr_param *param)
2772{
2773	struct ulptx_walk ulp_walk;
2774	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2775
2776	if (reqctx->hctx_wr.imm) {
2777		u8 *buf = (u8 *)ulptx;
2778
2779		if (param->bfr_len) {
2780			memcpy(buf, reqctx->reqbfr, param->bfr_len);
2781			buf += param->bfr_len;
2782		}
2783
2784		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2785				   sg_nents(reqctx->hctx_wr.srcsg), buf,
2786				   param->sg_len, 0);
2787	} else {
2788		ulptx_walk_init(&ulp_walk, ulptx);
2789		if (param->bfr_len)
2790			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2791					    reqctx->hctx_wr.dma_addr);
2792		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2793				  param->sg_len, reqctx->hctx_wr.src_ofst);
2794		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2795		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2796		ulptx_walk_end(&ulp_walk);
2797	}
2798}
2799
2800int chcr_hash_dma_map(struct device *dev,
2801		      struct ahash_request *req)
2802{
2803	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2804	int error = 0;
2805
2806	if (!req->nbytes)
2807		return 0;
2808	error = dma_map_sg(dev, req->src, sg_nents(req->src),
2809			   DMA_TO_DEVICE);
2810	if (!error)
2811		return -ENOMEM;
2812	req_ctx->hctx_wr.is_sg_map = 1;
2813	return 0;
2814}
2815
2816void chcr_hash_dma_unmap(struct device *dev,
2817			 struct ahash_request *req)
2818{
2819	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2820
2821	if (!req->nbytes)
2822		return;
2823
2824	dma_unmap_sg(dev, req->src, sg_nents(req->src),
2825			   DMA_TO_DEVICE);
2826	req_ctx->hctx_wr.is_sg_map = 0;
2827
2828}
2829
2830int chcr_cipher_dma_map(struct device *dev,
2831			struct skcipher_request *req)
2832{
2833	int error;
2834
2835	if (req->src == req->dst) {
2836		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2837				   DMA_BIDIRECTIONAL);
2838		if (!error)
2839			goto err;
2840	} else {
2841		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2842				   DMA_TO_DEVICE);
2843		if (!error)
2844			goto err;
2845		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2846				   DMA_FROM_DEVICE);
2847		if (!error) {
2848			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2849				   DMA_TO_DEVICE);
2850			goto err;
2851		}
2852	}
2853
2854	return 0;
2855err:
2856	return -ENOMEM;
2857}
2858
2859void chcr_cipher_dma_unmap(struct device *dev,
2860			   struct skcipher_request *req)
2861{
2862	if (req->src == req->dst) {
2863		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2864				   DMA_BIDIRECTIONAL);
2865	} else {
2866		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2867				   DMA_TO_DEVICE);
2868		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2869				   DMA_FROM_DEVICE);
2870	}
2871}
2872
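/*
 * Encode the CCM message length field: store msglen big-endian in the last
 * csize bytes of block, returning -EOVERFLOW if it does not fit.
 */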
2873static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2874{
2875	__be32 data;
2876
2877	memset(block, 0, csize);
2878	block += csize;
2879
2880	if (csize >= 4)
2881		csize = 4;
2882	else if (msglen > (unsigned int)(1 << (8 * csize)))
2883		return -EOVERFLOW;
2884
2885	data = cpu_to_be32(msglen);
2886	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2887
2888	return 0;
2889}
2890
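/*
 * Construct the CCM B0 block (RFC 3610) in the scratch pad: copy the
 * formatted IV, encode the tag length in the flags byte, set the Adata bit
 * when AAD is present, and append the message length field.
 */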
2891static int generate_b0(struct aead_request *req, u8 *ivptr,
2892			unsigned short op_type)
2893{
2894	unsigned int l, lp, m;
2895	int rc;
2896	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2897	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2898	u8 *b0 = reqctx->scratch_pad;
2899
2900	m = crypto_aead_authsize(aead);
2901
2902	memcpy(b0, ivptr, 16);
2903
2904	lp = b0[0];
2905	l = lp + 1;
2906
2907	/* set m, bits 3-5 */
2908	*b0 |= (8 * ((m - 2) / 2));
2909
2910	/* set adata, bit 6, if associated data is used */
2911	if (req->assoclen)
2912		*b0 |= 64;
2913	rc = set_msg_len(b0 + 16 - l,
2914			 (op_type == CHCR_DECRYPT_OP) ?
2915			 req->cryptlen - m : req->cryptlen, l);
2916
2917	return rc;
2918}
2919
2920static inline int crypto_ccm_check_iv(const u8 *iv)
2921{
2922	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2923	if (iv[0] < 1 || iv[0] > 7)
2924		return -EINVAL;
2925
2926	return 0;
2927}
2928
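/*
 * Format the CCM nonce/counter block: for RFC 4309 build it from the 3-byte
 * salt and the 8-byte per-request IV, otherwise use the caller's 16-byte IV;
 * record the AAD length in the scratch pad, generate B0, then zero the
 * counter portion of the IV.
 */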
2929static int ccm_format_packet(struct aead_request *req,
2930			     u8 *ivptr,
2931			     unsigned int sub_type,
2932			     unsigned short op_type,
2933			     unsigned int assoclen)
2934{
2935	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2936	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2937	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2938	int rc = 0;
2939
2940	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2941		ivptr[0] = 3;
2942		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2943		memcpy(ivptr + 4, req->iv, 8);
2944		memset(ivptr + 12, 0, 4);
2945	} else {
2946		memcpy(ivptr, req->iv, 16);
2947	}
2948	if (assoclen)
2949		put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
2950
2951	rc = generate_b0(req, ivptr, op_type);
2952	/* zero the ctr value */
2953	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2954	return rc;
2955}
2956
2957static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2958				  unsigned int dst_size,
2959				  struct aead_request *req,
2960				  unsigned short op_type)
2961{
2962	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2963	struct chcr_context *ctx = a_ctx(tfm);
2964	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2965	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2966	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2967	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2968	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2969	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2970	unsigned int ccm_xtra;
2971	unsigned int tag_offset = 0, auth_offset = 0;
2972	unsigned int assoclen;
2973
2974	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2975
2976	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2977		assoclen = req->assoclen - 8;
2978	else
2979		assoclen = req->assoclen;
2980	ccm_xtra = CCM_B0_SIZE +
2981		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2982
2983	auth_offset = req->cryptlen ?
2984		(req->assoclen + IV + 1 + ccm_xtra) : 0;
2985	if (op_type == CHCR_DECRYPT_OP) {
2986		if (crypto_aead_authsize(tfm) != req->cryptlen)
2987			tag_offset = crypto_aead_authsize(tfm);
2988		else
2989			auth_offset = 0;
2990	}
2991
2992	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2993	sec_cpl->pldlen =
2994		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2995	/* For CCM there will always be a B0 block, so AAD start is always 1 */
2996	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2997				1 + IV,	IV + assoclen + ccm_xtra,
2998				req->assoclen + IV + 1 + ccm_xtra, 0);
2999
3000	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
3001					auth_offset, tag_offset,
3002					(op_type == CHCR_ENCRYPT_OP) ? 0 :
3003					crypto_aead_authsize(tfm));
3004	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
3005					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
3006					cipher_mode, mac_mode,
3007					aeadctx->hmac_ctrl, IV >> 1);
3008
3009	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
3010					0, dst_size);
3011}
3012
3013static int aead_ccm_validate_input(unsigned short op_type,
3014				   struct aead_request *req,
3015				   struct chcr_aead_ctx *aeadctx,
3016				   unsigned int sub_type)
3017{
3018	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
3019		if (crypto_ccm_check_iv(req->iv)) {
3020			pr_err("CCM: IV check fails\n");
3021			return -EINVAL;
3022		}
3023	} else {
3024		if (req->assoclen != 16 && req->assoclen != 20) {
3025			pr_err("RFC4309: Invalid AAD length %d\n",
3026			       req->assoclen);
3027			return -EINVAL;
3028		}
3029	}
3030	return 0;
3031}
3032
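/*
 * Build the work request for CCM and RFC 4309 requests: validate the IV and
 * AAD length, account for the extra B0 block when sizing the SG lists and
 * immediate data, and fill the SEC_CPL, duplicated AES key context,
 * formatted IV and SG lists.
 */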
3033static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
3034					  unsigned short qid,
3035					  int size)
3036{
3037	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3038	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3039	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3040	struct sk_buff *skb = NULL;
3041	struct chcr_wr *chcr_req;
3042	struct cpl_rx_phys_dsgl *phys_cpl;
3043	struct ulptx_sgl *ulptx;
3044	unsigned int transhdr_len;
3045	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
3046	unsigned int sub_type, assoclen = req->assoclen;
3047	unsigned int authsize = crypto_aead_authsize(tfm);
3048	int error = -EINVAL;
3049	u8 *ivptr;
3050	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3051		GFP_ATOMIC;
3052	struct adapter *adap = padap(a_ctx(tfm)->dev);
3053
3054	sub_type = get_aead_subtype(tfm);
3055	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
3056		assoclen -= 8;
3057	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
3058	error = chcr_aead_common_init(req);
3059	if (error)
3060		return ERR_PTR(error);
3061
3062	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3063	if (error)
3064		goto err;
3065	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
3066			+ (reqctx->op ? -authsize : authsize),
3067			CHCR_DST_SG_SIZE, 0);
3068	dnents += MIN_CCM_SG; // For IV and B0
3069	dst_size = get_space_for_phys_dsgl(dnents);
3070	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3071			       CHCR_SRC_SG_SIZE, 0);
3072	snents += MIN_CCM_SG; // For B0
3073	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
3074	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3075	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
3076		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
3077	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
3078				     reqctx->b0_len, 16) :
3079		(sgl_len(snents) *  8);
3080	transhdr_len += temp;
3081	transhdr_len = roundup(transhdr_len, 16);
3082
3083	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
3084				reqctx->b0_len, transhdr_len, reqctx->op)) {
3085		atomic_inc(&adap->chcr_stats.fallback);
3086		chcr_aead_common_exit(req);
3087		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3088	}
3089	skb = alloc_skb(transhdr_len,  flags);
3090
3091	if (!skb) {
3092		error = -ENOMEM;
3093		goto err;
3094	}
3095
3096	chcr_req = __skb_put_zero(skb, transhdr_len);
3097
3098	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3099
3100	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3101	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3102	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3103			aeadctx->key, aeadctx->enckey_len);
3104
3105	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3106	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3107	ulptx = (struct ulptx_sgl *)(ivptr + IV);
3108	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3109	if (error)
3110		goto dstmap_fail;
3111	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3112	chcr_add_aead_src_ent(req, ulptx);
3113
3114	atomic_inc(&adap->chcr_stats.aead_rqst);
3115	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3116		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3117		reqctx->b0_len) : 0);
3118	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3119		    transhdr_len, temp, 0);
3120	reqctx->skb = skb;
3121
3122	return skb;
3123dstmap_fail:
3124	kfree_skb(skb);
3125err:
3126	chcr_aead_common_exit(req);
3127	return ERR_PTR(error);
3128}
3129
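/*
 * Build the work request for GCM and RFC 4106 requests: size the SG lists,
 * fall back to software when limits are exceeded, then fill the SEC_CPL,
 * the AES key plus GHASH subkey H, and the SALT | IV | 0x00000001 counter
 * block.
 */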
3130static struct sk_buff *create_gcm_wr(struct aead_request *req,
3131				     unsigned short qid,
3132				     int size)
3133{
3134	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3135	struct chcr_context *ctx = a_ctx(tfm);
3136	struct uld_ctx *u_ctx = ULD_CTX(ctx);
3137	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3138	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3139	struct sk_buff *skb = NULL;
3140	struct chcr_wr *chcr_req;
3141	struct cpl_rx_phys_dsgl *phys_cpl;
3142	struct ulptx_sgl *ulptx;
3143	unsigned int transhdr_len, dnents = 0, snents;
3144	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3145	unsigned int authsize = crypto_aead_authsize(tfm);
3146	int error = -EINVAL;
3147	u8 *ivptr;
3148	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3149		GFP_ATOMIC;
3150	struct adapter *adap = padap(ctx->dev);
3151	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3152
3153	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
3154	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3155		assoclen = req->assoclen - 8;
3156
3157	reqctx->b0_len = 0;
3158	error = chcr_aead_common_init(req);
3159	if (error)
3160		return ERR_PTR(error);
3161	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3162				(reqctx->op ? -authsize : authsize),
3163				CHCR_DST_SG_SIZE, 0);
3164	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3165			       CHCR_SRC_SG_SIZE, 0);
3166	dnents += MIN_GCM_SG; // For IV
3167	dst_size = get_space_for_phys_dsgl(dnents);
3168	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3169	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3170	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3171			SGE_MAX_WR_LEN;
3172	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3173		(sgl_len(snents) * 8);
3174	transhdr_len += temp;
3175	transhdr_len = roundup(transhdr_len, 16);
3176	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3177			    transhdr_len, reqctx->op)) {
3178
3179		atomic_inc(&adap->chcr_stats.fallback);
3180		chcr_aead_common_exit(req);
3181		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3182	}
3183	skb = alloc_skb(transhdr_len, flags);
3184	if (!skb) {
3185		error = -ENOMEM;
3186		goto err;
3187	}
3188
3189	chcr_req = __skb_put_zero(skb, transhdr_len);
3190
3191	// Offset of the tag from the end
3192	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3193	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3194						rx_channel_id, 2, 1);
3195	chcr_req->sec_cpl.pldlen =
3196		htonl(req->assoclen + IV + req->cryptlen);
3197	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3198					assoclen ? 1 + IV : 0,
3199					assoclen ? IV + assoclen : 0,
3200					req->assoclen + IV + 1, 0);
3201	chcr_req->sec_cpl.cipherstop_lo_authinsert =
3202			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3203						temp, temp);
3204	chcr_req->sec_cpl.seqno_numivs =
3205			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3206					CHCR_ENCRYPT_OP) ? 1 : 0,
3207					CHCR_SCMD_CIPHER_MODE_AES_GCM,
3208					CHCR_SCMD_AUTH_MODE_GHASH,
3209					aeadctx->hmac_ctrl, IV >> 1);
3210	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3211					0, 0, dst_size);
3212	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3213	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3214	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3215	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3216
3217	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3218	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3219	/* Prepare a 16-byte IV laid out as: */
3220	/* SALT | IV | 0x00000001 */
3221	if (get_aead_subtype(tfm) ==
3222	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3223		memcpy(ivptr, aeadctx->salt, 4);
3224		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3225	} else {
3226		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3227	}
3228	put_unaligned_be32(0x01, &ivptr[12]);
3229	ulptx = (struct ulptx_sgl *)(ivptr + 16);
3230
3231	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3232	chcr_add_aead_src_ent(req, ulptx);
3233	atomic_inc(&adap->chcr_stats.aead_rqst);
3234	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3235		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3236	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3237		    transhdr_len, temp, reqctx->verify);
3238	reqctx->skb = skb;
3239	return skb;
3240
3241err:
3242	chcr_aead_common_exit(req);
3243	return ERR_PTR(error);
3244}
3245
3246
3247
3248static int chcr_aead_cra_init(struct crypto_aead *tfm)
3249{
3250	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3251	struct aead_alg *alg = crypto_aead_alg(tfm);
3252
3253	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3254					       CRYPTO_ALG_NEED_FALLBACK |
3255					       CRYPTO_ALG_ASYNC);
3256	if  (IS_ERR(aeadctx->sw_cipher))
3257		return PTR_ERR(aeadctx->sw_cipher);
3258	crypto_aead_set_reqsize_dma(
3259		tfm, max(sizeof(struct chcr_aead_reqctx),
3260			 sizeof(struct aead_request) +
3261			 crypto_aead_reqsize(aeadctx->sw_cipher)));
3262	return chcr_device_init(a_ctx(tfm));
3263}
3264
3265static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3266{
3267	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3268
3269	crypto_free_aead(aeadctx->sw_cipher);
3270}
3271
3272static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3273					unsigned int authsize)
3274{
3275	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3276
3277	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3278	aeadctx->mayverify = VERIFY_HW;
3279	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3280}
3281static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3282				    unsigned int authsize)
3283{
3284	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3285	u32 maxauth = crypto_aead_maxauthsize(tfm);
3286
3287	/* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3288	 * does not hold for SHA1, so the authsize == 12 check must come
3289	 * before the authsize == (maxauth >> 1) check.
3290	 */
3291	if (authsize == ICV_4) {
3292		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3293		aeadctx->mayverify = VERIFY_HW;
3294	} else if (authsize == ICV_6) {
3295		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3296		aeadctx->mayverify = VERIFY_HW;
3297	} else if (authsize == ICV_10) {
3298		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3299		aeadctx->mayverify = VERIFY_HW;
3300	} else if (authsize == ICV_12) {
3301		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3302		aeadctx->mayverify = VERIFY_HW;
3303	} else if (authsize == ICV_14) {
3304		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3305		aeadctx->mayverify = VERIFY_HW;
3306	} else if (authsize == (maxauth >> 1)) {
3307		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3308		aeadctx->mayverify = VERIFY_HW;
3309	} else if (authsize == maxauth) {
3310		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3311		aeadctx->mayverify = VERIFY_HW;
3312	} else {
3313		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3314		aeadctx->mayverify = VERIFY_SW;
3315	}
3316	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3317}
3318
3319
3320static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3321{
3322	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3323
3324	switch (authsize) {
3325	case ICV_4:
3326		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3327		aeadctx->mayverify = VERIFY_HW;
3328		break;
3329	case ICV_8:
3330		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3331		aeadctx->mayverify = VERIFY_HW;
3332		break;
3333	case ICV_12:
3334		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3335		aeadctx->mayverify = VERIFY_HW;
3336		break;
3337	case ICV_14:
3338		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3339		aeadctx->mayverify = VERIFY_HW;
3340		break;
3341	case ICV_16:
3342		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3343		aeadctx->mayverify = VERIFY_HW;
3344		break;
3345	case ICV_13:
3346	case ICV_15:
3347		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3348		aeadctx->mayverify = VERIFY_SW;
3349		break;
3350	default:
3351		return -EINVAL;
3352	}
3353	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3354}
3355
3356static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3357					  unsigned int authsize)
3358{
3359	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3360
3361	switch (authsize) {
3362	case ICV_8:
3363		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3364		aeadctx->mayverify = VERIFY_HW;
3365		break;
3366	case ICV_12:
3367		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3368		aeadctx->mayverify = VERIFY_HW;
3369		break;
3370	case ICV_16:
3371		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3372		aeadctx->mayverify = VERIFY_HW;
3373		break;
3374	default:
3375		return -EINVAL;
3376	}
3377	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3378}
3379
3380static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3381				unsigned int authsize)
3382{
3383	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3384
3385	switch (authsize) {
3386	case ICV_4:
3387		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3388		aeadctx->mayverify = VERIFY_HW;
3389		break;
3390	case ICV_6:
3391		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3392		aeadctx->mayverify = VERIFY_HW;
3393		break;
3394	case ICV_8:
3395		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3396		aeadctx->mayverify = VERIFY_HW;
3397		break;
3398	case ICV_10:
3399		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3400		aeadctx->mayverify = VERIFY_HW;
3401		break;
3402	case ICV_12:
3403		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3404		aeadctx->mayverify = VERIFY_HW;
3405		break;
3406	case ICV_14:
3407		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3408		aeadctx->mayverify = VERIFY_HW;
3409		break;
3410	case ICV_16:
3411		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3412		aeadctx->mayverify = VERIFY_HW;
3413		break;
3414	default:
3415		return -EINVAL;
3416	}
3417	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3418}
3419
3420static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3421				const u8 *key,
3422				unsigned int keylen)
3423{
3424	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3425	unsigned char ck_size, mk_size;
3426	int key_ctx_size = 0;
3427
3428	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3429	if (keylen == AES_KEYSIZE_128) {
3430		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3431		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3432	} else if (keylen == AES_KEYSIZE_192) {
3433		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3434		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3435	} else if (keylen == AES_KEYSIZE_256) {
3436		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3437		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3438	} else {
3439		aeadctx->enckey_len = 0;
3440		return	-EINVAL;
3441	}
3442	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3443						key_ctx_size >> 4);
3444	memcpy(aeadctx->key, key, keylen);
3445	aeadctx->enckey_len = keylen;
3446
3447	return 0;
3448}
3449
3450static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3451				const u8 *key,
3452				unsigned int keylen)
3453{
3454	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3455	int error;
3456
3457	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3458	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3459			      CRYPTO_TFM_REQ_MASK);
3460	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3461	if (error)
3462		return error;
3463	return chcr_ccm_common_setkey(aead, key, keylen);
3464}
3465
3466static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3467				    unsigned int keylen)
3468{
3469	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3470	int error;
3471
3472	if (keylen < 3) {
3473		aeadctx->enckey_len = 0;
3474		return	-EINVAL;
3475	}
3476	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3477	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3478			      CRYPTO_TFM_REQ_MASK);
3479	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3480	if (error)
3481		return error;
3482	keylen -= 3;
3483	memcpy(aeadctx->salt, key + keylen, 3);
3484	return chcr_ccm_common_setkey(aead, key, keylen);
3485}
3486
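/*
 * GCM setkey: program the software fallback, extract the 4-byte salt for
 * RFC 4106, build the key-context header, and derive the GHASH subkey
 * H = AES-encrypt(K, 0^16) that is placed in the key context.
 */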
3487static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3488			   unsigned int keylen)
3489{
3490	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3491	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3492	unsigned int ck_size;
3493	int ret = 0, key_ctx_size = 0;
3494	struct crypto_aes_ctx aes;
3495
3496	aeadctx->enckey_len = 0;
3497	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3498	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3499			      & CRYPTO_TFM_REQ_MASK);
3500	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3501	if (ret)
3502		goto out;
3503
3504	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3505	    keylen > 3) {
3506		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3507		memcpy(aeadctx->salt, key + keylen, 4);
3508	}
3509	if (keylen == AES_KEYSIZE_128) {
3510		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3511	} else if (keylen == AES_KEYSIZE_192) {
3512		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3513	} else if (keylen == AES_KEYSIZE_256) {
3514		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3515	} else {
3516		pr_err("GCM: Invalid key length %d\n", keylen);
3517		ret = -EINVAL;
3518		goto out;
3519	}
3520
3521	memcpy(aeadctx->key, key, keylen);
3522	aeadctx->enckey_len = keylen;
3523	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3524		AEAD_H_SIZE;
3525	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3526						CHCR_KEYCTX_MAC_KEY_SIZE_128,
3527						0, 0,
3528						key_ctx_size >> 4);
3529	/* Calculate H = CIPH(K, 0 repeated 16 times); it goes
3530	 * into the key context.
3531	 */
3532	ret = aes_expandkey(&aes, key, keylen);
3533	if (ret) {
3534		aeadctx->enckey_len = 0;
3535		goto out;
3536	}
3537	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3538	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3539	memzero_explicit(&aes, sizeof(aes));
3540
3541out:
3542	return ret;
3543}
3544
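/*
 * authenc setkey: split the combined key, copy the AES key (plus a
 * reverse-round decrypt key for the CBC modes), then precompute the HMAC
 * partial hashes h(ipad) and h(opad) that the hardware consumes from the
 * key context.
 */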
3545static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3546				   unsigned int keylen)
3547{
3548	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3549	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3550	/* contains both the authentication and the cipher key */
3551	struct crypto_authenc_keys keys;
3552	unsigned int bs, subtype;
3553	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3554	int err = 0, i, key_ctx_len = 0;
3555	unsigned char ck_size = 0;
3556	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3557	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3558	struct algo_param param;
3559	int align;
3560	u8 *o_ptr = NULL;
3561
3562	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3563	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3564			      & CRYPTO_TFM_REQ_MASK);
3565	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3566	if (err)
3567		goto out;
3568
3569	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3570		goto out;
3571
3572	if (get_alg_config(&param, max_authsize)) {
3573		pr_err("Unsupported digest size\n");
3574		goto out;
3575	}
3576	subtype = get_aead_subtype(authenc);
3577	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3578		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3579		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3580			goto out;
3581		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3582		- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3583		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3584	}
3585	if (keys.enckeylen == AES_KEYSIZE_128) {
3586		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3587	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3588		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3589	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3590		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3591	} else {
3592		pr_err("Unsupported cipher key\n");
3593		goto out;
3594	}
3595
3596	/* Copy only the encryption key. The auth key is used to generate
3597	 * h(ipad) and h(opad), so it is not needed again; authkeylen is
3598	 * the size of the hash digest.
3599	 */
3600	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3601	aeadctx->enckey_len = keys.enckeylen;
3602	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3603		subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3604
3605		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3606			    aeadctx->enckey_len << 3);
3607	}
3608	base_hash  = chcr_alloc_shash(max_authsize);
3609	if (IS_ERR(base_hash)) {
3610		pr_err("Base driver cannot be loaded\n");
3611		goto out;
3612	}
3613	{
3614		SHASH_DESC_ON_STACK(shash, base_hash);
3615
3616		shash->tfm = base_hash;
3617		bs = crypto_shash_blocksize(base_hash);
3618		align = KEYCTX_ALIGN_PAD(max_authsize);
3619		o_ptr =  actx->h_iopad + param.result_size + align;
3620
3621		if (keys.authkeylen > bs) {
3622			err = crypto_shash_digest(shash, keys.authkey,
3623						  keys.authkeylen,
3624						  o_ptr);
3625			if (err) {
3626				pr_err("Base driver cannot be loaded\n");
3627				goto out;
3628			}
3629			keys.authkeylen = max_authsize;
3630		} else
3631			memcpy(o_ptr, keys.authkey, keys.authkeylen);
3632
3633		/* Compute the ipad-digest */
3634		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3635		memcpy(pad, o_ptr, keys.authkeylen);
3636		for (i = 0; i < bs >> 2; i++)
3637			*((unsigned int *)pad + i) ^= IPAD_DATA;
3638
3639		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3640					      max_authsize))
3641			goto out;
3642		/* Compute the opad-digest */
3643		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3644		memcpy(pad, o_ptr, keys.authkeylen);
3645		for (i = 0; i < bs >> 2; i++)
3646			*((unsigned int *)pad + i) ^= OPAD_DATA;
3647
3648		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3649			goto out;
3650
3651		/* convert the ipad and opad digest to network order */
3652		chcr_change_order(actx->h_iopad, param.result_size);
3653		chcr_change_order(o_ptr, param.result_size);
3654		key_ctx_len = sizeof(struct _key_ctx) +
3655			roundup(keys.enckeylen, 16) +
3656			(param.result_size + align) * 2;
3657		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3658						0, 1, key_ctx_len >> 4);
3659		actx->auth_mode = param.auth_mode;
3660		chcr_free_shash(base_hash);
3661
3662		memzero_explicit(&keys, sizeof(keys));
3663		return 0;
3664	}
3665out:
3666	aeadctx->enckey_len = 0;
3667	memzero_explicit(&keys, sizeof(keys));
3668	if (!IS_ERR(base_hash))
3669		chcr_free_shash(base_hash);
3670	return -EINVAL;
3671}
3672
3673static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3674					const u8 *key, unsigned int keylen)
3675{
3676	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3677	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3678	struct crypto_authenc_keys keys;
3679	int err;
3680	/* contains both the authentication and the cipher key */
3681	unsigned int subtype;
3682	int key_ctx_len = 0;
3683	unsigned char ck_size = 0;
3684
3685	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3686	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3687			      & CRYPTO_TFM_REQ_MASK);
3688	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3689	if (err)
3690		goto out;
3691
3692	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3693		goto out;
3694
3695	subtype = get_aead_subtype(authenc);
3696	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3697	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3698		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3699			goto out;
3700		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3701			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3702		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3703	}
3704	if (keys.enckeylen == AES_KEYSIZE_128) {
3705		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3706	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3707		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3708	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3709		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3710	} else {
3711		pr_err("Unsupported cipher key %d\n", keys.enckeylen);
3712		goto out;
3713	}
3714	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3715	aeadctx->enckey_len = keys.enckeylen;
3716	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3717	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3718		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3719				aeadctx->enckey_len << 3);
3720	}
3721	key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3722
3723	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3724						0, key_ctx_len >> 4);
3725	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3726	memzero_explicit(&keys, sizeof(keys));
3727	return 0;
3728out:
3729	aeadctx->enckey_len = 0;
3730	memzero_explicit(&keys, sizeof(keys));
3731	return -EINVAL;
3732}
3733
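/*
 * Common AEAD submission path: take a work-request reference (falling back
 * to the software cipher if the device is detaching), honour transmit-queue
 * backpressure, build the WR via create_wr_fn and hand it to the LLD.
 */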
3734static int chcr_aead_op(struct aead_request *req,
3735			int size,
3736			create_wr_t create_wr_fn)
3737{
3738	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3739	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3740	struct chcr_context *ctx = a_ctx(tfm);
3741	struct uld_ctx *u_ctx = ULD_CTX(ctx);
3742	struct sk_buff *skb;
3743	struct chcr_dev *cdev;
3744
3745	cdev = a_ctx(tfm)->dev;
3746	if (!cdev) {
3747		pr_err("%s : No crypto device.\n", __func__);
3748		return -ENXIO;
3749	}
3750
3751	if (chcr_inc_wrcount(cdev)) {
3752		/* Detach state for CHCR means lldi or padap is freed.
3753		 * We cannot increment the fallback counter here.
3754		 */
3755		return chcr_aead_fallback(req, reqctx->op);
3756	}
3757
3758	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3759					reqctx->txqidx) &&
3760		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
3761			chcr_dec_wrcount(cdev);
3762			return -ENOSPC;
3763	}
3764
3765	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3766	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
3767		pr_err("RFC4106: Invalid value of assoclen %d\n",
3768		       req->assoclen);
3769		return -EINVAL;
3770	}
3771
3772	/* Form a WR from req */
3773	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3774
3775	if (IS_ERR_OR_NULL(skb)) {
3776		chcr_dec_wrcount(cdev);
3777		return PTR_ERR_OR_ZERO(skb);
3778	}
3779
3780	skb->dev = u_ctx->lldi.ports[0];
3781	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
3782	chcr_send_wr(skb);
3783	return -EINPROGRESS;
3784}
3785
3786static int chcr_aead_encrypt(struct aead_request *req)
3787{
3788	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3789	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3790	struct chcr_context *ctx = a_ctx(tfm);
3791	unsigned int cpu;
3792
3793	cpu = get_cpu();
3794	reqctx->txqidx = cpu % ctx->ntxq;
3795	reqctx->rxqidx = cpu % ctx->nrxq;
3796	put_cpu();
3797
3798	reqctx->verify = VERIFY_HW;
3799	reqctx->op = CHCR_ENCRYPT_OP;
3800
3801	switch (get_aead_subtype(tfm)) {
3802	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3803	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3804	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3805	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3806		return chcr_aead_op(req, 0, create_authenc_wr);
3807	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3808	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3809		return chcr_aead_op(req, 0, create_aead_ccm_wr);
3810	default:
3811		return chcr_aead_op(req, 0, create_gcm_wr);
3812	}
3813}
3814
3815static int chcr_aead_decrypt(struct aead_request *req)
3816{
3817	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3818	struct chcr_context *ctx = a_ctx(tfm);
3819	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3820	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3821	int size;
3822	unsigned int cpu;
3823
3824	cpu = get_cpu();
3825	reqctx->txqidx = cpu % ctx->ntxq;
3826	reqctx->rxqidx = cpu % ctx->nrxq;
3827	put_cpu();
3828
3829	if (aeadctx->mayverify == VERIFY_SW) {
3830		size = crypto_aead_maxauthsize(tfm);
3831		reqctx->verify = VERIFY_SW;
3832	} else {
3833		size = 0;
3834		reqctx->verify = VERIFY_HW;
3835	}
3836	reqctx->op = CHCR_DECRYPT_OP;
3837	switch (get_aead_subtype(tfm)) {
3838	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3839	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3840	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3841	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3842		return chcr_aead_op(req, size, create_authenc_wr);
3843	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3844	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3845		return chcr_aead_op(req, size, create_aead_ccm_wr);
3846	default:
3847		return chcr_aead_op(req, size, create_gcm_wr);
3848	}
3849}
3850
3851static struct chcr_alg_template driver_algs[] = {
3852	/* AES-CBC */
3853	{
3854		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3855		.is_registered = 0,
3856		.alg.skcipher = {
3857			.base.cra_name		= "cbc(aes)",
3858			.base.cra_driver_name	= "cbc-aes-chcr",
3859			.base.cra_blocksize	= AES_BLOCK_SIZE,
3860
3861			.init			= chcr_init_tfm,
3862			.exit			= chcr_exit_tfm,
3863			.min_keysize		= AES_MIN_KEY_SIZE,
3864			.max_keysize		= AES_MAX_KEY_SIZE,
3865			.ivsize			= AES_BLOCK_SIZE,
3866			.setkey			= chcr_aes_cbc_setkey,
3867			.encrypt		= chcr_aes_encrypt,
3868			.decrypt		= chcr_aes_decrypt,
3869			}
3870	},
3871	{
3872		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3873		.is_registered = 0,
3874		.alg.skcipher = {
3875			.base.cra_name		= "xts(aes)",
3876			.base.cra_driver_name	= "xts-aes-chcr",
3877			.base.cra_blocksize	= AES_BLOCK_SIZE,
3878
3879			.init			= chcr_init_tfm,
3880			.exit			= chcr_exit_tfm,
3881			.min_keysize		= 2 * AES_MIN_KEY_SIZE,
3882			.max_keysize		= 2 * AES_MAX_KEY_SIZE,
3883			.ivsize			= AES_BLOCK_SIZE,
3884			.setkey			= chcr_aes_xts_setkey,
3885			.encrypt		= chcr_aes_encrypt,
3886			.decrypt		= chcr_aes_decrypt,
3887			}
3888	},
3889	{
3890		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3891		.is_registered = 0,
3892		.alg.skcipher = {
3893			.base.cra_name		= "ctr(aes)",
3894			.base.cra_driver_name	= "ctr-aes-chcr",
3895			.base.cra_blocksize	= 1,
3896
3897			.init			= chcr_init_tfm,
3898			.exit			= chcr_exit_tfm,
3899			.min_keysize		= AES_MIN_KEY_SIZE,
3900			.max_keysize		= AES_MAX_KEY_SIZE,
3901			.ivsize			= AES_BLOCK_SIZE,
3902			.setkey			= chcr_aes_ctr_setkey,
3903			.encrypt		= chcr_aes_encrypt,
3904			.decrypt		= chcr_aes_decrypt,
3905		}
3906	},
3907	{
3908		.type = CRYPTO_ALG_TYPE_SKCIPHER |
3909			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3910		.is_registered = 0,
3911		.alg.skcipher = {
3912			.base.cra_name		= "rfc3686(ctr(aes))",
3913			.base.cra_driver_name	= "rfc3686-ctr-aes-chcr",
3914			.base.cra_blocksize	= 1,
3915
3916			.init			= chcr_rfc3686_init,
3917			.exit			= chcr_exit_tfm,
3918			.min_keysize		= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3919			.max_keysize		= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3920			.ivsize			= CTR_RFC3686_IV_SIZE,
3921			.setkey			= chcr_aes_rfc3686_setkey,
3922			.encrypt		= chcr_aes_encrypt,
3923			.decrypt		= chcr_aes_decrypt,
3924		}
3925	},
3926	/* SHA */
3927	{
3928		.type = CRYPTO_ALG_TYPE_AHASH,
3929		.is_registered = 0,
3930		.alg.hash = {
3931			.halg.digestsize = SHA1_DIGEST_SIZE,
3932			.halg.base = {
3933				.cra_name = "sha1",
3934				.cra_driver_name = "sha1-chcr",
3935				.cra_blocksize = SHA1_BLOCK_SIZE,
3936			}
3937		}
3938	},
3939	{
3940		.type = CRYPTO_ALG_TYPE_AHASH,
3941		.is_registered = 0,
3942		.alg.hash = {
3943			.halg.digestsize = SHA256_DIGEST_SIZE,
3944			.halg.base = {
3945				.cra_name = "sha256",
3946				.cra_driver_name = "sha256-chcr",
3947				.cra_blocksize = SHA256_BLOCK_SIZE,
3948			}
3949		}
3950	},
3951	{
3952		.type = CRYPTO_ALG_TYPE_AHASH,
3953		.is_registered = 0,
3954		.alg.hash = {
3955			.halg.digestsize = SHA224_DIGEST_SIZE,
3956			.halg.base = {
3957				.cra_name = "sha224",
3958				.cra_driver_name = "sha224-chcr",
3959				.cra_blocksize = SHA224_BLOCK_SIZE,
3960			}
3961		}
3962	},
3963	{
3964		.type = CRYPTO_ALG_TYPE_AHASH,
3965		.is_registered = 0,
3966		.alg.hash = {
3967			.halg.digestsize = SHA384_DIGEST_SIZE,
3968			.halg.base = {
3969				.cra_name = "sha384",
3970				.cra_driver_name = "sha384-chcr",
3971				.cra_blocksize = SHA384_BLOCK_SIZE,
3972			}
3973		}
3974	},
3975	{
3976		.type = CRYPTO_ALG_TYPE_AHASH,
3977		.is_registered = 0,
3978		.alg.hash = {
3979			.halg.digestsize = SHA512_DIGEST_SIZE,
3980			.halg.base = {
3981				.cra_name = "sha512",
3982				.cra_driver_name = "sha512-chcr",
3983				.cra_blocksize = SHA512_BLOCK_SIZE,
3984			}
3985		}
3986	},
3987	/* HMAC */
3988	{
3989		.type = CRYPTO_ALG_TYPE_HMAC,
3990		.is_registered = 0,
3991		.alg.hash = {
3992			.halg.digestsize = SHA1_DIGEST_SIZE,
3993			.halg.base = {
3994				.cra_name = "hmac(sha1)",
3995				.cra_driver_name = "hmac-sha1-chcr",
3996				.cra_blocksize = SHA1_BLOCK_SIZE,
3997			}
3998		}
3999	},
4000	{
4001		.type = CRYPTO_ALG_TYPE_HMAC,
4002		.is_registered = 0,
4003		.alg.hash = {
4004			.halg.digestsize = SHA224_DIGEST_SIZE,
4005			.halg.base = {
4006				.cra_name = "hmac(sha224)",
4007				.cra_driver_name = "hmac-sha224-chcr",
4008				.cra_blocksize = SHA224_BLOCK_SIZE,
4009			}
4010		}
4011	},
4012	{
4013		.type = CRYPTO_ALG_TYPE_HMAC,
4014		.is_registered = 0,
4015		.alg.hash = {
4016			.halg.digestsize = SHA256_DIGEST_SIZE,
4017			.halg.base = {
4018				.cra_name = "hmac(sha256)",
4019				.cra_driver_name = "hmac-sha256-chcr",
4020				.cra_blocksize = SHA256_BLOCK_SIZE,
4021			}
4022		}
4023	},
4024	{
4025		.type = CRYPTO_ALG_TYPE_HMAC,
4026		.is_registered = 0,
4027		.alg.hash = {
4028			.halg.digestsize = SHA384_DIGEST_SIZE,
4029			.halg.base = {
4030				.cra_name = "hmac(sha384)",
4031				.cra_driver_name = "hmac-sha384-chcr",
4032				.cra_blocksize = SHA384_BLOCK_SIZE,
4033			}
4034		}
4035	},
4036	{
4037		.type = CRYPTO_ALG_TYPE_HMAC,
4038		.is_registered = 0,
4039		.alg.hash = {
4040			.halg.digestsize = SHA512_DIGEST_SIZE,
4041			.halg.base = {
4042				.cra_name = "hmac(sha512)",
4043				.cra_driver_name = "hmac-sha512-chcr",
4044				.cra_blocksize = SHA512_BLOCK_SIZE,
4045			}
4046		}
4047	},
4048	/* Add AEAD Algorithms */
4049	{
4050		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
4051		.is_registered = 0,
4052		.alg.aead = {
4053			.base = {
4054				.cra_name = "gcm(aes)",
4055				.cra_driver_name = "gcm-aes-chcr",
4056				.cra_blocksize	= 1,
4057				.cra_priority = CHCR_AEAD_PRIORITY,
4058				.cra_ctxsize =	sizeof(struct chcr_context) +
4059						sizeof(struct chcr_aead_ctx) +
4060						sizeof(struct chcr_gcm_ctx),
4061			},
4062			.ivsize = GCM_AES_IV_SIZE,
4063			.maxauthsize = GHASH_DIGEST_SIZE,
4064			.setkey = chcr_gcm_setkey,
4065			.setauthsize = chcr_gcm_setauthsize,
4066		}
4067	},
4068	{
4069		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
4070		.is_registered = 0,
4071		.alg.aead = {
4072			.base = {
4073				.cra_name = "rfc4106(gcm(aes))",
4074				.cra_driver_name = "rfc4106-gcm-aes-chcr",
4075				.cra_blocksize	 = 1,
4076				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4077				.cra_ctxsize =	sizeof(struct chcr_context) +
4078						sizeof(struct chcr_aead_ctx) +
4079						sizeof(struct chcr_gcm_ctx),
4080
4081			},
4082			.ivsize = GCM_RFC4106_IV_SIZE,
4083			.maxauthsize	= GHASH_DIGEST_SIZE,
4084			.setkey = chcr_gcm_setkey,
4085			.setauthsize	= chcr_4106_4309_setauthsize,
4086		}
4087	},
4088	{
4089		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
4090		.is_registered = 0,
4091		.alg.aead = {
4092			.base = {
4093				.cra_name = "ccm(aes)",
4094				.cra_driver_name = "ccm-aes-chcr",
4095				.cra_blocksize	 = 1,
4096				.cra_priority = CHCR_AEAD_PRIORITY,
4097				.cra_ctxsize =	sizeof(struct chcr_context) +
4098						sizeof(struct chcr_aead_ctx),
4099
4100			},
4101			.ivsize = AES_BLOCK_SIZE,
4102			.maxauthsize	= GHASH_DIGEST_SIZE,
4103			.setkey = chcr_aead_ccm_setkey,
4104			.setauthsize	= chcr_ccm_setauthsize,
4105		}
4106	},
4107	{
4108		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
4109		.is_registered = 0,
4110		.alg.aead = {
4111			.base = {
4112				.cra_name = "rfc4309(ccm(aes))",
4113				.cra_driver_name = "rfc4309-ccm-aes-chcr",
4114				.cra_blocksize	 = 1,
4115				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4116				.cra_ctxsize =	sizeof(struct chcr_context) +
4117						sizeof(struct chcr_aead_ctx),
4118
4119			},
4120			.ivsize = 8,
4121			.maxauthsize	= GHASH_DIGEST_SIZE,
4122			.setkey = chcr_aead_rfc4309_setkey,
4123			.setauthsize = chcr_4106_4309_setauthsize,
4124		}
4125	},
4126	{
4127		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4128		.is_registered = 0,
4129		.alg.aead = {
4130			.base = {
4131				.cra_name = "authenc(hmac(sha1),cbc(aes))",
4132				.cra_driver_name =
4133					"authenc-hmac-sha1-cbc-aes-chcr",
4134				.cra_blocksize	 = AES_BLOCK_SIZE,
4135				.cra_priority = CHCR_AEAD_PRIORITY,
4136				.cra_ctxsize =	sizeof(struct chcr_context) +
4137						sizeof(struct chcr_aead_ctx) +
4138						sizeof(struct chcr_authenc_ctx),
4139
4140			},
4141			.ivsize = AES_BLOCK_SIZE,
4142			.maxauthsize = SHA1_DIGEST_SIZE,
4143			.setkey = chcr_authenc_setkey,
4144			.setauthsize = chcr_authenc_setauthsize,
4145		}
4146	},
4147	{
4148		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4149		.is_registered = 0,
4150		.alg.aead = {
4151			.base = {
4152
4153				.cra_name = "authenc(hmac(sha256),cbc(aes))",
4154				.cra_driver_name =
4155					"authenc-hmac-sha256-cbc-aes-chcr",
4156				.cra_blocksize	 = AES_BLOCK_SIZE,
4157				.cra_priority = CHCR_AEAD_PRIORITY,
4158				.cra_ctxsize =	sizeof(struct chcr_context) +
4159						sizeof(struct chcr_aead_ctx) +
4160						sizeof(struct chcr_authenc_ctx),
4161
4162			},
4163			.ivsize = AES_BLOCK_SIZE,
4164			.maxauthsize	= SHA256_DIGEST_SIZE,
4165			.setkey = chcr_authenc_setkey,
4166			.setauthsize = chcr_authenc_setauthsize,
4167		}
4168	},
4169	{
4170		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4171		.is_registered = 0,
4172		.alg.aead = {
4173			.base = {
4174				.cra_name = "authenc(hmac(sha224),cbc(aes))",
4175				.cra_driver_name =
4176					"authenc-hmac-sha224-cbc-aes-chcr",
4177				.cra_blocksize	 = AES_BLOCK_SIZE,
4178				.cra_priority = CHCR_AEAD_PRIORITY,
4179				.cra_ctxsize =	sizeof(struct chcr_context) +
4180						sizeof(struct chcr_aead_ctx) +
4181						sizeof(struct chcr_authenc_ctx),
4182			},
4183			.ivsize = AES_BLOCK_SIZE,
4184			.maxauthsize = SHA224_DIGEST_SIZE,
4185			.setkey = chcr_authenc_setkey,
4186			.setauthsize = chcr_authenc_setauthsize,
4187		}
4188	},
4189	{
4190		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4191		.is_registered = 0,
4192		.alg.aead = {
4193			.base = {
4194				.cra_name = "authenc(hmac(sha384),cbc(aes))",
4195				.cra_driver_name =
4196					"authenc-hmac-sha384-cbc-aes-chcr",
4197				.cra_blocksize	 = AES_BLOCK_SIZE,
4198				.cra_priority = CHCR_AEAD_PRIORITY,
4199				.cra_ctxsize =	sizeof(struct chcr_context) +
4200						sizeof(struct chcr_aead_ctx) +
4201						sizeof(struct chcr_authenc_ctx),
4202
4203			},
4204			.ivsize = AES_BLOCK_SIZE,
4205			.maxauthsize = SHA384_DIGEST_SIZE,
4206			.setkey = chcr_authenc_setkey,
4207			.setauthsize = chcr_authenc_setauthsize,
4208		}
4209	},
4210	{
4211		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4212		.is_registered = 0,
4213		.alg.aead = {
4214			.base = {
4215				.cra_name = "authenc(hmac(sha512),cbc(aes))",
4216				.cra_driver_name =
4217					"authenc-hmac-sha512-cbc-aes-chcr",
4218				.cra_blocksize	 = AES_BLOCK_SIZE,
4219				.cra_priority = CHCR_AEAD_PRIORITY,
4220				.cra_ctxsize =	sizeof(struct chcr_context) +
4221						sizeof(struct chcr_aead_ctx) +
4222						sizeof(struct chcr_authenc_ctx),
4223
4224			},
4225			.ivsize = AES_BLOCK_SIZE,
4226			.maxauthsize = SHA512_DIGEST_SIZE,
4227			.setkey = chcr_authenc_setkey,
4228			.setauthsize = chcr_authenc_setauthsize,
4229		}
4230	},
4231	{
4232		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4233		.is_registered = 0,
4234		.alg.aead = {
4235			.base = {
4236				.cra_name = "authenc(digest_null,cbc(aes))",
4237				.cra_driver_name =
4238					"authenc-digest_null-cbc-aes-chcr",
4239				.cra_blocksize	 = AES_BLOCK_SIZE,
4240				.cra_priority = CHCR_AEAD_PRIORITY,
4241				.cra_ctxsize =	sizeof(struct chcr_context) +
4242						sizeof(struct chcr_aead_ctx) +
4243						sizeof(struct chcr_authenc_ctx),
4244
4245			},
4246			.ivsize  = AES_BLOCK_SIZE,
4247			.maxauthsize = 0,
4248			.setkey  = chcr_aead_digest_null_setkey,
4249			.setauthsize = chcr_authenc_null_setauthsize,
4250		}
4251	},
4252	{
4253		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4254		.is_registered = 0,
4255		.alg.aead = {
4256			.base = {
4257				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4258				.cra_driver_name =
4259				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4260				.cra_blocksize	 = 1,
4261				.cra_priority = CHCR_AEAD_PRIORITY,
4262				.cra_ctxsize =	sizeof(struct chcr_context) +
4263						sizeof(struct chcr_aead_ctx) +
4264						sizeof(struct chcr_authenc_ctx),
4265
4266			},
4267			.ivsize = CTR_RFC3686_IV_SIZE,
4268			.maxauthsize = SHA1_DIGEST_SIZE,
4269			.setkey = chcr_authenc_setkey,
4270			.setauthsize = chcr_authenc_setauthsize,
4271		}
4272	},
4273	{
4274		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4275		.is_registered = 0,
4276		.alg.aead = {
4277			.base = {
4278
4279				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4280				.cra_driver_name =
4281				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4282				.cra_blocksize	 = 1,
4283				.cra_priority = CHCR_AEAD_PRIORITY,
4284				.cra_ctxsize =	sizeof(struct chcr_context) +
4285						sizeof(struct chcr_aead_ctx) +
4286						sizeof(struct chcr_authenc_ctx),
4287
4288			},
4289			.ivsize = CTR_RFC3686_IV_SIZE,
4290			.maxauthsize	= SHA256_DIGEST_SIZE,
4291			.setkey = chcr_authenc_setkey,
4292			.setauthsize = chcr_authenc_setauthsize,
4293		}
4294	},
4295	{
4296		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4297		.is_registered = 0,
4298		.alg.aead = {
4299			.base = {
4300				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4301				.cra_driver_name =
4302				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4303				.cra_blocksize	 = 1,
4304				.cra_priority = CHCR_AEAD_PRIORITY,
4305				.cra_ctxsize =	sizeof(struct chcr_context) +
4306						sizeof(struct chcr_aead_ctx) +
4307						sizeof(struct chcr_authenc_ctx),
4308			},
4309			.ivsize = CTR_RFC3686_IV_SIZE,
4310			.maxauthsize = SHA224_DIGEST_SIZE,
4311			.setkey = chcr_authenc_setkey,
4312			.setauthsize = chcr_authenc_setauthsize,
4313		}
4314	},
4315	{
4316		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4317		.is_registered = 0,
4318		.alg.aead = {
4319			.base = {
4320				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4321				.cra_driver_name =
4322				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4323				.cra_blocksize	 = 1,
4324				.cra_priority = CHCR_AEAD_PRIORITY,
4325				.cra_ctxsize =	sizeof(struct chcr_context) +
4326						sizeof(struct chcr_aead_ctx) +
4327						sizeof(struct chcr_authenc_ctx),
4328
4329			},
4330			.ivsize = CTR_RFC3686_IV_SIZE,
4331			.maxauthsize = SHA384_DIGEST_SIZE,
4332			.setkey = chcr_authenc_setkey,
4333			.setauthsize = chcr_authenc_setauthsize,
4334		}
4335	},
4336	{
4337		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4338		.is_registered = 0,
4339		.alg.aead = {
4340			.base = {
4341				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4342				.cra_driver_name =
4343				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4344				.cra_blocksize	 = 1,
4345				.cra_priority = CHCR_AEAD_PRIORITY,
4346				.cra_ctxsize =	sizeof(struct chcr_context) +
4347						sizeof(struct chcr_aead_ctx) +
4348						sizeof(struct chcr_authenc_ctx),
4349
4350			},
4351			.ivsize = CTR_RFC3686_IV_SIZE,
4352			.maxauthsize = SHA512_DIGEST_SIZE,
4353			.setkey = chcr_authenc_setkey,
4354			.setauthsize = chcr_authenc_setauthsize,
4355		}
4356	},
4357	{
4358		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4359		.is_registered = 0,
4360		.alg.aead = {
4361			.base = {
4362				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4363				.cra_driver_name =
4364				"authenc-digest_null-rfc3686-ctr-aes-chcr",
4365				.cra_blocksize	 = 1,
4366				.cra_priority = CHCR_AEAD_PRIORITY,
4367				.cra_ctxsize =	sizeof(struct chcr_context) +
4368						sizeof(struct chcr_aead_ctx) +
4369						sizeof(struct chcr_authenc_ctx),
4370
4371			},
4372			.ivsize  = CTR_RFC3686_IV_SIZE,
4373			.maxauthsize = 0,
4374			.setkey  = chcr_aead_digest_null_setkey,
4375			.setauthsize = chcr_authenc_null_setauthsize,
4376		}
4377	},
4378};
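/*
 * The table above only describes the transforms this driver can offer;
 * which implementation actually serves a request is decided by the crypto
 * core at allocation time, matching cra_name (or cra_driver_name) and
 * breaking ties by cra_priority. A minimal consumer-side sketch follows,
 * illustrative only and not part of this driver: the function name is made
 * up, and it assumes the plain gcm(aes) entry whose setkey/setauthsize
 * hooks open this excerpt. Passing an exact driver name such as
 * "rfc4106-gcm-aes-chcr" instead would pin the request to this driver.
 */
#include <crypto/aead.h>
#include <linux/scatterlist.h>

static int example_gcm_encrypt(const u8 *key, unsigned int keylen,
			       u8 *buf, unsigned int assoclen,
			       unsigned int cryptlen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* The core picks the highest-priority registered "gcm(aes)". */
	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;
	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* buf holds assoc data || plaintext, with room for the 16-byte tag. */
	sg_init_one(&sg, buf, assoclen + cryptlen + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, cryptlen, iv);

	/* Wait synchronously; chcr completes asynchronously via the callback. */
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}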
4379
4380/*
4381 *	chcr_unregister_alg - Deregister crypto algorithms with
4382	 *	the kernel crypto framework.
4383 */
4384static int chcr_unregister_alg(void)
4385{
4386	int i;
4387
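	/*
	 * cra_refcnt of a registered algorithm is held at 1 by the
	 * registration itself; a value above 1 means live users (tfms or
	 * template instances), and such algorithms are left registered.
	 */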
4388	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4389		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4390		case CRYPTO_ALG_TYPE_SKCIPHER:
4391			if (driver_algs[i].is_registered && refcount_read(
4392			    &driver_algs[i].alg.skcipher.base.cra_refcnt)
4393			    == 1) {
4394				crypto_unregister_skcipher(
4395						&driver_algs[i].alg.skcipher);
4396				driver_algs[i].is_registered = 0;
4397			}
4398			break;
4399		case CRYPTO_ALG_TYPE_AEAD:
4400			if (driver_algs[i].is_registered && refcount_read(
4401			    &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
4402				crypto_unregister_aead(
4403						&driver_algs[i].alg.aead);
4404				driver_algs[i].is_registered = 0;
4405			}
4406			break;
4407		case CRYPTO_ALG_TYPE_AHASH:
4408			if (driver_algs[i].is_registered && refcount_read(
4409			    &driver_algs[i].alg.hash.halg.base.cra_refcnt)
4410			    == 1) {
4411				crypto_unregister_ahash(
4412						&driver_algs[i].alg.hash);
4413				driver_algs[i].is_registered = 0;
4414			}
4415			break;
4416		}
4417	}
4418	return 0;
4419}
4420
4421#define SZ_AHASH_CTX sizeof(struct chcr_context)
4422#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4423#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4424
4425/*
4426	 *	chcr_register_alg - Register crypto algorithms with the kernel crypto framework.
4427 */
4428static int chcr_register_alg(void)
4429{
4430	struct crypto_alg ai;
4431	struct ahash_alg *a_hash;
4432	int err = 0, i;
4433	char *name = NULL;
4434
4435	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4436		if (driver_algs[i].is_registered)
4437			continue;
4438		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4439		case CRYPTO_ALG_TYPE_SKCIPHER:
4440			driver_algs[i].alg.skcipher.base.cra_priority =
4441				CHCR_CRA_PRIORITY;
4442			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
4443			driver_algs[i].alg.skcipher.base.cra_flags =
4444				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
4445				CRYPTO_ALG_ALLOCATES_MEMORY |
4446				CRYPTO_ALG_NEED_FALLBACK;
4447			driver_algs[i].alg.skcipher.base.cra_ctxsize =
4448				sizeof(struct chcr_context) +
4449				sizeof(struct ablk_ctx);
4450			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
4451
4452			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
4453			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
4454			break;
4455		case CRYPTO_ALG_TYPE_AEAD:
4456			driver_algs[i].alg.aead.base.cra_flags =
4457				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
4458				CRYPTO_ALG_ALLOCATES_MEMORY;
4459			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4460			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4461			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4462			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4463			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4464			err = crypto_register_aead(&driver_algs[i].alg.aead);
4465			name = driver_algs[i].alg.aead.base.cra_driver_name;
4466			break;
4467		case CRYPTO_ALG_TYPE_AHASH:
4468			a_hash = &driver_algs[i].alg.hash;
4469			a_hash->update = chcr_ahash_update;
4470			a_hash->final = chcr_ahash_final;
4471			a_hash->finup = chcr_ahash_finup;
4472			a_hash->digest = chcr_ahash_digest;
4473			a_hash->export = chcr_ahash_export;
4474			a_hash->import = chcr_ahash_import;
4475			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4476			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4477			a_hash->halg.base.cra_module = THIS_MODULE;
4478			a_hash->halg.base.cra_flags =
4479				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4480			a_hash->halg.base.cra_alignmask = 0;
4481			a_hash->halg.base.cra_exit = NULL;
4482
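			/*
			 * Keyed (HMAC) transforms get a setkey handler and the
			 * larger context that also carries struct hmac_ctx;
			 * plain digests only need the base chcr_context.
			 */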
4483			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4484				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4485				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4486				a_hash->init = chcr_hmac_init;
4487				a_hash->setkey = chcr_ahash_setkey;
4488				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4489			} else {
4490				a_hash->init = chcr_sha_init;
4491				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4492				a_hash->halg.base.cra_init = chcr_sha_cra_init;
4493			}
4494			err = crypto_register_ahash(&driver_algs[i].alg.hash);
4495			ai = driver_algs[i].alg.hash.halg.base;
4496			name = ai.cra_driver_name;
4497			break;
4498		}
4499		if (err) {
4500			pr_err("%s : Algorithm registration failed\n", name);
4501			goto register_err;
4502		} else {
4503			driver_algs[i].is_registered = 1;
4504		}
4505	}
4506	return 0;
4507
4508register_err:
4509	chcr_unregister_alg();
4510	return err;
4511}
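/*
 * Once chcr_register_alg() has run, the entries above are reachable in two
 * ways: by generic cra_name, where cra_priority decides among providers, or
 * by exact cra_driver_name to pin a specific implementation. A short
 * illustrative sketch (the function name is made up, not part of the driver):
 */
#include <crypto/aead.h>

static int example_select_aead(void)
{
	struct crypto_aead *tfm;

	/* Generic name: the highest-priority provider wins. */
	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	pr_info("authenc(hmac(sha1),cbc(aes)) served by %s\n",
		crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)));
	crypto_free_aead(tfm);

	/* Exact driver name from the table above: pin it to this driver. */
	tfm = crypto_alloc_aead("authenc-hmac-sha1-cbc-aes-chcr", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	crypto_free_aead(tfm);
	return 0;
}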
4512
4513/*
4514 *	start_crypto - Register the crypto algorithms.
4515	 *	This should be called once when the first device comes up. After this,
4516	 *	the kernel will start calling the driver APIs for crypto operations.
4517 */
4518int start_crypto(void)
4519{
4520	return chcr_register_alg();
4521}
4522
4523/*
4524	 *	stop_crypto - Deregister all the crypto algorithms with the kernel.
4525	 *	This should be called once when the last device goes down. After this,
4526	 *	the kernel will not call the driver APIs for crypto operations.
4527 */
4528int stop_crypto(void)
4529{
4530	chcr_unregister_alg();
4531	return 0;
4532}
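/*
 * The pairing of the two entry points above is owned by the device-arrival
 * code (chcr_core.c, not shown here). A minimal sketch of the expected call
 * pattern, with made-up names and an assumed atomic device count: register
 * on the first device, unregister on the last.
 */
static atomic_t example_dev_count = ATOMIC_INIT(0);

static int example_device_up(void)
{
	/* First device brings the algorithms online. */
	if (atomic_inc_return(&example_dev_count) == 1)
		return start_crypto();
	return 0;
}

static void example_device_down(void)
{
	/* Last device takes them away again. */
	if (atomic_dec_and_test(&example_dev_count))
		stop_crypto();
}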