   1/*
   2 * This file is part of the Chelsio T6 Crypto driver for Linux.
   3 *
   4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 *
  34 * Written and Maintained by:
  35 *	Manoj Malviya (manojmalviya@chelsio.com)
  36 *	Atul Gupta (atul.gupta@chelsio.com)
  37 *	Jitendra Lulla (jlulla@chelsio.com)
  38 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
  39 *	Harsh Jain (harsh@chelsio.com)
  40 */
  41
  42#define pr_fmt(fmt) "chcr:" fmt
  43
  44#include <linux/kernel.h>
  45#include <linux/module.h>
  46#include <linux/crypto.h>
  47#include <linux/skbuff.h>
  48#include <linux/rtnetlink.h>
  49#include <linux/highmem.h>
  50#include <linux/scatterlist.h>
  51
  52#include <crypto/aes.h>
  53#include <crypto/algapi.h>
  54#include <crypto/hash.h>
  55#include <crypto/gcm.h>
  56#include <crypto/sha1.h>
  57#include <crypto/sha2.h>
  58#include <crypto/authenc.h>
  59#include <crypto/ctr.h>
  60#include <crypto/gf128mul.h>
  61#include <crypto/internal/aead.h>
  62#include <crypto/null.h>
  63#include <crypto/internal/skcipher.h>
  64#include <crypto/aead.h>
  65#include <crypto/scatterwalk.h>
  66#include <crypto/internal/hash.h>
  67
  68#include "t4fw_api.h"
  69#include "t4_msg.h"
  70#include "chcr_core.h"
  71#include "chcr_algo.h"
  72#include "chcr_crypto.h"
  73
  74#define IV AES_BLOCK_SIZE
  75
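/*
 * Byte cost of a ULPTX source gather list (sgl_ent_len) and of a
 * CPL_RX_PHYS_DSGL destination list (dsgl_ent_len) holding N entries,
 * indexed by N.  chcr_sg_ent_in_wr() and chcr_hash_ent_in_wr() consult
 * these tables to decide how many scatter/gather entries still fit in
 * the space left in a work request.
 */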
  76static unsigned int sgl_ent_len[] = {
  77	0, 0, 16, 24, 40, 48, 64, 72, 88,
  78	96, 112, 120, 136, 144, 160, 168, 184,
  79	192, 208, 216, 232, 240, 256, 264, 280,
  80	288, 304, 312, 328, 336, 352, 360, 376
  81};
  82
  83static unsigned int dsgl_ent_len[] = {
  84	0, 32, 32, 48, 48, 64, 64, 80, 80,
  85	112, 112, 128, 128, 144, 144, 160, 160,
  86	192, 192, 208, 208, 224, 224, 240, 240,
  87	272, 272, 288, 288, 304, 304, 320, 320
  88};
  89
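/*
 * AES key-schedule round constants (Rcon), kept in the top byte so they
 * can be XORed directly into the big-endian key words expanded by
 * get_aes_decrypt_key() below.
 */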
  90static u32 round_constant[11] = {
  91	0x01000000, 0x02000000, 0x04000000, 0x08000000,
  92	0x10000000, 0x20000000, 0x40000000, 0x80000000,
  93	0x1B000000, 0x36000000, 0x6C000000
  94};
  95
  96static int chcr_handle_cipher_resp(struct skcipher_request *req,
  97				   unsigned char *input, int err);
  98
  99static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
 100{
 101	return &ctx->crypto_ctx->aeadctx;
 102}
 103
 104static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
 105{
 106	return &ctx->crypto_ctx->ablkctx;
 107}
 108
 109static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
 110{
 111	return &ctx->crypto_ctx->hmacctx;
 112}
 113
 114static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
 115{
 116	return gctx->ctx->gcm;
 117}
 118
 119static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
 120{
 121	return gctx->ctx->authenc;
 122}
 123
 124static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
 125{
 126	return container_of(ctx->dev, struct uld_ctx, dev);
 127}
 128
 129static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
 130{
 131	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
 132}
 133
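/*
 * Count how many entlen-sized entries are needed to describe reqlen
 * bytes of @sg once the first @skip bytes have been stepped over; a
 * long scatterlist element contributes several entries.
 */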
 134static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
 135			 unsigned int entlen,
 136			 unsigned int skip)
 137{
 138	int nents = 0;
 139	unsigned int less;
 140	unsigned int skip_len = 0;
 141
 142	while (sg && skip) {
 143		if (sg_dma_len(sg) <= skip) {
 144			skip -= sg_dma_len(sg);
 145			skip_len = 0;
 146			sg = sg_next(sg);
 147		} else {
 148			skip_len = skip;
 149			skip = 0;
 150		}
 151	}
 152
 153	while (sg && reqlen) {
 154		less = min(reqlen, sg_dma_len(sg) - skip_len);
 155		nents += DIV_ROUND_UP(less, entlen);
 156		reqlen -= less;
 157		skip_len = 0;
 158		sg = sg_next(sg);
 159	}
 160	return nents;
 161}
 162
 163static inline int get_aead_subtype(struct crypto_aead *aead)
 164{
 165	struct aead_alg *alg = crypto_aead_alg(aead);
 166	struct chcr_alg_template *chcr_crypto_alg =
 167		container_of(alg, struct chcr_alg_template, alg.aead);
 168	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 169}
 170
 171void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
 172{
 173	u8 temp[SHA512_DIGEST_SIZE];
 174	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 175	int authsize = crypto_aead_authsize(tfm);
 176	struct cpl_fw6_pld *fw6_pld;
 177	int cmp = 0;
 178
 179	fw6_pld = (struct cpl_fw6_pld *)input;
 180	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
 181	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
 182		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
 183	} else {
 184
 185		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
 186				authsize, req->assoclen +
 187				req->cryptlen - authsize);
 188		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
 189	}
 190	if (cmp)
 191		*err = -EBADMSG;
 192	else
 193		*err = 0;
 194}
 195
 196static int chcr_inc_wrcount(struct chcr_dev *dev)
 197{
 198	if (dev->state == CHCR_DETACH)
 199		return 1;
 200	atomic_inc(&dev->inflight);
 201	return 0;
 202}
 203
 204static inline void chcr_dec_wrcount(struct chcr_dev *dev)
 205{
 206	atomic_dec(&dev->inflight);
 207}
 208
 209static inline int chcr_handle_aead_resp(struct aead_request *req,
 210					 unsigned char *input,
 211					 int err)
 212{
 213	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
 214	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 215	struct chcr_dev *dev = a_ctx(tfm)->dev;
 216
 217	chcr_aead_common_exit(req);
 218	if (reqctx->verify == VERIFY_SW) {
 219		chcr_verify_tag(req, input, &err);
 220		reqctx->verify = VERIFY_HW;
 221	}
 222	chcr_dec_wrcount(dev);
 223	aead_request_complete(req, err);
 224
 225	return err;
 226}
 227
 228static void get_aes_decrypt_key(unsigned char *dec_key,
 229				       const unsigned char *key,
 230				       unsigned int keylength)
 231{
 232	u32 temp;
 233	u32 w_ring[MAX_NK];
 234	int i, j, k;
 235	u8  nr, nk;
 236
 237	switch (keylength) {
 238	case AES_KEYLENGTH_128BIT:
 239		nk = KEYLENGTH_4BYTES;
 240		nr = NUMBER_OF_ROUNDS_10;
 241		break;
 242	case AES_KEYLENGTH_192BIT:
 243		nk = KEYLENGTH_6BYTES;
 244		nr = NUMBER_OF_ROUNDS_12;
 245		break;
 246	case AES_KEYLENGTH_256BIT:
 247		nk = KEYLENGTH_8BYTES;
 248		nr = NUMBER_OF_ROUNDS_14;
 249		break;
 250	default:
 251		return;
 252	}
 253	for (i = 0; i < nk; i++)
 254		w_ring[i] = get_unaligned_be32(&key[i * 4]);
 255
 256	i = 0;
 257	temp = w_ring[nk - 1];
 258	while (i + nk < (nr + 1) * 4) {
 259		if (!(i % nk)) {
 260			/* RotWord(temp) */
 261			temp = (temp << 8) | (temp >> 24);
 262			temp = aes_ks_subword(temp);
 263			temp ^= round_constant[i / nk];
 264		} else if (nk == 8 && (i % 4 == 0)) {
 265			temp = aes_ks_subword(temp);
 266		}
 267		w_ring[i % nk] ^= temp;
 268		temp = w_ring[i % nk];
 269		i++;
 270	}
 271	i--;
 272	for (k = 0, j = i % nk; k < nk; k++) {
 273		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
 274		j--;
 275		if (j < 0)
 276			j += nk;
 277	}
 278}
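/*
 * Example: for a 128-bit key (nk = 4, nr = 10) the loop above expands
 * all 44 schedule words and dec_key receives words 43..40, i.e. the
 * final round key with its words reversed - the "reverse round key"
 * stored in ablkctx->rrkey for hardware decryption.
 */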
 279
 280static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
 281{
 282	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
 283
 284	switch (ds) {
 285	case SHA1_DIGEST_SIZE:
 286		base_hash = crypto_alloc_shash("sha1", 0, 0);
 287		break;
 288	case SHA224_DIGEST_SIZE:
 289		base_hash = crypto_alloc_shash("sha224", 0, 0);
 290		break;
 291	case SHA256_DIGEST_SIZE:
 292		base_hash = crypto_alloc_shash("sha256", 0, 0);
 293		break;
 294	case SHA384_DIGEST_SIZE:
 295		base_hash = crypto_alloc_shash("sha384", 0, 0);
 296		break;
 297	case SHA512_DIGEST_SIZE:
 298		base_hash = crypto_alloc_shash("sha512", 0, 0);
 299		break;
 300	}
 301
 302	return base_hash;
 303}
 304
 305static int chcr_compute_partial_hash(struct shash_desc *desc,
 306				     char *iopad, char *result_hash,
 307				     int digest_size)
 308{
 309	struct sha1_state sha1_st;
 310	struct sha256_state sha256_st;
 311	struct sha512_state sha512_st;
 312	int error;
 313
 314	if (digest_size == SHA1_DIGEST_SIZE) {
 315		error = crypto_shash_init(desc) ?:
 316			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
 317			crypto_shash_export(desc, (void *)&sha1_st);
 318		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
 319	} else if (digest_size == SHA224_DIGEST_SIZE) {
 320		error = crypto_shash_init(desc) ?:
 321			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
 322			crypto_shash_export(desc, (void *)&sha256_st);
 323		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
 324
 325	} else if (digest_size == SHA256_DIGEST_SIZE) {
 326		error = crypto_shash_init(desc) ?:
 327			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
 328			crypto_shash_export(desc, (void *)&sha256_st);
 329		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
 330
 331	} else if (digest_size == SHA384_DIGEST_SIZE) {
 332		error = crypto_shash_init(desc) ?:
 333			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
 334			crypto_shash_export(desc, (void *)&sha512_st);
 335		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
 336
 337	} else if (digest_size == SHA512_DIGEST_SIZE) {
 338		error = crypto_shash_init(desc) ?:
 339			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
 340			crypto_shash_export(desc, (void *)&sha512_st);
 341		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
 342	} else {
 343		error = -EINVAL;
 344		pr_err("Unknown digest size %d\n", digest_size);
 345	}
 346	return error;
 347}
 348
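/*
 * Convert a partial hash from host order to the big-endian word layout
 * the hardware expects: 64-bit words for SHA-384/512 state, 32-bit
 * words for the other digests.
 */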
 349static void chcr_change_order(char *buf, int ds)
 350{
 351	int i;
 352
 353	if (ds == SHA512_DIGEST_SIZE) {
 354		for (i = 0; i < (ds / sizeof(u64)); i++)
 355			*((__be64 *)buf + i) =
 356				cpu_to_be64(*((u64 *)buf + i));
 357	} else {
 358		for (i = 0; i < (ds / sizeof(u32)); i++)
 359			*((__be32 *)buf + i) =
 360				cpu_to_be32(*((u32 *)buf + i));
 361	}
 362}
 363
 364static inline int is_hmac(struct crypto_tfm *tfm)
 365{
 366	struct crypto_alg *alg = tfm->__crt_alg;
 367	struct chcr_alg_template *chcr_crypto_alg =
 368		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
 369			     alg.hash);
 370	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
 371		return 1;
 372	return 0;
 373}
 374
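/*
 * dsgl_walk_*() emit destination CPL_RX_PHYS_DSGL entries, packed eight
 * length/address pairs per phys_sge_pairs block; ulptx_walk_*() build
 * the ULPTX source gather list, whose first entry lives in the header
 * itself (len0/addr0) with later ones packed two per ulptx_sge_pair.
 */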
 375static inline void dsgl_walk_init(struct dsgl_walk *walk,
 376				   struct cpl_rx_phys_dsgl *dsgl)
 377{
 378	walk->dsgl = dsgl;
 379	walk->nents = 0;
 380	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 381}
 382
 383static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
 384				 int pci_chan_id)
 385{
 386	struct cpl_rx_phys_dsgl *phys_cpl;
 387
 388	phys_cpl = walk->dsgl;
 389
 390	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
 391				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
 392	phys_cpl->pcirlxorder_to_noofsgentr =
 393		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
 394		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
 395		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
 396		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
 397		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
 398		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
 399	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
 400	phys_cpl->rss_hdr_int.qid = htons(qid);
 401	phys_cpl->rss_hdr_int.hash_val = 0;
 402	phys_cpl->rss_hdr_int.channel = pci_chan_id;
 403}
 404
 405static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
 406					size_t size,
 407					dma_addr_t addr)
 408{
 409	int j;
 410
 411	if (!size)
 412		return;
 413	j = walk->nents;
 414	walk->to->len[j % 8] = htons(size);
 415	walk->to->addr[j % 8] = cpu_to_be64(addr);
 416	j++;
 417	if ((j % 8) == 0)
 418		walk->to++;
 419	walk->nents = j;
 420}
 421
 422static void  dsgl_walk_add_sg(struct dsgl_walk *walk,
 423			   struct scatterlist *sg,
 424			      unsigned int slen,
 425			      unsigned int skip)
 426{
 427	int skip_len = 0;
 428	unsigned int left_size = slen, len = 0;
 429	unsigned int j = walk->nents;
 430	int offset, ent_len;
 431
 432	if (!slen)
 433		return;
 434	while (sg && skip) {
 435		if (sg_dma_len(sg) <= skip) {
 436			skip -= sg_dma_len(sg);
 437			skip_len = 0;
 438			sg = sg_next(sg);
 439		} else {
 440			skip_len = skip;
 441			skip = 0;
 442		}
 443	}
 444
 445	while (left_size && sg) {
 446		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
 447		offset = 0;
 448		while (len) {
 449			ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
 450			walk->to->len[j % 8] = htons(ent_len);
 451			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
 452						      offset + skip_len);
 453			offset += ent_len;
 454			len -= ent_len;
 455			j++;
 456			if ((j % 8) == 0)
 457				walk->to++;
 458		}
 459		walk->last_sg = sg;
 460		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
 461					  skip_len) + skip_len;
 462		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
 463		skip_len = 0;
 464		sg = sg_next(sg);
 465	}
 466	walk->nents = j;
 467}
 468
 469static inline void ulptx_walk_init(struct ulptx_walk *walk,
 470				   struct ulptx_sgl *ulp)
 471{
 472	walk->sgl = ulp;
 473	walk->nents = 0;
 474	walk->pair_idx = 0;
 475	walk->pair = ulp->sge;
 476	walk->last_sg = NULL;
 477	walk->last_sg_len = 0;
 478}
 479
 480static inline void ulptx_walk_end(struct ulptx_walk *walk)
 481{
 482	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
 483			      ULPTX_NSGE_V(walk->nents));
 484}
 485
 486
 487static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
 488					size_t size,
 489					dma_addr_t addr)
 490{
 491	if (!size)
 492		return;
 493
 494	if (walk->nents == 0) {
 495		walk->sgl->len0 = cpu_to_be32(size);
 496		walk->sgl->addr0 = cpu_to_be64(addr);
 497	} else {
 498		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
 499		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
 500		walk->pair_idx = !walk->pair_idx;
 501		if (!walk->pair_idx)
 502			walk->pair++;
 503	}
 504	walk->nents++;
 505}
 506
 507static void  ulptx_walk_add_sg(struct ulptx_walk *walk,
 508					struct scatterlist *sg,
 509			       unsigned int len,
 510			       unsigned int skip)
 511{
 512	int small;
 513	int skip_len = 0;
 514	unsigned int sgmin;
 515
 516	if (!len)
 517		return;
 518	while (sg && skip) {
 519		if (sg_dma_len(sg) <= skip) {
 520			skip -= sg_dma_len(sg);
 521			skip_len = 0;
 522			sg = sg_next(sg);
 523		} else {
 524			skip_len = skip;
 525			skip = 0;
 526		}
 527	}
 528	WARN(!sg, "SG should not be null here\n");
 529	if (sg && (walk->nents == 0)) {
 530		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
 531		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
 532		walk->sgl->len0 = cpu_to_be32(sgmin);
 533		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
 534		walk->nents++;
 535		len -= sgmin;
 536		walk->last_sg = sg;
 537		walk->last_sg_len = sgmin + skip_len;
 538		skip_len += sgmin;
 539		if (sg_dma_len(sg) == skip_len) {
 540			sg = sg_next(sg);
 541			skip_len = 0;
 542		}
 543	}
 544
 545	while (sg && len) {
 546		small = min(sg_dma_len(sg) - skip_len, len);
 547		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
 548		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
 549		walk->pair->addr[walk->pair_idx] =
 550			cpu_to_be64(sg_dma_address(sg) + skip_len);
 551		walk->pair_idx = !walk->pair_idx;
 552		walk->nents++;
 553		if (!walk->pair_idx)
 554			walk->pair++;
 555		len -= sgmin;
 556		skip_len += sgmin;
 557		walk->last_sg = sg;
 558		walk->last_sg_len = skip_len;
 559		if (sg_dma_len(sg) == skip_len) {
 560			sg = sg_next(sg);
 561			skip_len = 0;
 562		}
 563	}
 564}
 565
 566static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
 567{
 568	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 569	struct chcr_alg_template *chcr_crypto_alg =
 570		container_of(alg, struct chcr_alg_template, alg.skcipher);
 571
 572	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 573}
 574
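/*
 * Peek at the crypto egress queue under its lock; a non-zero return
 * means the queue is backlogged and the request is bounced with
 * -ENOSPC unless the caller allows backlogging.
 */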
 575static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
 576{
 577	struct adapter *adap = netdev2adap(dev);
 578	struct sge_uld_txq_info *txq_info =
 579		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
 580	struct sge_uld_txq *txq;
 581	int ret = 0;
 582
 583	local_bh_disable();
 584	txq = &txq_info->uldtxq[idx];
 585	spin_lock(&txq->sendq.lock);
 586	if (txq->full)
 587		ret = -1;
 588	spin_unlock(&txq->sendq.lock);
 589	local_bh_enable();
 590	return ret;
 591}
 592
 593static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
 594			       struct _key_ctx *key_ctx)
 595{
 596	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
 597		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
 598	} else {
 599		memcpy(key_ctx->key,
 600		       ablkctx->key + (ablkctx->enckey_len >> 1),
 601		       ablkctx->enckey_len >> 1);
 602		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
 603		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
 604	}
 605	return 0;
 606}
 607
 608static int chcr_hash_ent_in_wr(struct scatterlist *src,
 609			     unsigned int minsg,
 610			     unsigned int space,
 611			     unsigned int srcskip)
 612{
 613	int srclen = 0;
 614	int srcsg = minsg;
 615	int soffset = 0, sless;
 616
 617	if (sg_dma_len(src) == srcskip) {
 618		src = sg_next(src);
 619		srcskip = 0;
 620	}
 621	while (src && space > (sgl_ent_len[srcsg + 1])) {
  622		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
 623							CHCR_SRC_SG_SIZE);
 624		srclen += sless;
 625		soffset += sless;
 626		srcsg++;
 627		if (sg_dma_len(src) == (soffset + srcskip)) {
 628			src = sg_next(src);
 629			soffset = 0;
 630			srcskip = 0;
 631		}
 632	}
 633	return srclen;
 634}
 635
 636static int chcr_sg_ent_in_wr(struct scatterlist *src,
 637			     struct scatterlist *dst,
 638			     unsigned int minsg,
 639			     unsigned int space,
 640			     unsigned int srcskip,
 641			     unsigned int dstskip)
 642{
 643	int srclen = 0, dstlen = 0;
 644	int srcsg = minsg, dstsg = minsg;
 645	int offset = 0, soffset = 0, less, sless = 0;
 646
 647	if (sg_dma_len(src) == srcskip) {
 648		src = sg_next(src);
 649		srcskip = 0;
 650	}
 651	if (sg_dma_len(dst) == dstskip) {
 652		dst = sg_next(dst);
 653		dstskip = 0;
 654	}
 655
 656	while (src && dst &&
 657	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
 658		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
 659				CHCR_SRC_SG_SIZE);
 660		srclen += sless;
 661		srcsg++;
 662		offset = 0;
 663		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
 664		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
 665			if (srclen <= dstlen)
 666				break;
 667			less = min_t(unsigned int, sg_dma_len(dst) - offset -
 668				     dstskip, CHCR_DST_SG_SIZE);
 669			dstlen += less;
 670			offset += less;
 671			if ((offset + dstskip) == sg_dma_len(dst)) {
 672				dst = sg_next(dst);
 673				offset = 0;
 674			}
 675			dstsg++;
 676			dstskip = 0;
 677		}
 678		soffset += sless;
 679		if ((soffset + srcskip) == sg_dma_len(src)) {
 680			src = sg_next(src);
 681			srcskip = 0;
 682			soffset = 0;
 683		}
 684
 685	}
 686	return min(srclen, dstlen);
 687}
 688
 689static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
 690				struct skcipher_request *req,
 691				u8 *iv,
 692				unsigned short op_type)
 693{
 694	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 695	int err;
 696
 697	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
 698	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
 699				      req->base.complete, req->base.data);
 700	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
 701				   req->cryptlen, iv);
 702
 703	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
 704			crypto_skcipher_encrypt(&reqctx->fallback_req);
 705
 706	return err;
 707
 708}
 709
 710static inline int get_qidxs(struct crypto_async_request *req,
 711			    unsigned int *txqidx, unsigned int *rxqidx)
 712{
 713	struct crypto_tfm *tfm = req->tfm;
 714	int ret = 0;
 715
 716	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
 717	case CRYPTO_ALG_TYPE_AEAD:
 718	{
 719		struct aead_request *aead_req =
 720			container_of(req, struct aead_request, base);
 721		struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(aead_req);
 722		*txqidx = reqctx->txqidx;
 723		*rxqidx = reqctx->rxqidx;
 724		break;
 725	}
 726	case CRYPTO_ALG_TYPE_SKCIPHER:
 727	{
 728		struct skcipher_request *sk_req =
 729			container_of(req, struct skcipher_request, base);
 730		struct chcr_skcipher_req_ctx *reqctx =
 731			skcipher_request_ctx(sk_req);
 732		*txqidx = reqctx->txqidx;
 733		*rxqidx = reqctx->rxqidx;
 734		break;
 735	}
 736	case CRYPTO_ALG_TYPE_AHASH:
 737	{
 738		struct ahash_request *ahash_req =
 739			container_of(req, struct ahash_request, base);
 740		struct chcr_ahash_req_ctx *reqctx =
 741			ahash_request_ctx(ahash_req);
 742		*txqidx = reqctx->txqidx;
 743		*rxqidx = reqctx->rxqidx;
 744		break;
 745	}
 746	default:
 747		ret = -EINVAL;
 748		/* should never get here */
 749		BUG();
 750		break;
 751	}
 752	return ret;
 753}
 754
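/*
 * Fill the common FW_CRYPTO_LOOKASIDE_WR header shared by the cipher,
 * hash and AEAD paths: queue ids derived from the request context, the
 * total length in 16-byte units, and whether the payload travels
 * immediately inside the WR (imm) or as a separate gather list.
 */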
 755static inline void create_wreq(struct chcr_context *ctx,
 756			       struct chcr_wr *chcr_req,
 757			       struct crypto_async_request *req,
 758			       unsigned int imm,
 759			       int hash_sz,
 760			       unsigned int len16,
 761			       unsigned int sc_len,
 762			       unsigned int lcb)
 763{
 764	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 765	unsigned int tx_channel_id, rx_channel_id;
 766	unsigned int txqidx = 0, rxqidx = 0;
 767	unsigned int qid, fid, portno;
 768
 769	get_qidxs(req, &txqidx, &rxqidx);
 770	qid = u_ctx->lldi.rxq_ids[rxqidx];
 771	fid = u_ctx->lldi.rxq_ids[0];
 772	portno = rxqidx / ctx->rxq_perchan;
 773	tx_channel_id = txqidx / ctx->txq_perchan;
 774	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
 775
 776
 777	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
 778	chcr_req->wreq.pld_size_hash_size =
 779		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
 780	chcr_req->wreq.len16_pkd =
 781		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
 782	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
 783	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
 784							    !!lcb, txqidx);
 785
 786	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
 787	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
 788				((sizeof(chcr_req->wreq)) >> 4)));
 789	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
 790	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
 791					   sizeof(chcr_req->key_ctx) + sc_len);
 792}
 793
 794/**
 795 *	create_cipher_wr - form the WR for cipher operations
 796 *	@wrparam: Container for create_cipher_wr()'s parameters
 797 */
 798static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 799{
 800	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
 801	struct chcr_context *ctx = c_ctx(tfm);
 802	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 803	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 804	struct sk_buff *skb = NULL;
 805	struct chcr_wr *chcr_req;
 806	struct cpl_rx_phys_dsgl *phys_cpl;
 807	struct ulptx_sgl *ulptx;
 808	struct chcr_skcipher_req_ctx *reqctx =
 809		skcipher_request_ctx(wrparam->req);
 810	unsigned int temp = 0, transhdr_len, dst_size;
 811	int error;
 812	int nents;
 813	unsigned int kctx_len;
 814	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 815			GFP_KERNEL : GFP_ATOMIC;
 816	struct adapter *adap = padap(ctx->dev);
 817	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 818
 819	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
 820	nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
 821			      reqctx->dst_ofst);
 822	dst_size = get_space_for_phys_dsgl(nents);
 823	kctx_len = roundup(ablkctx->enckey_len, 16);
 824	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
 825	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
 826				  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
 827	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
 828				     (sgl_len(nents) * 8);
 829	transhdr_len += temp;
 830	transhdr_len = roundup(transhdr_len, 16);
 831	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
 832	if (!skb) {
 833		error = -ENOMEM;
 834		goto err;
 835	}
 836	chcr_req = __skb_put_zero(skb, transhdr_len);
 837	chcr_req->sec_cpl.op_ivinsrtofst =
 838			FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
 839
 840	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
 841	chcr_req->sec_cpl.aadstart_cipherstop_hi =
 842			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
 843
 844	chcr_req->sec_cpl.cipherstop_lo_authinsert =
 845			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
 846	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
 847							 ablkctx->ciph_mode,
 848							 0, 0, IV >> 1);
 849	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
 850							  0, 1, dst_size);
 851
 852	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
 853	if ((reqctx->op == CHCR_DECRYPT_OP) &&
 854	    (!(get_cryptoalg_subtype(tfm) ==
 855	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
 856	    (!(get_cryptoalg_subtype(tfm) ==
 857	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
 858		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
 859	} else {
 860		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
 861		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
 862			memcpy(chcr_req->key_ctx.key, ablkctx->key,
 863			       ablkctx->enckey_len);
 864		} else {
 865			memcpy(chcr_req->key_ctx.key, ablkctx->key +
 866			       (ablkctx->enckey_len >> 1),
 867			       ablkctx->enckey_len >> 1);
 868			memcpy(chcr_req->key_ctx.key +
 869			       (ablkctx->enckey_len >> 1),
 870			       ablkctx->key,
 871			       ablkctx->enckey_len >> 1);
 872		}
 873	}
 874	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
 875	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
 876	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
 877	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
 878
 879	atomic_inc(&adap->chcr_stats.cipher_rqst);
 880	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
 881		+ (reqctx->imm ? (wrparam->bytes) : 0);
 882	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
 883		    transhdr_len, temp,
 884			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
 885	reqctx->skb = skb;
 886
 887	if (reqctx->op && (ablkctx->ciph_mode ==
 888			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
 889		sg_pcopy_to_buffer(wrparam->req->src,
 890			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
 891			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
 892
 893	return skb;
 894err:
 895	return ERR_PTR(error);
 896}
 897
 898static inline int chcr_keyctx_ck_size(unsigned int keylen)
 899{
 900	int ck_size = 0;
 901
 902	if (keylen == AES_KEYSIZE_128)
 903		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
 904	else if (keylen == AES_KEYSIZE_192)
 905		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
 906	else if (keylen == AES_KEYSIZE_256)
 907		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
 908	else
 909		ck_size = 0;
 910
 911	return ck_size;
 912}
 913static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
 914				       const u8 *key,
 915				       unsigned int keylen)
 916{
 917	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 918
 919	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
 920				CRYPTO_TFM_REQ_MASK);
 921	crypto_skcipher_set_flags(ablkctx->sw_cipher,
 922				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
 923	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
 924}
 925
 926static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
 927			       const u8 *key,
 928			       unsigned int keylen)
 929{
 930	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 931	unsigned int ck_size, context_size;
 932	u16 alignment = 0;
 933	int err;
 934
 935	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 936	if (err)
 937		goto badkey_err;
 938
 939	ck_size = chcr_keyctx_ck_size(keylen);
 940	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
 941	memcpy(ablkctx->key, key, keylen);
 942	ablkctx->enckey_len = keylen;
 943	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
 944	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 945			keylen + alignment) >> 4;
 946
 947	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 948						0, 0, context_size);
 949	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
 950	return 0;
 951badkey_err:
 952	ablkctx->enckey_len = 0;
 953
 954	return err;
 955}
 956
 957static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
 958				   const u8 *key,
 959				   unsigned int keylen)
 960{
 961	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 962	unsigned int ck_size, context_size;
 963	u16 alignment = 0;
 964	int err;
 965
 966	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 967	if (err)
 968		goto badkey_err;
 969	ck_size = chcr_keyctx_ck_size(keylen);
 970	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
 971	memcpy(ablkctx->key, key, keylen);
 972	ablkctx->enckey_len = keylen;
 973	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 974			keylen + alignment) >> 4;
 975
 976	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 977						0, 0, context_size);
 978	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
 979
 980	return 0;
 981badkey_err:
 982	ablkctx->enckey_len = 0;
 983
 984	return err;
 985}
 986
 987static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
 988				   const u8 *key,
 989				   unsigned int keylen)
 990{
 991	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 992	unsigned int ck_size, context_size;
 993	u16 alignment = 0;
 994	int err;
 995
 996	if (keylen < CTR_RFC3686_NONCE_SIZE)
 997		return -EINVAL;
 998	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
 999	       CTR_RFC3686_NONCE_SIZE);
1000
1001	keylen -= CTR_RFC3686_NONCE_SIZE;
1002	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
1003	if (err)
1004		goto badkey_err;
1005
1006	ck_size = chcr_keyctx_ck_size(keylen);
1007	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
1008	memcpy(ablkctx->key, key, keylen);
1009	ablkctx->enckey_len = keylen;
1010	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
1011			keylen + alignment) >> 4;
1012
1013	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
1014						0, 0, context_size);
1015	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
1016
1017	return 0;
1018badkey_err:
1019	ablkctx->enckey_len = 0;
1020
1021	return err;
1022}
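/*
 * Copy @srciv to @dstiv and add @add to the 128-bit big-endian counter,
 * propagating the carry word by word: e.g. if the low 32-bit word holds
 * 0xffffffff and add is 1, it wraps to 0 and the next word up is
 * incremented.
 */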
1023static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
1024{
1025	unsigned int size = AES_BLOCK_SIZE;
1026	__be32 *b = (__be32 *)(dstiv + size);
1027	u32 c, prev;
1028
1029	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
1030	for (; size >= 4; size -= 4) {
1031		prev = be32_to_cpu(*--b);
1032		c = prev + add;
1033		*b = cpu_to_be32(c);
1034		if (prev < c)
1035			break;
1036		add = 1;
1037	}
1038
1039}
1040
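/*
 * Clamp @bytes so the 32-bit low word of the CTR counter in @iv cannot
 * wrap mid-request: e.g. if that word reads 0xfffffffe, only two more
 * blocks fit, so a larger request is cut to 2 * AES_BLOCK_SIZE and the
 * rest goes out in a follow-up work request.
 */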
1041static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
1042{
1043	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
1044	u64 c;
1045	u32 temp = be32_to_cpu(*--b);
1046
1047	temp = ~temp;
1048	c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
1049	if ((bytes / AES_BLOCK_SIZE) >= c)
1050		bytes = c * AES_BLOCK_SIZE;
1051	return bytes;
1052}
1053
1054static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
1055			     u32 isfinal)
1056{
1057	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1058	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1059	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1060	struct crypto_aes_ctx aes;
1061	int ret, i;
1062	u8 *key;
1063	unsigned int keylen;
1064	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1065	int round8 = round / 8;
1066
1067	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1068
1069	keylen = ablkctx->enckey_len / 2;
1070	key = ablkctx->key + keylen;
1071	/* For a 192 bit key, remove the padded zeroes which were
1072	 * added in chcr_xts_setkey
1073	 */
1074	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
1075			== CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
1076		ret = aes_expandkey(&aes, key, keylen - 8);
1077	else
1078		ret = aes_expandkey(&aes, key, keylen);
1079	if (ret)
1080		return ret;
1081	aes_encrypt(&aes, iv, iv);
1082	for (i = 0; i < round8; i++)
1083		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1084
1085	for (i = 0; i < (round % 8); i++)
1086		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1087
1088	if (!isfinal)
1089		aes_decrypt(&aes, iv, iv);
1090
1091	memzero_explicit(&aes, sizeof(aes));
1092	return 0;
1093}
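/*
 * The XTS tweak for block n is encrypt(IV) * x^n in GF(2^128); the
 * round/round8 arithmetic above advances the tweak past the blocks
 * already processed, eight doublings at a time via gf128mul_x8_ble().
 */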
1094
1095static int chcr_update_cipher_iv(struct skcipher_request *req,
1096				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1097{
1098	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1099	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1100	int subtype = get_cryptoalg_subtype(tfm);
1101	int ret = 0;
1102
1103	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1104		ctr_add_iv(iv, req->iv, (reqctx->processed /
1105			   AES_BLOCK_SIZE));
1106	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1107		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1108			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1109						AES_BLOCK_SIZE) + 1);
1110	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1111		ret = chcr_update_tweak(req, iv, 0);
1112	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1113		if (reqctx->op)
1114			/*Updated before sending last WR*/
1115			memcpy(iv, req->iv, AES_BLOCK_SIZE);
1116		else
1117			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1118	}
1119
1120	return ret;
1121
1122}
1123
1124/* We need a separate function for the final IV because in RFC3686 the
1125 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
1126 * remains constant for subsequent update requests
1127 */
1128
1129static int chcr_final_cipher_iv(struct skcipher_request *req,
1130				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1131{
1132	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1133	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1134	int subtype = get_cryptoalg_subtype(tfm);
1135	int ret = 0;
1136
1137	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1138		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
1139						       AES_BLOCK_SIZE));
1140	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
1141		if (!reqctx->partial_req)
1142			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1143		else
1144			ret = chcr_update_tweak(req, iv, 1);
1145	}
1146	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1147		/*Already updated for Decrypt*/
1148		if (!reqctx->op)
1149			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1150
1151	}
1152	return ret;
1153
1154}
1155
1156static int chcr_handle_cipher_resp(struct skcipher_request *req,
1157				   unsigned char *input, int err)
1158{
1159	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1160	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1161	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1162	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1163	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1164	struct chcr_dev *dev = c_ctx(tfm)->dev;
1165	struct chcr_context *ctx = c_ctx(tfm);
1166	struct adapter *adap = padap(ctx->dev);
1167	struct cipher_wr_param wrparam;
1168	struct sk_buff *skb;
1169	int bytes;
1170
1171	if (err)
1172		goto unmap;
1173	if (req->cryptlen == reqctx->processed) {
1174		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1175				      req);
1176		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
1177		goto complete;
1178	}
1179
1180	if (!reqctx->imm) {
1181		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1182					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1183					  reqctx->src_ofst, reqctx->dst_ofst);
1184		if ((bytes + reqctx->processed) >= req->cryptlen)
1185			bytes  = req->cryptlen - reqctx->processed;
1186		else
1187			bytes = rounddown(bytes, 16);
1188	} else {
1189		/*CTR mode counter overflow*/
1190		bytes  = req->cryptlen - reqctx->processed;
1191	}
1192	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1193	if (err)
1194		goto unmap;
1195
1196	if (unlikely(bytes == 0)) {
1197		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1198				      req);
1199		memcpy(req->iv, reqctx->init_iv, IV);
1200		atomic_inc(&adap->chcr_stats.fallback);
1201		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
1202					   reqctx->op);
1203		goto complete;
1204	}
1205
1206	if (get_cryptoalg_subtype(tfm) ==
1207	    CRYPTO_ALG_SUB_TYPE_CTR)
1208		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1209	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
1210	wrparam.req = req;
1211	wrparam.bytes = bytes;
1212	skb = create_cipher_wr(&wrparam);
1213	if (IS_ERR(skb)) {
1214		pr_err("%s : Failed to form WR. No memory\n", __func__);
1215		err = PTR_ERR(skb);
1216		goto unmap;
1217	}
1218	skb->dev = u_ctx->lldi.ports[0];
1219	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1220	chcr_send_wr(skb);
1221	reqctx->last_req_len = bytes;
1222	reqctx->processed += bytes;
1223	if (get_cryptoalg_subtype(tfm) ==
1224		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1225			CRYPTO_TFM_REQ_MAY_SLEEP) {
1226		complete(&ctx->cbc_aes_aio_done);
1227	}
1228	return 0;
1229unmap:
1230	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1231complete:
1232	if (get_cryptoalg_subtype(tfm) ==
1233		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1234			CRYPTO_TFM_REQ_MAY_SLEEP) {
1235		complete(&ctx->cbc_aes_aio_done);
1236	}
1237	chcr_dec_wrcount(dev);
1238	skcipher_request_complete(req, err);
1239	return err;
1240}
1241
1242static int process_cipher(struct skcipher_request *req,
1243				  unsigned short qid,
1244				  struct sk_buff **skb,
1245				  unsigned short op_type)
1246{
1247	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1248	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1249	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1250	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1251	struct adapter *adap = padap(c_ctx(tfm)->dev);
1252	struct	cipher_wr_param wrparam;
1253	int bytes, err = -EINVAL;
1254	int subtype;
1255
1256	reqctx->processed = 0;
1257	reqctx->partial_req = 0;
1258	if (!req->iv)
1259		goto error;
1260	subtype = get_cryptoalg_subtype(tfm);
1261	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1262	    (req->cryptlen == 0) ||
1263	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
1264		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
1265			goto fallback;
1266		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
1267			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1268			goto fallback;
1269		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1270		       ablkctx->enckey_len, req->cryptlen, ivsize);
1271		goto error;
1272	}
1273
1274	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1275	if (err)
1276		goto error;
1277	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1278					    AES_MIN_KEY_SIZE +
1279					    sizeof(struct cpl_rx_phys_dsgl) +
1280					/*Min dsgl size*/
1281					    32))) {
1282		/* Can be sent as Imm*/
1283		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1284
1285		dnents = sg_nents_xlen(req->dst, req->cryptlen,
1286				       CHCR_DST_SG_SIZE, 0);
1287		phys_dsgl = get_space_for_phys_dsgl(dnents);
1288		kctx_len = roundup(ablkctx->enckey_len, 16);
1289		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1290		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
1291			SGE_MAX_WR_LEN;
1292		bytes = IV + req->cryptlen;
1293
1294	} else {
1295		reqctx->imm = 0;
1296	}
1297
1298	if (!reqctx->imm) {
1299		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1300					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1301					  0, 0);
1302		if ((bytes + reqctx->processed) >= req->cryptlen)
1303			bytes  = req->cryptlen - reqctx->processed;
1304		else
1305			bytes = rounddown(bytes, 16);
1306	} else {
1307		bytes = req->cryptlen;
1308	}
1309	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
1310		bytes = adjust_ctr_overflow(req->iv, bytes);
1311	}
1312	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1313		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1314		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1315				CTR_RFC3686_IV_SIZE);
1316
1317		/* initialize counter portion of counter block */
1318		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1319			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1320		memcpy(reqctx->init_iv, reqctx->iv, IV);
1321
1322	} else {
1323
1324		memcpy(reqctx->iv, req->iv, IV);
1325		memcpy(reqctx->init_iv, req->iv, IV);
1326	}
1327	if (unlikely(bytes == 0)) {
1328		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1329				      req);
1330fallback:       atomic_inc(&adap->chcr_stats.fallback);
1331		err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
1332					   subtype ==
1333					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
1334					   reqctx->iv : req->iv,
1335					   op_type);
1336		goto error;
1337	}
1338	reqctx->op = op_type;
1339	reqctx->srcsg = req->src;
1340	reqctx->dstsg = req->dst;
1341	reqctx->src_ofst = 0;
1342	reqctx->dst_ofst = 0;
1343	wrparam.qid = qid;
1344	wrparam.req = req;
1345	wrparam.bytes = bytes;
1346	*skb = create_cipher_wr(&wrparam);
1347	if (IS_ERR(*skb)) {
1348		err = PTR_ERR(*skb);
1349		goto unmap;
1350	}
1351	reqctx->processed = bytes;
1352	reqctx->last_req_len = bytes;
1353	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
1354
1355	return 0;
1356unmap:
1357	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1358error:
1359	return err;
1360}
1361
1362static int chcr_aes_encrypt(struct skcipher_request *req)
1363{
1364	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1365	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1366	struct chcr_dev *dev = c_ctx(tfm)->dev;
1367	struct sk_buff *skb = NULL;
1368	int err;
1369	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1370	struct chcr_context *ctx = c_ctx(tfm);
1371	unsigned int cpu;
1372
1373	cpu = get_cpu();
1374	reqctx->txqidx = cpu % ctx->ntxq;
1375	reqctx->rxqidx = cpu % ctx->nrxq;
1376	put_cpu();
1377
1378	err = chcr_inc_wrcount(dev);
1379	if (err)
1380		return -ENXIO;
1381	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1382						reqctx->txqidx) &&
1383		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1384			err = -ENOSPC;
1385			goto error;
1386	}
1387
1388	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1389			     &skb, CHCR_ENCRYPT_OP);
1390	if (err || !skb)
1391		return  err;
1392	skb->dev = u_ctx->lldi.ports[0];
1393	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1394	chcr_send_wr(skb);
1395	if (get_cryptoalg_subtype(tfm) ==
1396		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1397			CRYPTO_TFM_REQ_MAY_SLEEP) {
1398			reqctx->partial_req = 1;
1399			wait_for_completion(&ctx->cbc_aes_aio_done);
1400	}
1401	return -EINPROGRESS;
1402error:
1403	chcr_dec_wrcount(dev);
1404	return err;
1405}
1406
1407static int chcr_aes_decrypt(struct skcipher_request *req)
1408{
1409	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1410	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1411	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1412	struct chcr_dev *dev = c_ctx(tfm)->dev;
1413	struct sk_buff *skb = NULL;
1414	int err;
1415	struct chcr_context *ctx = c_ctx(tfm);
1416	unsigned int cpu;
1417
1418	cpu = get_cpu();
1419	reqctx->txqidx = cpu % ctx->ntxq;
1420	reqctx->rxqidx = cpu % ctx->nrxq;
1421	put_cpu();
1422
1423	err = chcr_inc_wrcount(dev);
1424	if (err)
1425		return -ENXIO;
1426
1427	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1428						reqctx->txqidx) &&
1429		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
1430			return -ENOSPC;
1431	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1432			     &skb, CHCR_DECRYPT_OP);
1433	if (err || !skb)
1434		return err;
1435	skb->dev = u_ctx->lldi.ports[0];
1436	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1437	chcr_send_wr(skb);
1438	return -EINPROGRESS;
1439}
1440static int chcr_device_init(struct chcr_context *ctx)
1441{
1442	struct uld_ctx *u_ctx = NULL;
1443	int txq_perchan, ntxq;
1444	int err = 0, rxq_perchan;
1445
1446	if (!ctx->dev) {
1447		u_ctx = assign_chcr_device();
1448		if (!u_ctx) {
1449			err = -ENXIO;
1450			pr_err("chcr device assignment fails\n");
1451			goto out;
1452		}
1453		ctx->dev = &u_ctx->dev;
1454		ntxq = u_ctx->lldi.ntxq;
1455		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1456		txq_perchan = ntxq / u_ctx->lldi.nchan;
1457		ctx->ntxq = ntxq;
1458		ctx->nrxq = u_ctx->lldi.nrxq;
1459		ctx->rxq_perchan = rxq_perchan;
1460		ctx->txq_perchan = txq_perchan;
1461	}
1462out:
1463	return err;
1464}
1465
1466static int chcr_init_tfm(struct crypto_skcipher *tfm)
1467{
1468	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1469	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1470	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1471
1472	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
1473				CRYPTO_ALG_NEED_FALLBACK);
1474	if (IS_ERR(ablkctx->sw_cipher)) {
1475		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1476		return PTR_ERR(ablkctx->sw_cipher);
1477	}
1478	init_completion(&ctx->cbc_aes_aio_done);
1479	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1480					 crypto_skcipher_reqsize(ablkctx->sw_cipher));
1481
1482	return chcr_device_init(ctx);
1483}
1484
1485static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
1486{
1487	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1488	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1489	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1490
1491	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1492	 * cannot be used as the fallback in chcr_handle_cipher_response
1493	 */
1494	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1495				CRYPTO_ALG_NEED_FALLBACK);
1496	if (IS_ERR(ablkctx->sw_cipher)) {
1497		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1498		return PTR_ERR(ablkctx->sw_cipher);
1499	}
1500	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1501				    crypto_skcipher_reqsize(ablkctx->sw_cipher));
1502	return chcr_device_init(ctx);
1503}
1504
1505
1506static void chcr_exit_tfm(struct crypto_skcipher *tfm)
1507{
1508	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1509	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1510
1511	crypto_free_skcipher(ablkctx->sw_cipher);
1512}
1513
1514static int get_alg_config(struct algo_param *params,
1515			  unsigned int auth_size)
1516{
1517	switch (auth_size) {
1518	case SHA1_DIGEST_SIZE:
1519		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1520		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1521		params->result_size = SHA1_DIGEST_SIZE;
1522		break;
1523	case SHA224_DIGEST_SIZE:
1524		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1525		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1526		params->result_size = SHA256_DIGEST_SIZE;
1527		break;
1528	case SHA256_DIGEST_SIZE:
1529		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1530		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1531		params->result_size = SHA256_DIGEST_SIZE;
1532		break;
1533	case SHA384_DIGEST_SIZE:
1534		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1535		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1536		params->result_size = SHA512_DIGEST_SIZE;
1537		break;
1538	case SHA512_DIGEST_SIZE:
1539		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1540		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1541		params->result_size = SHA512_DIGEST_SIZE;
1542		break;
1543	default:
1544		pr_err("ERROR, unsupported digest size\n");
1545		return -EINVAL;
1546	}
1547	return 0;
1548}
1549
1550static inline void chcr_free_shash(struct crypto_shash *base_hash)
1551{
1552		crypto_free_shash(base_hash);
1553}
1554
1555/**
1556 *	create_hash_wr - Create hash work request
1557 *	@req: Cipher req base
1558 *	@param: Container for create_hash_wr()'s parameters
1559 */
1560static struct sk_buff *create_hash_wr(struct ahash_request *req,
1561				      struct hash_wr_param *param)
1562{
1563	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1564	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1565	struct chcr_context *ctx = h_ctx(tfm);
1566	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1567	struct sk_buff *skb = NULL;
1568	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1569	struct chcr_wr *chcr_req;
1570	struct ulptx_sgl *ulptx;
1571	unsigned int nents = 0, transhdr_len;
1572	unsigned int temp = 0;
1573	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1574		GFP_ATOMIC;
1575	struct adapter *adap = padap(h_ctx(tfm)->dev);
1576	int error = 0;
1577	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
1578
1579	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
1580	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1581	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1582				param->sg_len) <= SGE_MAX_WR_LEN;
1583	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1584		      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1585	nents += param->bfr_len ? 1 : 0;
1586	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1587				param->sg_len, 16) : (sgl_len(nents) * 8);
1588	transhdr_len = roundup(transhdr_len, 16);
1589
1590	skb = alloc_skb(transhdr_len, flags);
1591	if (!skb)
1592		return ERR_PTR(-ENOMEM);
1593	chcr_req = __skb_put_zero(skb, transhdr_len);
1594
1595	chcr_req->sec_cpl.op_ivinsrtofst =
1596		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
1597
1598	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1599
1600	chcr_req->sec_cpl.aadstart_cipherstop_hi =
1601		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1602	chcr_req->sec_cpl.cipherstop_lo_authinsert =
1603		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1604	chcr_req->sec_cpl.seqno_numivs =
1605		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1606					 param->opad_needed, 0);
1607
1608	chcr_req->sec_cpl.ivgen_hdrlen =
1609		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1610
1611	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1612	       param->alg_prm.result_size);
1613
1614	if (param->opad_needed)
1615		memcpy(chcr_req->key_ctx.key +
1616		       ((param->alg_prm.result_size <= 32) ? 32 :
1617			CHCR_HASH_MAX_DIGEST_SIZE),
1618		       hmacctx->opad, param->alg_prm.result_size);
1619
1620	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1621					    param->alg_prm.mk_size, 0,
1622					    param->opad_needed,
1623					    ((param->kctx_len +
1624					     sizeof(chcr_req->key_ctx)) >> 4));
1625	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1626	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1627				     DUMMY_BYTES);
1628	if (param->bfr_len != 0) {
1629		req_ctx->hctx_wr.dma_addr =
1630			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1631				       param->bfr_len, DMA_TO_DEVICE);
1632		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1633				       req_ctx->hctx_wr.dma_addr)) {
1634			error = -ENOMEM;
1635			goto err;
1636		}
1637		req_ctx->hctx_wr.dma_len = param->bfr_len;
1638	} else {
1639		req_ctx->hctx_wr.dma_addr = 0;
1640	}
1641	chcr_add_hash_src_ent(req, ulptx, param);
1642	/* Request up to max WR size */
1643	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1644				(param->sg_len + param->bfr_len) : 0);
1645	atomic_inc(&adap->chcr_stats.digest_rqst);
1646	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1647		    param->hash_size, transhdr_len,
1648		    temp,  0);
1649	req_ctx->hctx_wr.skb = skb;
1650	return skb;
1651err:
1652	kfree_skb(skb);
1653	return  ERR_PTR(error);
1654}
1655
1656static int chcr_ahash_update(struct ahash_request *req)
1657{
1658	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1659	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1660	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1661	struct chcr_context *ctx = h_ctx(rtfm);
1662	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1663	struct sk_buff *skb;
1664	u8 remainder = 0, bs;
1665	unsigned int nbytes = req->nbytes;
1666	struct hash_wr_param params;
1667	int error;
1668	unsigned int cpu;
1669
1670	cpu = get_cpu();
1671	req_ctx->txqidx = cpu % ctx->ntxq;
1672	req_ctx->rxqidx = cpu % ctx->nrxq;
1673	put_cpu();
1674
1675	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1676
1677	if (nbytes + req_ctx->reqlen >= bs) {
1678		remainder = (nbytes + req_ctx->reqlen) % bs;
1679		nbytes = nbytes + req_ctx->reqlen - remainder;
1680	} else {
1681		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1682				   + req_ctx->reqlen, nbytes, 0);
1683		req_ctx->reqlen += nbytes;
1684		return 0;
1685	}
1686	error = chcr_inc_wrcount(dev);
1687	if (error)
1688		return -ENXIO;
1689	/* Detach state for CHCR means lldi or padap is freed. Increasing
1690	 * inflight count for dev guarantees that lldi and padap are valid
1691	 */
1692	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1693						req_ctx->txqidx) &&
1694		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1695			error = -ENOSPC;
1696			goto err;
1697	}
1698
1699	chcr_init_hctx_per_wr(req_ctx);
1700	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1701	if (error) {
1702		error = -ENOMEM;
1703		goto err;
1704	}
1705	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1706	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1707	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1708				     HASH_SPACE_LEFT(params.kctx_len), 0);
1709	if (params.sg_len > req->nbytes)
1710		params.sg_len = req->nbytes;
1711	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1712			req_ctx->reqlen;
1713	params.opad_needed = 0;
1714	params.more = 1;
1715	params.last = 0;
1716	params.bfr_len = req_ctx->reqlen;
1717	params.scmd1 = 0;
1718	req_ctx->hctx_wr.srcsg = req->src;
1719
1720	params.hash_size = params.alg_prm.result_size;
1721	req_ctx->data_len += params.sg_len + params.bfr_len;
1722	skb = create_hash_wr(req, &params);
1723	if (IS_ERR(skb)) {
1724		error = PTR_ERR(skb);
1725		goto unmap;
1726	}
1727
1728	req_ctx->hctx_wr.processed += params.sg_len;
1729	if (remainder) {
1730		/* Swap buffers */
1731		swap(req_ctx->reqbfr, req_ctx->skbfr);
1732		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1733				   req_ctx->reqbfr, remainder, req->nbytes -
1734				   remainder);
1735	}
1736	req_ctx->reqlen = remainder;
1737	skb->dev = u_ctx->lldi.ports[0];
1738	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1739	chcr_send_wr(skb);
1740	return -EINPROGRESS;
1741unmap:
1742	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1743err:
1744	chcr_dec_wrcount(dev);
1745	return error;
1746}
1747
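/*
 * Hand-build the final MD-style padding block: 0x80, zeroes, then the
 * message length in bits in the last 8 bytes (offset 56 for 64-byte
 * blocks, 120 for 128-byte ones).  scmd1 is the byte count, hence the
 * shift left by 3.
 */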
1748static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1749{
1750	memset(bfr_ptr, 0, bs);
1751	*bfr_ptr = 0x80;
1752	if (bs == 64)
1753		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1754	else
1755		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1756}
1757
1758static int chcr_ahash_final(struct ahash_request *req)
1759{
1760	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1761	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1762	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1763	struct hash_wr_param params;
1764	struct sk_buff *skb;
1765	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1766	struct chcr_context *ctx = h_ctx(rtfm);
1767	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1768	int error;
1769	unsigned int cpu;
1770
1771	cpu = get_cpu();
1772	req_ctx->txqidx = cpu % ctx->ntxq;
1773	req_ctx->rxqidx = cpu % ctx->nrxq;
1774	put_cpu();
1775
1776	error = chcr_inc_wrcount(dev);
1777	if (error)
1778		return -ENXIO;
1779
1780	chcr_init_hctx_per_wr(req_ctx);
1781	if (is_hmac(crypto_ahash_tfm(rtfm)))
1782		params.opad_needed = 1;
1783	else
1784		params.opad_needed = 0;
1785	params.sg_len = 0;
1786	req_ctx->hctx_wr.isfinal = 1;
1787	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1788	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1789	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1790		params.opad_needed = 1;
1791		params.kctx_len *= 2;
1792	} else {
1793		params.opad_needed = 0;
1794	}
1795
1796	req_ctx->hctx_wr.result = 1;
1797	params.bfr_len = req_ctx->reqlen;
1798	req_ctx->data_len += params.bfr_len + params.sg_len;
1799	req_ctx->hctx_wr.srcsg = req->src;
1800	if (req_ctx->reqlen == 0) {
1801		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1802		params.last = 0;
1803		params.more = 1;
1804		params.scmd1 = 0;
1805		params.bfr_len = bs;
1806
1807	} else {
1808		params.scmd1 = req_ctx->data_len;
1809		params.last = 1;
1810		params.more = 0;
1811	}
1812	params.hash_size = crypto_ahash_digestsize(rtfm);
1813	skb = create_hash_wr(req, &params);
1814	if (IS_ERR(skb)) {
1815		error = PTR_ERR(skb);
1816		goto err;
1817	}
1818	req_ctx->reqlen = 0;
1819	skb->dev = u_ctx->lldi.ports[0];
1820	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1821	chcr_send_wr(skb);
1822	return -EINPROGRESS;
1823err:
1824	chcr_dec_wrcount(dev);
1825	return error;
1826}
1827
1828static int chcr_ahash_finup(struct ahash_request *req)
1829{
1830	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1831	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1832	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1833	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1834	struct chcr_context *ctx = h_ctx(rtfm);
1835	struct sk_buff *skb;
1836	struct hash_wr_param params;
1837	u8  bs;
1838	int error;
1839	unsigned int cpu;
1840
1841	cpu = get_cpu();
1842	req_ctx->txqidx = cpu % ctx->ntxq;
1843	req_ctx->rxqidx = cpu % ctx->nrxq;
1844	put_cpu();
1845
1846	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1847	error = chcr_inc_wrcount(dev);
1848	if (error)
1849		return -ENXIO;
1850
1851	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1852						req_ctx->txqidx) &&
1853		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1854			error = -ENOSPC;
1855			goto err;
1856	}
1857	chcr_init_hctx_per_wr(req_ctx);
1858	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1859	if (error) {
1860		error = -ENOMEM;
1861		goto err;
1862	}
1863
1864	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1865	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1866	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1867		params.kctx_len *= 2;
1868		params.opad_needed = 1;
1869	} else {
1870		params.opad_needed = 0;
1871	}
1872
1873	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1874				    HASH_SPACE_LEFT(params.kctx_len), 0);
1875	if (params.sg_len < req->nbytes) {
1876		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1877			params.kctx_len /= 2;
1878			params.opad_needed = 0;
1879		}
1880		params.last = 0;
1881		params.more = 1;
1882		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1883					- req_ctx->reqlen;
1884		params.hash_size = params.alg_prm.result_size;
1885		params.scmd1 = 0;
1886	} else {
1887		params.last = 1;
1888		params.more = 0;
1889		params.sg_len = req->nbytes;
1890		params.hash_size = crypto_ahash_digestsize(rtfm);
1891		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1892				params.sg_len;
1893	}
1894	params.bfr_len = req_ctx->reqlen;
1895	req_ctx->data_len += params.bfr_len + params.sg_len;
1896	req_ctx->hctx_wr.result = 1;
1897	req_ctx->hctx_wr.srcsg = req->src;
1898	if ((req_ctx->reqlen + req->nbytes) == 0) {
1899		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1900		params.last = 0;
1901		params.more = 1;
1902		params.scmd1 = 0;
1903		params.bfr_len = bs;
1904	}
1905	skb = create_hash_wr(req, &params);
1906	if (IS_ERR(skb)) {
1907		error = PTR_ERR(skb);
1908		goto unmap;
1909	}
1910	req_ctx->reqlen = 0;
1911	req_ctx->hctx_wr.processed += params.sg_len;
1912	skb->dev = u_ctx->lldi.ports[0];
1913	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1914	chcr_send_wr(skb);
1915	return -EINPROGRESS;
1916unmap:
1917	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1918err:
1919	chcr_dec_wrcount(dev);
1920	return error;
1921}
1922
1923static int chcr_hmac_init(struct ahash_request *areq);
1924static int chcr_sha_init(struct ahash_request *areq);
1925
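/*
 * digest() is init() + update() + final() in one shot: the request state
 * is re-initialised here and the whole source is submitted, with
 * continuation via chcr_ahash_continue() when it does not fit into a
 * single work request.
 */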
1926static int chcr_ahash_digest(struct ahash_request *req)
1927{
1928	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1929	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1930	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1931	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1932	struct chcr_context *ctx = h_ctx(rtfm);
1933	struct sk_buff *skb;
1934	struct hash_wr_param params;
1935	u8  bs;
1936	int error;
1937	unsigned int cpu;
1938
1939	cpu = get_cpu();
1940	req_ctx->txqidx = cpu % ctx->ntxq;
1941	req_ctx->rxqidx = cpu % ctx->nrxq;
1942	put_cpu();
1943
1944	if (is_hmac(crypto_ahash_tfm(rtfm)))
1945		chcr_hmac_init(req);
1946	else
1947		chcr_sha_init(req);
1948
1949	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1950	error = chcr_inc_wrcount(dev);
1951	if (error)
1952		return -ENXIO;
1953
1954	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1955						req_ctx->txqidx) &&
1956		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1957			error = -ENOSPC;
1958			goto err;
1959	}
1960
1961	chcr_init_hctx_per_wr(req_ctx);
1962	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1963	if (error) {
1964		error = -ENOMEM;
1965		goto err;
1966	}
1967
1968	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1969	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1970	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1971		params.kctx_len *= 2;
1972		params.opad_needed = 1;
1973	} else {
1974		params.opad_needed = 0;
1975	}
1976	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1977				HASH_SPACE_LEFT(params.kctx_len), 0);
1978	if (params.sg_len < req->nbytes) {
1979		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1980			params.kctx_len /= 2;
1981			params.opad_needed = 0;
1982		}
1983		params.last = 0;
1984		params.more = 1;
1985		params.scmd1 = 0;
1986		params.sg_len = rounddown(params.sg_len, bs);
1987		params.hash_size = params.alg_prm.result_size;
1988	} else {
1989		params.sg_len = req->nbytes;
1990		params.hash_size = crypto_ahash_digestsize(rtfm);
1991		params.last = 1;
1992		params.more = 0;
1993		params.scmd1 = req->nbytes + req_ctx->data_len;
1994
1995	}
1996	params.bfr_len = 0;
1997	req_ctx->hctx_wr.result = 1;
1998	req_ctx->hctx_wr.srcsg = req->src;
1999	req_ctx->data_len += params.bfr_len + params.sg_len;
2000
2001	if (req->nbytes == 0) {
2002		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
2003		params.more = 1;
2004		params.bfr_len = bs;
2005	}
2006
2007	skb = create_hash_wr(req, &params);
2008	if (IS_ERR(skb)) {
2009		error = PTR_ERR(skb);
2010		goto unmap;
2011	}
2012	req_ctx->hctx_wr.processed += params.sg_len;
2013	skb->dev = u_ctx->lldi.ports[0];
2014	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
2015	chcr_send_wr(skb);
2016	return -EINPROGRESS;
2017unmap:
2018	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2019err:
2020	chcr_dec_wrcount(dev);
2021	return error;
2022}
2023
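/*
 * When a request spans several work requests, chcr_handle_ahash_resp()
 * saves the partial hash returned by the hardware and calls
 * chcr_ahash_continue() to issue the next WR, resuming the scatterlist
 * walk at hctx_wr->srcsg/src_ofst.
 */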
2024static int chcr_ahash_continue(struct ahash_request *req)
2025{
2026	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2027	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2028	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
2029	struct chcr_context *ctx = h_ctx(rtfm);
2030	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2031	struct sk_buff *skb;
2032	struct hash_wr_param params;
2033	u8  bs;
2034	int error;
2035	unsigned int cpu;
2036
2037	cpu = get_cpu();
2038	reqctx->txqidx = cpu % ctx->ntxq;
2039	reqctx->rxqidx = cpu % ctx->nrxq;
2040	put_cpu();
2041
2042	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2043	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2044	params.kctx_len = roundup(params.alg_prm.result_size, 16);
2045	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2046		params.kctx_len *= 2;
2047		params.opad_needed = 1;
2048	} else {
2049		params.opad_needed = 0;
2050	}
2051	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2052					    HASH_SPACE_LEFT(params.kctx_len),
2053					    hctx_wr->src_ofst);
2054	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2055		params.sg_len = req->nbytes - hctx_wr->processed;
2056	if (!hctx_wr->result ||
2057	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2058		if (is_hmac(crypto_ahash_tfm(rtfm))) {
2059			params.kctx_len /= 2;
2060			params.opad_needed = 0;
2061		}
2062		params.last = 0;
2063		params.more = 1;
2064		params.sg_len = rounddown(params.sg_len, bs);
2065		params.hash_size = params.alg_prm.result_size;
2066		params.scmd1 = 0;
2067	} else {
2068		params.last = 1;
2069		params.more = 0;
2070		params.hash_size = crypto_ahash_digestsize(rtfm);
2071		params.scmd1 = reqctx->data_len + params.sg_len;
2072	}
2073	params.bfr_len = 0;
2074	reqctx->data_len += params.sg_len;
2075	skb = create_hash_wr(req, &params);
2076	if (IS_ERR(skb)) {
2077		error = PTR_ERR(skb);
2078		goto err;
2079	}
2080	hctx_wr->processed += params.sg_len;
2081	skb->dev = u_ctx->lldi.ports[0];
2082	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2083	chcr_send_wr(skb);
2084	return 0;
2085err:
2086	return error;
2087}
2088
2089static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2090					  unsigned char *input,
2091					  int err)
2092{
2093	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2094	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2095	int digestsize, updated_digestsize;
2096	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2097	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2098	struct chcr_dev *dev = h_ctx(tfm)->dev;
2099
2100	if (input == NULL)
2101		goto out;
2102	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2103	updated_digestsize = digestsize;
2104	if (digestsize == SHA224_DIGEST_SIZE)
2105		updated_digestsize = SHA256_DIGEST_SIZE;
2106	else if (digestsize == SHA384_DIGEST_SIZE)
2107		updated_digestsize = SHA512_DIGEST_SIZE;
2108
2109	if (hctx_wr->dma_addr) {
2110		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2111				 hctx_wr->dma_len, DMA_TO_DEVICE);
2112		hctx_wr->dma_addr = 0;
2113	}
2114	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2115				 req->nbytes)) {
2116		if (hctx_wr->result == 1) {
2117			hctx_wr->result = 0;
2118			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2119			       digestsize);
2120		} else {
2121			memcpy(reqctx->partial_hash,
2122			       input + sizeof(struct cpl_fw6_pld),
2123			       updated_digestsize);
2124
2125		}
2126		goto unmap;
2127	}
2128	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2129	       updated_digestsize);
2130
2131	err = chcr_ahash_continue(req);
2132	if (err)
2133		goto unmap;
2134	return;
2135unmap:
2136	if (hctx_wr->is_sg_map)
2137		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2138
2139
2140out:
2141	chcr_dec_wrcount(dev);
2142	ahash_request_complete(req, err);
2143}
2144
2145	/*
2146	 *	chcr_handle_resp - unmap the DMA buffers and complete the request
2147	 *	@req: crypto request; @input: hardware response; @err: completion status
2148	 */
2149int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2150			 int err)
2151{
2152	struct crypto_tfm *tfm = req->tfm;
2153	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2154	struct adapter *adap = padap(ctx->dev);
2155
2156	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2157	case CRYPTO_ALG_TYPE_AEAD:
2158		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2159		break;
2160
2161	case CRYPTO_ALG_TYPE_SKCIPHER:
2162		 chcr_handle_cipher_resp(skcipher_request_cast(req),
2163					       input, err);
2164		break;
2165	case CRYPTO_ALG_TYPE_AHASH:
2166		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2167	}
2168	atomic_inc(&adap->chcr_stats.complete);
2169	return err;
2170}
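
/*
 * export/import snapshot the software-visible hash state (buffered tail,
 * running length and partial digest) so a request can be suspended and
 * resumed later, as the ahash .export/.import interface requires.
 */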
2171static int chcr_ahash_export(struct ahash_request *areq, void *out)
2172{
2173	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2174	struct chcr_ahash_req_ctx *state = out;
2175
2176	state->reqlen = req_ctx->reqlen;
2177	state->data_len = req_ctx->data_len;
2178	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2179	memcpy(state->partial_hash, req_ctx->partial_hash,
2180	       CHCR_HASH_MAX_DIGEST_SIZE);
2181	chcr_init_hctx_per_wr(state);
2182	return 0;
2183}
2184
2185static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2186{
2187	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2188	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2189
2190	req_ctx->reqlen = state->reqlen;
2191	req_ctx->data_len = state->data_len;
2192	req_ctx->reqbfr = req_ctx->bfr1;
2193	req_ctx->skbfr = req_ctx->bfr2;
2194	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2195	memcpy(req_ctx->partial_hash, state->partial_hash,
2196	       CHCR_HASH_MAX_DIGEST_SIZE);
2197	chcr_init_hctx_per_wr(req_ctx);
2198	return 0;
2199}
2200
2201static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2202			     unsigned int keylen)
2203{
2204	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2205	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2206	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2207	unsigned int i, err = 0, updated_digestsize;
2208
2209	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2210
2211	/* Use the key to calculate the ipad and opad. ipad will be sent with
2212	 * the first request's data; opad will be sent with the final hash
2213	 * result. ipad lives in hmacctx->ipad and opad in hmacctx->opad.
2214	 */
2215	shash->tfm = hmacctx->base_hash;
2216	if (keylen > bs) {
2217		err = crypto_shash_digest(shash, key, keylen,
2218					  hmacctx->ipad);
2219		if (err)
2220			goto out;
2221		keylen = digestsize;
2222	} else {
2223		memcpy(hmacctx->ipad, key, keylen);
2224	}
2225	memset(hmacctx->ipad + keylen, 0, bs - keylen);
2226	unsafe_memcpy(hmacctx->opad, hmacctx->ipad, bs,
2227		      "fortified memcpy causes -Wrestrict warning");
2228
2229	for (i = 0; i < bs / sizeof(int); i++) {
2230		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2231		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2232	}
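	/* ipad/opad now hold the key XORed with the standard HMAC inner/outer
	 * pad bytes (0x36/0x5c), per IPAD_DATA/OPAD_DATA
	 */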
2233
2234	updated_digestsize = digestsize;
2235	if (digestsize == SHA224_DIGEST_SIZE)
2236		updated_digestsize = SHA256_DIGEST_SIZE;
2237	else if (digestsize == SHA384_DIGEST_SIZE)
2238		updated_digestsize = SHA512_DIGEST_SIZE;
2239	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2240					hmacctx->ipad, digestsize);
2241	if (err)
2242		goto out;
2243	chcr_change_order(hmacctx->ipad, updated_digestsize);
2244
2245	err = chcr_compute_partial_hash(shash, hmacctx->opad,
2246					hmacctx->opad, digestsize);
2247	if (err)
2248		goto out;
2249	chcr_change_order(hmacctx->opad, updated_digestsize);
2250out:
2251	return err;
2252}
2253
2254static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2255			       unsigned int key_len)
2256{
2257	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2258	unsigned short context_size = 0;
2259	int err;
2260
2261	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2262	if (err)
2263		goto badkey_err;
2264
2265	memcpy(ablkctx->key, key, key_len);
2266	ablkctx->enckey_len = key_len;
2267	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2268	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2269	/* Both XTS keys must be aligned to a 16 byte boundary by padding with
2270	 * zeros, so a 24 byte (AES-192) key gets 8 zero bytes appended per half.
2271	 */
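	/* Resulting layout for a 48 byte (2 x AES-192) XTS key, as a sketch:
	 *	bytes  0..23 : key1,  bytes 24..31 : zero pad
	 *	bytes 32..55 : key2,  bytes 56..63 : zero pad
	 */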
2272	if (key_len == 48) {
2273		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
2274				+ 16) >> 4;
2275		memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
2276		memset(ablkctx->key + 24, 0, 8);
2277		memset(ablkctx->key + 56, 0, 8);
2278		ablkctx->enckey_len = 64;
2279		ablkctx->key_ctx_hdr =
2280			FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
2281					 CHCR_KEYCTX_NO_KEY, 1,
2282					 0, context_size);
2283	} else {
2284		ablkctx->key_ctx_hdr =
2285		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2286				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2287				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2288				 CHCR_KEYCTX_NO_KEY, 1,
2289				 0, context_size);
2290	}
2291	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2292	return 0;
2293badkey_err:
2294	ablkctx->enckey_len = 0;
2295
2296	return err;
2297}
2298
2299static int chcr_sha_init(struct ahash_request *areq)
2300{
2301	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2302	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2303	int digestsize = crypto_ahash_digestsize(tfm);
2304
2305	req_ctx->data_len = 0;
2306	req_ctx->reqlen = 0;
2307	req_ctx->reqbfr = req_ctx->bfr1;
2308	req_ctx->skbfr = req_ctx->bfr2;
2309	copy_hash_init_values(req_ctx->partial_hash, digestsize);
2310
2311	return 0;
2312}
2313
2314static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2315{
2316	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2317				 sizeof(struct chcr_ahash_req_ctx));
2318	return chcr_device_init(crypto_tfm_ctx(tfm));
2319}
2320
2321static int chcr_hmac_init(struct ahash_request *areq)
2322{
2323	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2324	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2325	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2326	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2327	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2328
2329	chcr_sha_init(areq);
2330	req_ctx->data_len = bs;
2331	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2332		if (digestsize == SHA224_DIGEST_SIZE)
2333			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2334			       SHA256_DIGEST_SIZE);
2335		else if (digestsize == SHA384_DIGEST_SIZE)
2336			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2337			       SHA512_DIGEST_SIZE);
2338		else
2339			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2340			       digestsize);
2341	}
2342	return 0;
2343}
2344
2345static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2346{
2347	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2348	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2349	unsigned int digestsize =
2350		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2351
2352	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2353				 sizeof(struct chcr_ahash_req_ctx));
2354	hmacctx->base_hash = chcr_alloc_shash(digestsize);
2355	if (IS_ERR(hmacctx->base_hash))
2356		return PTR_ERR(hmacctx->base_hash);
2357	return chcr_device_init(crypto_tfm_ctx(tfm));
2358}
2359
2360static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2361{
2362	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2363	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2364
2365	if (hmacctx->base_hash) {
2366		chcr_free_shash(hmacctx->base_hash);
2367		hmacctx->base_hash = NULL;
2368	}
2369}
2370
2371inline void chcr_aead_common_exit(struct aead_request *req)
2372{
2373	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2374	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2375	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2376
2377	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2378}
2379
2380static int chcr_aead_common_init(struct aead_request *req)
2381{
2382	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2383	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2384	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2385	unsigned int authsize = crypto_aead_authsize(tfm);
2386	int error = -EINVAL;
2387
2388	/* validate key size */
2389	if (aeadctx->enckey_len == 0)
2390		goto err;
2391	if (reqctx->op && req->cryptlen < authsize)
2392		goto err;
2393	if (reqctx->b0_len)
2394		reqctx->scratch_pad = reqctx->iv + IV;
2395	else
2396		reqctx->scratch_pad = NULL;
2397
2398	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2399				  reqctx->op);
2400	if (error) {
2401		error = -ENOMEM;
2402		goto err;
2403	}
2404
2405	return 0;
2406err:
2407	return error;
2408}
2409
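/*
 * Requests the hardware cannot take directly (zero-length payload, too
 * many destination SGL entries, oversized AAD, or a work request longer
 * than SGE_MAX_WR_LEN) are bounced to the software fallback cipher.
 */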
2410static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2411				   int aadmax, int wrlen,
2412				   unsigned short op_type)
2413{
2414	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2415
2416	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2417	    dst_nents > MAX_DSGL_ENT ||
2418	    (req->assoclen > aadmax) ||
2419	    (wrlen > SGE_MAX_WR_LEN))
2420		return 1;
2421	return 0;
2422}
2423
2424static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2425{
2426	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2427	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2428	struct aead_request *subreq = aead_request_ctx_dma(req);
2429
2430	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2431	aead_request_set_callback(subreq, req->base.flags,
2432				  req->base.complete, req->base.data);
2433	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2434				 req->iv);
2435	aead_request_set_ad(subreq, req->assoclen);
2436	return op_type ? crypto_aead_decrypt(subreq) :
2437		crypto_aead_encrypt(subreq);
2438}
2439
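/*
 * Work request layout produced by the create_*_wr() helpers, going by the
 * pointer arithmetic below (a sketch, not a full hardware spec):
 *
 *	[chcr_wr : FW/ULP headers, sec_cpl, key context]
 *	[cpl_rx_phys_dsgl : destination DSGL]
 *	[16 byte IV]
 *	[ulptx_sgl, or the source data placed immediately]
 */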
2440static struct sk_buff *create_authenc_wr(struct aead_request *req,
2441					 unsigned short qid,
2442					 int size)
2443{
2444	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2445	struct chcr_context *ctx = a_ctx(tfm);
2446	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2447	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2448	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2449	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2450	struct sk_buff *skb = NULL;
2451	struct chcr_wr *chcr_req;
2452	struct cpl_rx_phys_dsgl *phys_cpl;
2453	struct ulptx_sgl *ulptx;
2454	unsigned int transhdr_len;
2455	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2456	unsigned int   kctx_len = 0, dnents, snents;
2457	unsigned int  authsize = crypto_aead_authsize(tfm);
2458	int error = -EINVAL;
2459	u8 *ivptr;
2460	int null = 0;
2461	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2462		GFP_ATOMIC;
2463	struct adapter *adap = padap(ctx->dev);
2464	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2465
2466	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2467	if (req->cryptlen == 0)
2468		return NULL;
2469
2470	reqctx->b0_len = 0;
2471	error = chcr_aead_common_init(req);
2472	if (error)
2473		return ERR_PTR(error);
2474
2475	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2476		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2477		null = 1;
2478	}
2479	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2480		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2481	dnents += MIN_AUTH_SG; // For IV
2482	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2483			       CHCR_SRC_SG_SIZE, 0);
2484	dst_size = get_space_for_phys_dsgl(dnents);
2485	kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2486		- sizeof(chcr_req->key_ctx);
2487	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2488	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2489			SGE_MAX_WR_LEN;
2490	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2491			: (sgl_len(snents) * 8);
2492	transhdr_len += temp;
2493	transhdr_len = roundup(transhdr_len, 16);
2494
2495	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2496				    transhdr_len, reqctx->op)) {
2497		atomic_inc(&adap->chcr_stats.fallback);
2498		chcr_aead_common_exit(req);
2499		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2500	}
2501	skb = alloc_skb(transhdr_len, flags);
2502	if (!skb) {
2503		error = -ENOMEM;
2504		goto err;
2505	}
2506
2507	chcr_req = __skb_put_zero(skb, transhdr_len);
2508
2509	temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2510
2511	/*
2512	 * Input order is AAD, IV and Payload, where the IV is counted as part
2513	 * of the authenticated data. All other fields are filled according to
2514	 * the hardware spec.
2515	 */
2516	chcr_req->sec_cpl.op_ivinsrtofst =
2517				FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2518	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2519	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2520					null ? 0 : 1 + IV,
2521					null ? 0 : IV + req->assoclen,
2522					req->assoclen + IV + 1,
2523					(temp & 0x1F0) >> 4);
2524	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2525					temp & 0xF,
2526					null ? 0 : req->assoclen + IV + 1,
2527					temp, temp);
2528	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2529	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2530		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2531	else
2532		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2533	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2534					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2535					temp,
2536					actx->auth_mode, aeadctx->hmac_ctrl,
2537					IV >> 1);
2538	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2539					 0, 0, dst_size);
2540
2541	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2542	if (reqctx->op == CHCR_ENCRYPT_OP ||
2543		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2544		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2545		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2546		       aeadctx->enckey_len);
2547	else
2548		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2549		       aeadctx->enckey_len);
2550
2551	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2552	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2553	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2554	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2555	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2556	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2557	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2558		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2559		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2560				CTR_RFC3686_IV_SIZE);
2561		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2562			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2563	} else {
2564		memcpy(ivptr, req->iv, IV);
2565	}
2566	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2567	chcr_add_aead_src_ent(req, ulptx);
2568	atomic_inc(&adap->chcr_stats.cipher_rqst);
2569	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2570		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2571	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2572		   transhdr_len, temp, 0);
2573	reqctx->skb = skb;
2574
2575	return skb;
2576err:
2577	chcr_aead_common_exit(req);
2578
2579	return ERR_PTR(error);
2580}
2581
2582int chcr_aead_dma_map(struct device *dev,
2583		      struct aead_request *req,
2584		      unsigned short op_type)
2585{
2586	int error;
2587	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2588	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2589	unsigned int authsize = crypto_aead_authsize(tfm);
2590	int src_len, dst_len;
2591
2592	/* Calculate and handle the src and dst sg lengths separately for
2593	 * in-place and out-of-place operations.
2594	 */
2595	if (req->src == req->dst) {
2596		src_len = req->assoclen + req->cryptlen + (op_type ?
2597							0 : authsize);
2598		dst_len = src_len;
2599	} else {
2600		src_len = req->assoclen + req->cryptlen;
2601		dst_len = req->assoclen + req->cryptlen + (op_type ?
2602							-authsize : authsize);
2603	}
2604
2605	if (!req->cryptlen || !src_len || !dst_len)
2606		return 0;
2607	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2608					DMA_BIDIRECTIONAL);
2609	if (dma_mapping_error(dev, reqctx->iv_dma))
2610		return -ENOMEM;
2611	if (reqctx->b0_len)
2612		reqctx->b0_dma = reqctx->iv_dma + IV;
2613	else
2614		reqctx->b0_dma = 0;
2615	if (req->src == req->dst) {
2616		error = dma_map_sg(dev, req->src,
2617				sg_nents_for_len(req->src, src_len),
2618					DMA_BIDIRECTIONAL);
2619		if (!error)
2620			goto err;
2621	} else {
2622		error = dma_map_sg(dev, req->src,
2623				   sg_nents_for_len(req->src, src_len),
2624				   DMA_TO_DEVICE);
2625		if (!error)
2626			goto err;
2627		error = dma_map_sg(dev, req->dst,
2628				   sg_nents_for_len(req->dst, dst_len),
2629				   DMA_FROM_DEVICE);
2630		if (!error) {
2631			dma_unmap_sg(dev, req->src,
2632				     sg_nents_for_len(req->src, src_len),
2633				     DMA_TO_DEVICE);
2634			goto err;
2635		}
2636	}
2637
2638	return 0;
2639err:
2640	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2641	return -ENOMEM;
2642}
2643
2644void chcr_aead_dma_unmap(struct device *dev,
2645			 struct aead_request *req,
2646			 unsigned short op_type)
2647{
2648	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2649	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2650	unsigned int authsize = crypto_aead_authsize(tfm);
2651	int src_len, dst_len;
2652
2653	/* Calculate and handle the src and dst sg lengths separately for
2654	 * in-place and out-of-place operations.
2655	 */
2656	if (req->src == req->dst) {
2657		src_len = req->assoclen + req->cryptlen + (op_type ?
2658							0 : authsize);
2659		dst_len = src_len;
2660	} else {
2661		src_len = req->assoclen + req->cryptlen;
2662		dst_len = req->assoclen + req->cryptlen + (op_type ?
2663						-authsize : authsize);
2664	}
2665
2666	if (!req->cryptlen || !src_len || !dst_len)
2667		return;
2668
2669	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2670					DMA_BIDIRECTIONAL);
2671	if (req->src == req->dst) {
2672		dma_unmap_sg(dev, req->src,
2673			     sg_nents_for_len(req->src, src_len),
2674			     DMA_BIDIRECTIONAL);
2675	} else {
2676		dma_unmap_sg(dev, req->src,
2677			     sg_nents_for_len(req->src, src_len),
2678			     DMA_TO_DEVICE);
2679		dma_unmap_sg(dev, req->dst,
2680			     sg_nents_for_len(req->dst, dst_len),
2681			     DMA_FROM_DEVICE);
2682	}
2683}
2684
2685void chcr_add_aead_src_ent(struct aead_request *req,
2686			   struct ulptx_sgl *ulptx)
2687{
2688	struct ulptx_walk ulp_walk;
2689	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2690
2691	if (reqctx->imm) {
2692		u8 *buf = (u8 *)ulptx;
2693
2694		if (reqctx->b0_len) {
2695			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2696			buf += reqctx->b0_len;
2697		}
2698		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2699				   buf, req->cryptlen + req->assoclen, 0);
2700	} else {
2701		ulptx_walk_init(&ulp_walk, ulptx);
2702		if (reqctx->b0_len)
2703			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2704					    reqctx->b0_dma);
2705		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2706				  req->assoclen,  0);
2707		ulptx_walk_end(&ulp_walk);
2708	}
2709}
2710
2711void chcr_add_aead_dst_ent(struct aead_request *req,
2712			   struct cpl_rx_phys_dsgl *phys_cpl,
2713			   unsigned short qid)
2714{
2715	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2716	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2717	struct dsgl_walk dsgl_walk;
2718	unsigned int authsize = crypto_aead_authsize(tfm);
2719	struct chcr_context *ctx = a_ctx(tfm);
2720	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2721	u32 temp;
2722	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2723
2724	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2725	dsgl_walk_init(&dsgl_walk, phys_cpl);
2726	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2727	temp = req->assoclen + req->cryptlen +
2728		(reqctx->op ? -authsize : authsize);
2729	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2730	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2731}
2732
2733void chcr_add_cipher_src_ent(struct skcipher_request *req,
2734			     void *ulptx,
2735			     struct  cipher_wr_param *wrparam)
2736{
2737	struct ulptx_walk ulp_walk;
2738	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2739	u8 *buf = ulptx;
2740
2741	memcpy(buf, reqctx->iv, IV);
2742	buf += IV;
2743	if (reqctx->imm) {
2744		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2745				   buf, wrparam->bytes, reqctx->processed);
2746	} else {
2747		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2748		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2749				  reqctx->src_ofst);
2750		reqctx->srcsg = ulp_walk.last_sg;
2751		reqctx->src_ofst = ulp_walk.last_sg_len;
2752		ulptx_walk_end(&ulp_walk);
2753	}
2754}
2755
2756void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2757			     struct cpl_rx_phys_dsgl *phys_cpl,
2758			     struct  cipher_wr_param *wrparam,
2759			     unsigned short qid)
2760{
2761	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2762	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2763	struct chcr_context *ctx = c_ctx(tfm);
2764	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2765	struct dsgl_walk dsgl_walk;
2766	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2767
2768	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2769	dsgl_walk_init(&dsgl_walk, phys_cpl);
2770	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2771			 reqctx->dst_ofst);
2772	reqctx->dstsg = dsgl_walk.last_sg;
2773	reqctx->dst_ofst = dsgl_walk.last_sg_len;
2774	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2775}
2776
2777void chcr_add_hash_src_ent(struct ahash_request *req,
2778			   struct ulptx_sgl *ulptx,
2779			   struct hash_wr_param *param)
2780{
2781	struct ulptx_walk ulp_walk;
2782	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2783
2784	if (reqctx->hctx_wr.imm) {
2785		u8 *buf = (u8 *)ulptx;
2786
2787		if (param->bfr_len) {
2788			memcpy(buf, reqctx->reqbfr, param->bfr_len);
2789			buf += param->bfr_len;
2790		}
2791
2792		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2793				   sg_nents(reqctx->hctx_wr.srcsg), buf,
2794				   param->sg_len, 0);
2795	} else {
2796		ulptx_walk_init(&ulp_walk, ulptx);
2797		if (param->bfr_len)
2798			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2799					    reqctx->hctx_wr.dma_addr);
2800		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2801				  param->sg_len, reqctx->hctx_wr.src_ofst);
2802		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2803		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2804		ulptx_walk_end(&ulp_walk);
2805	}
2806}
2807
2808int chcr_hash_dma_map(struct device *dev,
2809		      struct ahash_request *req)
2810{
2811	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2812	int error = 0;
2813
2814	if (!req->nbytes)
2815		return 0;
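	/* dma_map_sg() returns the number of mapped entries, 0 on failure */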
2816	error = dma_map_sg(dev, req->src, sg_nents(req->src),
2817			   DMA_TO_DEVICE);
2818	if (!error)
2819		return -ENOMEM;
2820	req_ctx->hctx_wr.is_sg_map = 1;
2821	return 0;
2822}
2823
2824void chcr_hash_dma_unmap(struct device *dev,
2825			 struct ahash_request *req)
2826{
2827	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2828
2829	if (!req->nbytes)
2830		return;
2831
2832	dma_unmap_sg(dev, req->src, sg_nents(req->src),
2833			   DMA_TO_DEVICE);
2834	req_ctx->hctx_wr.is_sg_map = 0;
2835
2836}
2837
2838int chcr_cipher_dma_map(struct device *dev,
2839			struct skcipher_request *req)
2840{
2841	int error;
2842
2843	if (req->src == req->dst) {
2844		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2845				   DMA_BIDIRECTIONAL);
2846		if (!error)
2847			goto err;
2848	} else {
2849		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2850				   DMA_TO_DEVICE);
2851		if (!error)
2852			goto err;
2853		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2854				   DMA_FROM_DEVICE);
2855		if (!error) {
2856			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2857				   DMA_TO_DEVICE);
2858			goto err;
2859		}
2860	}
2861
2862	return 0;
2863err:
2864	return -ENOMEM;
2865}
2866
2867void chcr_cipher_dma_unmap(struct device *dev,
2868			   struct skcipher_request *req)
2869{
2870	if (req->src == req->dst) {
2871		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2872				   DMA_BIDIRECTIONAL);
2873	} else {
2874		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2875				   DMA_TO_DEVICE);
2876		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2877				   DMA_FROM_DEVICE);
2878	}
2879}
2880
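/*
 * set_msg_len() writes the CCM payload length big-endian into the last
 * csize bytes of a csize-byte field (RFC 3610 length encoding). E.g. for
 * csize == 2 and msglen == 0x1234 the field becomes { 0x12, 0x34 };
 * lengths too large for the field are rejected with -EOVERFLOW.
 */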
2881static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2882{
2883	__be32 data;
2884
2885	memset(block, 0, csize);
2886	block += csize;
2887
2888	if (csize >= 4)
2889		csize = 4;
2890	else if (msglen > (unsigned int)(1 << (8 * csize)))
2891		return -EOVERFLOW;
2892
2893	data = cpu_to_be32(msglen);
2894	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2895
2896	return 0;
2897}
2898
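/*
 * generate_b0() assembles the CCM B0 block per RFC 3610: the flags byte
 * (already carrying L' from the IV) gets (M - 2) / 2 encoded into bits
 * 3..5 plus the Adata bit (0x40) when AAD is present, and the payload
 * length fills the trailing l = L' + 1 bytes.
 */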
2899static int generate_b0(struct aead_request *req, u8 *ivptr,
2900			unsigned short op_type)
2901{
2902	unsigned int l, lp, m;
2903	int rc;
2904	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2905	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2906	u8 *b0 = reqctx->scratch_pad;
2907
2908	m = crypto_aead_authsize(aead);
2909
2910	memcpy(b0, ivptr, 16);
2911
2912	lp = b0[0];
2913	l = lp + 1;
2914
2915	/* set m, bits 3-5 */
2916	*b0 |= (8 * ((m - 2) / 2));
2917
2918	/* set adata, bit 6, if associated data is used */
2919	if (req->assoclen)
2920		*b0 |= 64;
2921	rc = set_msg_len(b0 + 16 - l,
2922			 (op_type == CHCR_DECRYPT_OP) ?
2923			 req->cryptlen - m : req->cryptlen, l);
2924
2925	return rc;
2926}
2927
2928static inline int crypto_ccm_check_iv(const u8 *iv)
2929{
2930	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2931	if (iv[0] < 1 || iv[0] > 7)
2932		return -EINVAL;
2933
2934	return 0;
2935}
2936
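/*
 * For RFC 4309 the 16 byte CCM IV is built as
 *	flags (0x03, i.e. a 4 byte length field) | 3 byte salt |
 *	8 byte per-request IV | 4 byte counter,
 * while plain CCM takes the caller's 16 byte IV verbatim; either way the
 * trailing counter bytes are zeroed before the first block is processed.
 */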
2937static int ccm_format_packet(struct aead_request *req,
2938			     u8 *ivptr,
2939			     unsigned int sub_type,
2940			     unsigned short op_type,
2941			     unsigned int assoclen)
2942{
2943	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2944	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2945	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2946	int rc = 0;
2947
2948	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2949		ivptr[0] = 3;
2950		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2951		memcpy(ivptr + 4, req->iv, 8);
2952		memset(ivptr + 12, 0, 4);
2953	} else {
2954		memcpy(ivptr, req->iv, 16);
2955	}
2956	if (assoclen)
2957		put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
2958
2959	rc = generate_b0(req, ivptr, op_type);
2960	/* zero the ctr value */
2961	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2962	return rc;
2963}
2964
2965static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2966				  unsigned int dst_size,
2967				  struct aead_request *req,
2968				  unsigned short op_type)
2969{
2970	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2971	struct chcr_context *ctx = a_ctx(tfm);
2972	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2973	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2974	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2975	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2976	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2977	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2978	unsigned int ccm_xtra;
2979	unsigned int tag_offset = 0, auth_offset = 0;
2980	unsigned int assoclen;
2981
2982	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2983
2984	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2985		assoclen = req->assoclen - 8;
2986	else
2987		assoclen = req->assoclen;
2988	ccm_xtra = CCM_B0_SIZE +
2989		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2990
2991	auth_offset = req->cryptlen ?
2992		(req->assoclen + IV + 1 + ccm_xtra) : 0;
2993	if (op_type == CHCR_DECRYPT_OP) {
2994		if (crypto_aead_authsize(tfm) != req->cryptlen)
2995			tag_offset = crypto_aead_authsize(tfm);
2996		else
2997			auth_offset = 0;
2998	}
2999
3000	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
3001	sec_cpl->pldlen =
3002		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
3003	/* For CCM there will always be a B0 block, so AAD start is always 1 */
3004	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3005				1 + IV,	IV + assoclen + ccm_xtra,
3006				req->assoclen + IV + 1 + ccm_xtra, 0);
3007
3008	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
3009					auth_offset, tag_offset,
3010					(op_type == CHCR_ENCRYPT_OP) ? 0 :
3011					crypto_aead_authsize(tfm));
3012	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
3013					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
3014					cipher_mode, mac_mode,
3015					aeadctx->hmac_ctrl, IV >> 1);
3016
3017	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
3018					0, dst_size);
3019}
3020
3021static int aead_ccm_validate_input(unsigned short op_type,
3022				   struct aead_request *req,
3023				   struct chcr_aead_ctx *aeadctx,
3024				   unsigned int sub_type)
3025{
3026	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
3027		if (crypto_ccm_check_iv(req->iv)) {
3028			pr_err("CCM: IV check fails\n");
3029			return -EINVAL;
3030		}
3031	} else {
3032		if (req->assoclen != 16 && req->assoclen != 20) {
3033			pr_err("RFC4309: Invalid AAD length %d\n",
3034			       req->assoclen);
3035			return -EINVAL;
3036		}
3037	}
3038	return 0;
3039}
3040
3041static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
3042					  unsigned short qid,
3043					  int size)
3044{
3045	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3046	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3047	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3048	struct sk_buff *skb = NULL;
3049	struct chcr_wr *chcr_req;
3050	struct cpl_rx_phys_dsgl *phys_cpl;
3051	struct ulptx_sgl *ulptx;
3052	unsigned int transhdr_len;
3053	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
3054	unsigned int sub_type, assoclen = req->assoclen;
3055	unsigned int authsize = crypto_aead_authsize(tfm);
3056	int error = -EINVAL;
3057	u8 *ivptr;
3058	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3059		GFP_ATOMIC;
3060	struct adapter *adap = padap(a_ctx(tfm)->dev);
3061
3062	sub_type = get_aead_subtype(tfm);
3063	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
3064		assoclen -= 8;
3065	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
3066	error = chcr_aead_common_init(req);
3067	if (error)
3068		return ERR_PTR(error);
3069
3070	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3071	if (error)
3072		goto err;
3073	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
3074			+ (reqctx->op ? -authsize : authsize),
3075			CHCR_DST_SG_SIZE, 0);
3076	dnents += MIN_CCM_SG; // For IV and B0
3077	dst_size = get_space_for_phys_dsgl(dnents);
3078	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3079			       CHCR_SRC_SG_SIZE, 0);
3080	snents += MIN_CCM_SG; // For B0
3081	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
3082	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3083	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
3084		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
3085	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
3086				     reqctx->b0_len, 16) :
3087		(sgl_len(snents) *  8);
3088	transhdr_len += temp;
3089	transhdr_len = roundup(transhdr_len, 16);
3090
3091	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
3092				reqctx->b0_len, transhdr_len, reqctx->op)) {
3093		atomic_inc(&adap->chcr_stats.fallback);
3094		chcr_aead_common_exit(req);
3095		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3096	}
3097	skb = alloc_skb(transhdr_len, flags);
3098
3099	if (!skb) {
3100		error = -ENOMEM;
3101		goto err;
3102	}
3103
3104	chcr_req = __skb_put_zero(skb, transhdr_len);
3105
3106	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3107
3108	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3109	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3110	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3111			aeadctx->key, aeadctx->enckey_len);
3112
3113	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3114	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3115	ulptx = (struct ulptx_sgl *)(ivptr + IV);
3116	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3117	if (error)
3118		goto dstmap_fail;
3119	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3120	chcr_add_aead_src_ent(req, ulptx);
3121
3122	atomic_inc(&adap->chcr_stats.aead_rqst);
3123	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3124		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3125		reqctx->b0_len) : 0);
3126	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3127		    transhdr_len, temp, 0);
3128	reqctx->skb = skb;
3129
3130	return skb;
3131dstmap_fail:
3132	kfree_skb(skb);
3133err:
3134	chcr_aead_common_exit(req);
3135	return ERR_PTR(error);
3136}
3137
3138static struct sk_buff *create_gcm_wr(struct aead_request *req,
3139				     unsigned short qid,
3140				     int size)
3141{
3142	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3143	struct chcr_context *ctx = a_ctx(tfm);
3144	struct uld_ctx *u_ctx = ULD_CTX(ctx);
3145	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3146	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3147	struct sk_buff *skb = NULL;
3148	struct chcr_wr *chcr_req;
3149	struct cpl_rx_phys_dsgl *phys_cpl;
3150	struct ulptx_sgl *ulptx;
3151	unsigned int transhdr_len, dnents = 0, snents;
3152	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3153	unsigned int authsize = crypto_aead_authsize(tfm);
3154	int error = -EINVAL;
3155	u8 *ivptr;
3156	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3157		GFP_ATOMIC;
3158	struct adapter *adap = padap(ctx->dev);
3159	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3160
3161	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
3162	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3163		assoclen = req->assoclen - 8;
3164
3165	reqctx->b0_len = 0;
3166	error = chcr_aead_common_init(req);
3167	if (error)
3168		return ERR_PTR(error);
3169	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3170				(reqctx->op ? -authsize : authsize),
3171				CHCR_DST_SG_SIZE, 0);
3172	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3173			       CHCR_SRC_SG_SIZE, 0);
3174	dnents += MIN_GCM_SG; // For IV
3175	dst_size = get_space_for_phys_dsgl(dnents);
3176	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3177	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3178	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3179			SGE_MAX_WR_LEN;
3180	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3181		(sgl_len(snents) * 8);
3182	transhdr_len += temp;
3183	transhdr_len = roundup(transhdr_len, 16);
3184	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3185			    transhdr_len, reqctx->op)) {
3186
3187		atomic_inc(&adap->chcr_stats.fallback);
3188		chcr_aead_common_exit(req);
3189		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3190	}
3191	skb = alloc_skb(transhdr_len, flags);
3192	if (!skb) {
3193		error = -ENOMEM;
3194		goto err;
3195	}
3196
3197	chcr_req = __skb_put_zero(skb, transhdr_len);
3198
3199	// Offset of tag from end
3200	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3201	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3202						rx_channel_id, 2, 1);
3203	chcr_req->sec_cpl.pldlen =
3204		htonl(req->assoclen + IV + req->cryptlen);
3205	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3206					assoclen ? 1 + IV : 0,
3207					assoclen ? IV + assoclen : 0,
3208					req->assoclen + IV + 1, 0);
3209	chcr_req->sec_cpl.cipherstop_lo_authinsert =
3210			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3211						temp, temp);
3212	chcr_req->sec_cpl.seqno_numivs =
3213			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3214					CHCR_ENCRYPT_OP) ? 1 : 0,
3215					CHCR_SCMD_CIPHER_MODE_AES_GCM,
3216					CHCR_SCMD_AUTH_MODE_GHASH,
3217					aeadctx->hmac_ctrl, IV >> 1);
3218	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3219					0, 0, dst_size);
3220	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3221	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3222	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3223	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3224
3225	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3226	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3227	/* Prepare the 16 byte IV:
3228	 * SALT | IV | 0x00000001 */
3229	if (get_aead_subtype(tfm) ==
3230	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3231		memcpy(ivptr, aeadctx->salt, 4);
3232		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3233	} else {
3234		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3235	}
3236	put_unaligned_be32(0x01, &ivptr[12]);
3237	ulptx = (struct ulptx_sgl *)(ivptr + 16);
3238
3239	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3240	chcr_add_aead_src_ent(req, ulptx);
3241	atomic_inc(&adap->chcr_stats.aead_rqst);
3242	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3243		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3244	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3245		    transhdr_len, temp, reqctx->verify);
3246	reqctx->skb = skb;
3247	return skb;
3248
3249err:
3250	chcr_aead_common_exit(req);
3251	return ERR_PTR(error);
3252}
3253
3254
3255
3256static int chcr_aead_cra_init(struct crypto_aead *tfm)
3257{
3258	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3259	struct aead_alg *alg = crypto_aead_alg(tfm);
3260
3261	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3262					       CRYPTO_ALG_NEED_FALLBACK |
3263					       CRYPTO_ALG_ASYNC);
3264	if  (IS_ERR(aeadctx->sw_cipher))
3265		return PTR_ERR(aeadctx->sw_cipher);
3266	crypto_aead_set_reqsize_dma(
3267		tfm, max(sizeof(struct chcr_aead_reqctx),
3268			 sizeof(struct aead_request) +
3269			 crypto_aead_reqsize(aeadctx->sw_cipher)));
3270	return chcr_device_init(a_ctx(tfm));
3271}
3272
3273static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3274{
3275	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3276
3277	crypto_free_aead(aeadctx->sw_cipher);
3278}
3279
3280static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3281					unsigned int authsize)
3282{
3283	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3284
3285	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3286	aeadctx->mayverify = VERIFY_HW;
3287	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3288}
3289static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3290				    unsigned int authsize)
3291{
3292	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3293	u32 maxauth = crypto_aead_maxauthsize(tfm);
3294
3295	/* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3296	 * does not hold for SHA1, so the authsize == 12 check must come
3297	 * before authsize == (maxauth >> 1).
3298	 */
3299	if (authsize == ICV_4) {
3300		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3301		aeadctx->mayverify = VERIFY_HW;
3302	} else if (authsize == ICV_6) {
3303		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3304		aeadctx->mayverify = VERIFY_HW;
3305	} else if (authsize == ICV_10) {
3306		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3307		aeadctx->mayverify = VERIFY_HW;
3308	} else if (authsize == ICV_12) {
3309		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3310		aeadctx->mayverify = VERIFY_HW;
3311	} else if (authsize == ICV_14) {
3312		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3313		aeadctx->mayverify = VERIFY_HW;
3314	} else if (authsize == (maxauth >> 1)) {
3315		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3316		aeadctx->mayverify = VERIFY_HW;
3317	} else if (authsize == maxauth) {
3318		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3319		aeadctx->mayverify = VERIFY_HW;
3320	} else {
3321		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3322		aeadctx->mayverify = VERIFY_SW;
3323	}
3324	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3325}
3326
3327
3328static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3329{
3330	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3331
3332	switch (authsize) {
3333	case ICV_4:
3334		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3335		aeadctx->mayverify = VERIFY_HW;
3336		break;
3337	case ICV_8:
3338		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3339		aeadctx->mayverify = VERIFY_HW;
3340		break;
3341	case ICV_12:
3342		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3343		aeadctx->mayverify = VERIFY_HW;
3344		break;
3345	case ICV_14:
3346		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3347		aeadctx->mayverify = VERIFY_HW;
3348		break;
3349	case ICV_16:
3350		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3351		aeadctx->mayverify = VERIFY_HW;
3352		break;
3353	case ICV_13:
3354	case ICV_15:
3355		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3356		aeadctx->mayverify = VERIFY_SW;
3357		break;
3358	default:
3359		return -EINVAL;
3360	}
3361	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3362}
3363
3364static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3365					  unsigned int authsize)
3366{
3367	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3368
3369	switch (authsize) {
3370	case ICV_8:
3371		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3372		aeadctx->mayverify = VERIFY_HW;
3373		break;
3374	case ICV_12:
3375		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3376		aeadctx->mayverify = VERIFY_HW;
3377		break;
3378	case ICV_16:
3379		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3380		aeadctx->mayverify = VERIFY_HW;
3381		break;
3382	default:
3383		return -EINVAL;
3384	}
3385	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3386}
3387
3388static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3389				unsigned int authsize)
3390{
3391	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3392
3393	switch (authsize) {
3394	case ICV_4:
3395		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3396		aeadctx->mayverify = VERIFY_HW;
3397		break;
3398	case ICV_6:
3399		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3400		aeadctx->mayverify = VERIFY_HW;
3401		break;
3402	case ICV_8:
3403		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3404		aeadctx->mayverify = VERIFY_HW;
3405		break;
3406	case ICV_10:
3407		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3408		aeadctx->mayverify = VERIFY_HW;
3409		break;
3410	case ICV_12:
3411		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3412		aeadctx->mayverify = VERIFY_HW;
3413		break;
3414	case ICV_14:
3415		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3416		aeadctx->mayverify = VERIFY_HW;
3417		break;
3418	case ICV_16:
3419		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3420		aeadctx->mayverify = VERIFY_HW;
3421		break;
3422	default:
3423		return -EINVAL;
3424	}
3425	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3426}
3427
3428static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3429				const u8 *key,
3430				unsigned int keylen)
3431{
3432	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3433	unsigned char ck_size, mk_size;
3434	int key_ctx_size = 0;
3435
3436	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3437	if (keylen == AES_KEYSIZE_128) {
3438		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3439		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3440	} else if (keylen == AES_KEYSIZE_192) {
3441		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3442		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3443	} else if (keylen == AES_KEYSIZE_256) {
3444		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3445		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3446	} else {
3447		aeadctx->enckey_len = 0;
3448		return	-EINVAL;
3449	}
3450	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3451						key_ctx_size >> 4);
3452	memcpy(aeadctx->key, key, keylen);
3453	aeadctx->enckey_len = keylen;
3454
3455	return 0;
3456}
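/*
 * The key-context header encodes the total context length in 16-byte
 * units, hence "key_ctx_size >> 4" above; for CCM the context is sized
 * for two 16-byte-rounded copies of the AES key.
 */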
3457
3458static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3459				const u8 *key,
3460				unsigned int keylen)
3461{
3462	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3463	int error;
3464
3465	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3466	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3467			      CRYPTO_TFM_REQ_MASK);
3468	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3469	if (error)
3470		return error;
3471	return chcr_ccm_common_setkey(aead, key, keylen);
3472}
3473
3474static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3475				    unsigned int keylen)
3476{
3477	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3478	int error;
3479
3480	if (keylen < 3) {
3481		aeadctx->enckey_len = 0;
3482		return	-EINVAL;
3483	}
3484	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3485	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3486			      CRYPTO_TFM_REQ_MASK);
3487	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3488	if (error)
3489		return error;
3490	keylen -= 3;
3491	memcpy(aeadctx->salt, key + keylen, 3);
3492	return chcr_ccm_common_setkey(aead, key, keylen);
3493}
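/*
 * For RFC 4309 the last three bytes of the key material are not part of
 * the AES key: they are the implicit salt that is combined with the
 * 8-byte per-request IV to form the 11-byte CCM nonce.  The salt is
 * stripped off and kept in aeadctx->salt before the remaining bytes are
 * programmed as the cipher key.
 */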
3494
3495static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3496			   unsigned int keylen)
3497{
3498	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3499	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3500	unsigned int ck_size;
3501	int ret = 0, key_ctx_size = 0;
3502	struct crypto_aes_ctx aes;
3503
3504	aeadctx->enckey_len = 0;
3505	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3506	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3507			      & CRYPTO_TFM_REQ_MASK);
3508	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3509	if (ret)
3510		goto out;
3511
3512	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3513	    keylen > 3) {
3514		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3515		memcpy(aeadctx->salt, key + keylen, 4);
3516	}
3517	if (keylen == AES_KEYSIZE_128) {
3518		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3519	} else if (keylen == AES_KEYSIZE_192) {
3520		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3521	} else if (keylen == AES_KEYSIZE_256) {
3522		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3523	} else {
3524		pr_err("GCM: Invalid key length %d\n", keylen);
3525		ret = -EINVAL;
3526		goto out;
3527	}
3528
3529	memcpy(aeadctx->key, key, keylen);
3530	aeadctx->enckey_len = keylen;
3531	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3532		AEAD_H_SIZE;
3533	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3534						CHCR_KEYCTX_MAC_KEY_SIZE_128,
3535						0, 0,
3536						key_ctx_size >> 4);
3537	/* Calculate the H = CIPH(K, 0 repeated 16 times).
3538	 * It goes into the key context.
3539	 */
3540	ret = aes_expandkey(&aes, key, keylen);
3541	if (ret) {
3542		aeadctx->enckey_len = 0;
3543		goto out;
3544	}
3545	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3546	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3547	memzero_explicit(&aes, sizeof(aes));
3548
3549out:
3550	return ret;
3551}
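/*
 * The GHASH hash subkey H = AES_K(0^128) is precomputed here with the
 * generic AES library (aes_expandkey()/aes_encrypt()) and stored after
 * the cipher key in the key context, so the engine does not have to
 * derive it per request.  The expanded key schedule is scrubbed with
 * memzero_explicit() once H has been produced.
 */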
3552
3553static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3554				   unsigned int keylen)
3555{
3556	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3557	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3558	/* holds both the auth and the cipher key */
3559	struct crypto_authenc_keys keys;
3560	unsigned int bs, subtype;
3561	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3562	int err = 0, i, key_ctx_len = 0;
3563	unsigned char ck_size = 0;
3564	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3565	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3566	struct algo_param param;
3567	int align;
3568	u8 *o_ptr = NULL;
3569
3570	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3571	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3572			      & CRYPTO_TFM_REQ_MASK);
3573	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3574	if (err)
3575		goto out;
3576
3577	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3578		goto out;
3579
3580	if (get_alg_config(&param, max_authsize)) {
3581		pr_err("Unsupported digest size\n");
3582		goto out;
3583	}
3584	subtype = get_aead_subtype(authenc);
3585	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3586		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3587		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3588			goto out;
3589		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3590		- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3591		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3592	}
3593	if (keys.enckeylen == AES_KEYSIZE_128) {
3594		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3595	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3596		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3597	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3598		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3599	} else {
3600		pr_err("Unsupported cipher key\n");
3601		goto out;
3602	}
3603
3604	/* Copy only the encryption key. The auth key is consumed here to
3605	 * generate h(ipad) and h(opad) and is not needed afterwards; only
3606	 * the digest-sized partial hashes derived from it are kept.
3607	 */
3608	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3609	aeadctx->enckey_len = keys.enckeylen;
3610	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3611		subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3612
3613		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3614			    aeadctx->enckey_len << 3);
3615	}
3616	base_hash  = chcr_alloc_shash(max_authsize);
3617	if (IS_ERR(base_hash)) {
3618		pr_err("Base driver cannot be loaded\n");
3619		goto out;
3620	}
3621	{
3622		SHASH_DESC_ON_STACK(shash, base_hash);
3623
3624		shash->tfm = base_hash;
3625		bs = crypto_shash_blocksize(base_hash);
3626		align = KEYCTX_ALIGN_PAD(max_authsize);
3627		o_ptr =  actx->h_iopad + param.result_size + align;
3628
3629		if (keys.authkeylen > bs) {
3630			err = crypto_shash_digest(shash, keys.authkey,
3631						  keys.authkeylen,
3632						  o_ptr);
3633			if (err) {
3634				pr_err("Hashing of auth key failed\n");
3635				goto out;
3636			}
3637			keys.authkeylen = max_authsize;
3638		} else
3639			memcpy(o_ptr, keys.authkey, keys.authkeylen);
3640
3641		/* Compute the ipad-digest */
3642		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3643		memcpy(pad, o_ptr, keys.authkeylen);
3644		for (i = 0; i < bs >> 2; i++)
3645			*((unsigned int *)pad + i) ^= IPAD_DATA;
3646
3647		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3648					      max_authsize))
3649			goto out;
3650		/* Compute the opad-digest */
3651		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3652		memcpy(pad, o_ptr, keys.authkeylen);
3653		for (i = 0; i < bs >> 2; i++)
3654			*((unsigned int *)pad + i) ^= OPAD_DATA;
3655
3656		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3657			goto out;
3658
3659		/* convert the ipad and opad digest to network order */
3660		chcr_change_order(actx->h_iopad, param.result_size);
3661		chcr_change_order(o_ptr, param.result_size);
3662		key_ctx_len = sizeof(struct _key_ctx) +
3663			roundup(keys.enckeylen, 16) +
3664			(param.result_size + align) * 2;
3665		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3666						0, 1, key_ctx_len >> 4);
3667		actx->auth_mode = param.auth_mode;
3668		chcr_free_shash(base_hash);
3669
3670		memzero_explicit(&keys, sizeof(keys));
3671		return 0;
3672	}
3673out:
3674	aeadctx->enckey_len = 0;
3675	memzero_explicit(&keys, sizeof(keys));
3676	if (!IS_ERR(base_hash))
3677		chcr_free_shash(base_hash);
3678	return -EINVAL;
3679}
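/*
 * The block above is the standard HMAC key precomputation: a key longer
 * than the block size is first hashed down, the result is zero-padded to
 * one block, XORed with the ipad (0x36) and opad (0x5c) patterns, and a
 * single compression-function pass over each padded block is exported by
 * chcr_compute_partial_hash().  The two intermediate states are stored
 * big-endian in the key context so the hardware can resume the hash from
 * them on every request.
 */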
3680
3681static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3682					const u8 *key, unsigned int keylen)
3683{
3684	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3685	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3686	struct crypto_authenc_keys keys;
3687	int err;
3688	/* "keys" above holds both the auth and the cipher key */
3689	unsigned int subtype;
3690	int key_ctx_len = 0;
3691	unsigned char ck_size = 0;
3692
3693	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3694	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3695			      & CRYPTO_TFM_REQ_MASK);
3696	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3697	if (err)
3698		goto out;
3699
3700	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3701		goto out;
3702
3703	subtype = get_aead_subtype(authenc);
3704	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3705	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3706		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3707			goto out;
3708		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3709			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3710		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3711	}
3712	if (keys.enckeylen == AES_KEYSIZE_128) {
3713		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3714	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3715		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3716	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3717		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3718	} else {
3719		pr_err("Unsupported cipher key %d\n", keys.enckeylen);
3720		goto out;
3721	}
3722	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3723	aeadctx->enckey_len = keys.enckeylen;
3724	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3725	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3726		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3727				aeadctx->enckey_len << 3);
3728	}
3729	key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3730
3731	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3732						0, key_ctx_len >> 4);
3733	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3734	memzero_explicit(&keys, sizeof(keys));
3735	return 0;
3736out:
3737	aeadctx->enckey_len = 0;
3738	memzero_explicit(&keys, sizeof(keys));
3739	return -EINVAL;
3740}
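/*
 * The digest_null variants program CHCR_KEYCTX_NO_KEY and
 * CHCR_SCMD_AUTH_MODE_NOP, so the engine performs the cipher pass only
 * and no authentication key material is carried in the context.
 */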
3741
3742static int chcr_aead_op(struct aead_request *req,
3743			int size,
3744			create_wr_t create_wr_fn)
3745{
3746	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3747	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3748	struct chcr_context *ctx = a_ctx(tfm);
3749	struct uld_ctx *u_ctx = ULD_CTX(ctx);
3750	struct sk_buff *skb;
3751	struct chcr_dev *cdev;
3752
3753	cdev = a_ctx(tfm)->dev;
3754	if (!cdev) {
3755		pr_err("%s : No crypto device.\n", __func__);
3756		return -ENXIO;
3757	}
3758
3759	if (chcr_inc_wrcount(cdev)) {
3760	/* Detach state for CHCR means lldi or padap is freed; the WR count
3761	 * cannot be incremented, so use the software fallback instead.
3762	 */
3763		return chcr_aead_fallback(req, reqctx->op);
3764	}
3765
3766	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3767					reqctx->txqidx) &&
3768		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
3769			chcr_dec_wrcount(cdev);
3770			return -ENOSPC;
3771	}
3772
3773	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3774	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
3775		pr_err("RFC4106: Invalid value of assoclen %d\n",
3776		       req->assoclen);
3777		return -EINVAL;
3778	}
3779
3780	/* Form a WR from req */
3781	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3782
3783	if (IS_ERR_OR_NULL(skb)) {
3784		chcr_dec_wrcount(cdev);
3785		return PTR_ERR_OR_ZERO(skb);
3786	}
3787
3788	skb->dev = u_ctx->lldi.ports[0];
3789	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
3790	chcr_send_wr(skb);
3791	return -EINPROGRESS;
3792}
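/*
 * chcr_aead_op() is the common submission path for all AEAD requests: it
 * takes a reference on the device's in-flight WR count (falling back to
 * the software implementation if the device is detaching), applies
 * backpressure with -ENOSPC when the crypto TX queue is full and the
 * caller did not set CRYPTO_TFM_REQ_MAY_BACKLOG, builds the work request
 * through the supplied create_wr_fn, and queues it.  -EINPROGRESS
 * signals asynchronous completion through the response path.
 */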
3793
3794static int chcr_aead_encrypt(struct aead_request *req)
3795{
3796	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3797	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3798	struct chcr_context *ctx = a_ctx(tfm);
3799	unsigned int cpu;
3800
3801	cpu = get_cpu();
3802	reqctx->txqidx = cpu % ctx->ntxq;
3803	reqctx->rxqidx = cpu % ctx->nrxq;
3804	put_cpu();
3805
3806	reqctx->verify = VERIFY_HW;
3807	reqctx->op = CHCR_ENCRYPT_OP;
3808
3809	switch (get_aead_subtype(tfm)) {
3810	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3811	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3812	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3813	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3814		return chcr_aead_op(req, 0, create_authenc_wr);
3815	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3816	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3817		return chcr_aead_op(req, 0, create_aead_ccm_wr);
3818	default:
3819		return chcr_aead_op(req, 0, create_gcm_wr);
3820	}
3821}
3822
3823static int chcr_aead_decrypt(struct aead_request *req)
3824{
3825	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3826	struct chcr_context *ctx = a_ctx(tfm);
3827	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3828	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3829	int size;
3830	unsigned int cpu;
3831
3832	cpu = get_cpu();
3833	reqctx->txqidx = cpu % ctx->ntxq;
3834	reqctx->rxqidx = cpu % ctx->nrxq;
3835	put_cpu();
3836
3837	if (aeadctx->mayverify == VERIFY_SW) {
3838		size = crypto_aead_maxauthsize(tfm);
3839		reqctx->verify = VERIFY_SW;
3840	} else {
3841		size = 0;
3842		reqctx->verify = VERIFY_HW;
3843	}
3844	reqctx->op = CHCR_DECRYPT_OP;
3845	switch (get_aead_subtype(tfm)) {
3846	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3847	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3848	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3849	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3850		return chcr_aead_op(req, size, create_authenc_wr);
3851	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3852	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3853		return chcr_aead_op(req, size, create_aead_ccm_wr);
3854	default:
3855		return chcr_aead_op(req, size, create_gcm_wr);
3856	}
3857}
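/*
 * On decrypt, when the transform was configured for software tag
 * verification (mayverify == VERIFY_SW), the WR is sized for an extra
 * maxauthsize bytes so the full computed tag comes back with the
 * response and can be compared against the received ICV in the driver.
 */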
3858
3859static struct chcr_alg_template driver_algs[] = {
3860	/* AES-CBC */
3861	{
3862		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3863		.is_registered = 0,
3864		.alg.skcipher = {
3865			.base.cra_name		= "cbc(aes)",
3866			.base.cra_driver_name	= "cbc-aes-chcr",
3867			.base.cra_blocksize	= AES_BLOCK_SIZE,
3868
3869			.init			= chcr_init_tfm,
3870			.exit			= chcr_exit_tfm,
3871			.min_keysize		= AES_MIN_KEY_SIZE,
3872			.max_keysize		= AES_MAX_KEY_SIZE,
3873			.ivsize			= AES_BLOCK_SIZE,
3874			.setkey			= chcr_aes_cbc_setkey,
3875			.encrypt		= chcr_aes_encrypt,
3876			.decrypt		= chcr_aes_decrypt,
3877			}
3878	},
3879	{
3880		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3881		.is_registered = 0,
3882		.alg.skcipher = {
3883			.base.cra_name		= "xts(aes)",
3884			.base.cra_driver_name	= "xts-aes-chcr",
3885			.base.cra_blocksize	= AES_BLOCK_SIZE,
3886
3887			.init			= chcr_init_tfm,
3888			.exit			= chcr_exit_tfm,
3889			.min_keysize		= 2 * AES_MIN_KEY_SIZE,
3890			.max_keysize		= 2 * AES_MAX_KEY_SIZE,
3891			.ivsize			= AES_BLOCK_SIZE,
3892			.setkey			= chcr_aes_xts_setkey,
3893			.encrypt		= chcr_aes_encrypt,
3894			.decrypt		= chcr_aes_decrypt,
3895			}
3896	},
3897	{
3898		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3899		.is_registered = 0,
3900		.alg.skcipher = {
3901			.base.cra_name		= "ctr(aes)",
3902			.base.cra_driver_name	= "ctr-aes-chcr",
3903			.base.cra_blocksize	= 1,
3904
3905			.init			= chcr_init_tfm,
3906			.exit			= chcr_exit_tfm,
3907			.min_keysize		= AES_MIN_KEY_SIZE,
3908			.max_keysize		= AES_MAX_KEY_SIZE,
3909			.ivsize			= AES_BLOCK_SIZE,
3910			.setkey			= chcr_aes_ctr_setkey,
3911			.encrypt		= chcr_aes_encrypt,
3912			.decrypt		= chcr_aes_decrypt,
3913		}
3914	},
3915	{
3916		.type = CRYPTO_ALG_TYPE_SKCIPHER |
3917			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3918		.is_registered = 0,
3919		.alg.skcipher = {
3920			.base.cra_name		= "rfc3686(ctr(aes))",
3921			.base.cra_driver_name	= "rfc3686-ctr-aes-chcr",
3922			.base.cra_blocksize	= 1,
3923
3924			.init			= chcr_rfc3686_init,
3925			.exit			= chcr_exit_tfm,
3926			.min_keysize		= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3927			.max_keysize		= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3928			.ivsize			= CTR_RFC3686_IV_SIZE,
3929			.setkey			= chcr_aes_rfc3686_setkey,
3930			.encrypt		= chcr_aes_encrypt,
3931			.decrypt		= chcr_aes_decrypt,
3932		}
3933	},
3934	/* SHA */
3935	{
3936		.type = CRYPTO_ALG_TYPE_AHASH,
3937		.is_registered = 0,
3938		.alg.hash = {
3939			.halg.digestsize = SHA1_DIGEST_SIZE,
3940			.halg.base = {
3941				.cra_name = "sha1",
3942				.cra_driver_name = "sha1-chcr",
3943				.cra_blocksize = SHA1_BLOCK_SIZE,
3944			}
3945		}
3946	},
3947	{
3948		.type = CRYPTO_ALG_TYPE_AHASH,
3949		.is_registered = 0,
3950		.alg.hash = {
3951			.halg.digestsize = SHA256_DIGEST_SIZE,
3952			.halg.base = {
3953				.cra_name = "sha256",
3954				.cra_driver_name = "sha256-chcr",
3955				.cra_blocksize = SHA256_BLOCK_SIZE,
3956			}
3957		}
3958	},
3959	{
3960		.type = CRYPTO_ALG_TYPE_AHASH,
3961		.is_registered = 0,
3962		.alg.hash = {
3963			.halg.digestsize = SHA224_DIGEST_SIZE,
3964			.halg.base = {
3965				.cra_name = "sha224",
3966				.cra_driver_name = "sha224-chcr",
3967				.cra_blocksize = SHA224_BLOCK_SIZE,
3968			}
3969		}
3970	},
3971	{
3972		.type = CRYPTO_ALG_TYPE_AHASH,
3973		.is_registered = 0,
3974		.alg.hash = {
3975			.halg.digestsize = SHA384_DIGEST_SIZE,
3976			.halg.base = {
3977				.cra_name = "sha384",
3978				.cra_driver_name = "sha384-chcr",
3979				.cra_blocksize = SHA384_BLOCK_SIZE,
3980			}
3981		}
3982	},
3983	{
3984		.type = CRYPTO_ALG_TYPE_AHASH,
3985		.is_registered = 0,
3986		.alg.hash = {
3987			.halg.digestsize = SHA512_DIGEST_SIZE,
3988			.halg.base = {
3989				.cra_name = "sha512",
3990				.cra_driver_name = "sha512-chcr",
3991				.cra_blocksize = SHA512_BLOCK_SIZE,
3992			}
3993		}
3994	},
3995	/* HMAC */
3996	{
3997		.type = CRYPTO_ALG_TYPE_HMAC,
3998		.is_registered = 0,
3999		.alg.hash = {
4000			.halg.digestsize = SHA1_DIGEST_SIZE,
4001			.halg.base = {
4002				.cra_name = "hmac(sha1)",
4003				.cra_driver_name = "hmac-sha1-chcr",
4004				.cra_blocksize = SHA1_BLOCK_SIZE,
4005			}
4006		}
4007	},
4008	{
4009		.type = CRYPTO_ALG_TYPE_HMAC,
4010		.is_registered = 0,
4011		.alg.hash = {
4012			.halg.digestsize = SHA224_DIGEST_SIZE,
4013			.halg.base = {
4014				.cra_name = "hmac(sha224)",
4015				.cra_driver_name = "hmac-sha224-chcr",
4016				.cra_blocksize = SHA224_BLOCK_SIZE,
4017			}
4018		}
4019	},
4020	{
4021		.type = CRYPTO_ALG_TYPE_HMAC,
4022		.is_registered = 0,
4023		.alg.hash = {
4024			.halg.digestsize = SHA256_DIGEST_SIZE,
4025			.halg.base = {
4026				.cra_name = "hmac(sha256)",
4027				.cra_driver_name = "hmac-sha256-chcr",
4028				.cra_blocksize = SHA256_BLOCK_SIZE,
4029			}
4030		}
4031	},
4032	{
4033		.type = CRYPTO_ALG_TYPE_HMAC,
4034		.is_registered = 0,
4035		.alg.hash = {
4036			.halg.digestsize = SHA384_DIGEST_SIZE,
4037			.halg.base = {
4038				.cra_name = "hmac(sha384)",
4039				.cra_driver_name = "hmac-sha384-chcr",
4040				.cra_blocksize = SHA384_BLOCK_SIZE,
4041			}
4042		}
4043	},
4044	{
4045		.type = CRYPTO_ALG_TYPE_HMAC,
4046		.is_registered = 0,
4047		.alg.hash = {
4048			.halg.digestsize = SHA512_DIGEST_SIZE,
4049			.halg.base = {
4050				.cra_name = "hmac(sha512)",
4051				.cra_driver_name = "hmac-sha512-chcr",
4052				.cra_blocksize = SHA512_BLOCK_SIZE,
4053			}
4054		}
4055	},
4056	/* Add AEAD Algorithms */
4057	{
4058		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
4059		.is_registered = 0,
4060		.alg.aead = {
4061			.base = {
4062				.cra_name = "gcm(aes)",
4063				.cra_driver_name = "gcm-aes-chcr",
4064				.cra_blocksize	= 1,
4065				.cra_priority = CHCR_AEAD_PRIORITY,
4066				.cra_ctxsize =	sizeof(struct chcr_context) +
4067						sizeof(struct chcr_aead_ctx) +
4068						sizeof(struct chcr_gcm_ctx),
4069			},
4070			.ivsize = GCM_AES_IV_SIZE,
4071			.maxauthsize = GHASH_DIGEST_SIZE,
4072			.setkey = chcr_gcm_setkey,
4073			.setauthsize = chcr_gcm_setauthsize,
4074		}
4075	},
4076	{
4077		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
4078		.is_registered = 0,
4079		.alg.aead = {
4080			.base = {
4081				.cra_name = "rfc4106(gcm(aes))",
4082				.cra_driver_name = "rfc4106-gcm-aes-chcr",
4083				.cra_blocksize	 = 1,
4084				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4085				.cra_ctxsize =	sizeof(struct chcr_context) +
4086						sizeof(struct chcr_aead_ctx) +
4087						sizeof(struct chcr_gcm_ctx),
4088
4089			},
4090			.ivsize = GCM_RFC4106_IV_SIZE,
4091			.maxauthsize	= GHASH_DIGEST_SIZE,
4092			.setkey = chcr_gcm_setkey,
4093			.setauthsize	= chcr_4106_4309_setauthsize,
4094		}
4095	},
4096	{
4097		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
4098		.is_registered = 0,
4099		.alg.aead = {
4100			.base = {
4101				.cra_name = "ccm(aes)",
4102				.cra_driver_name = "ccm-aes-chcr",
4103				.cra_blocksize	 = 1,
4104				.cra_priority = CHCR_AEAD_PRIORITY,
4105				.cra_ctxsize =	sizeof(struct chcr_context) +
4106						sizeof(struct chcr_aead_ctx),
4107
4108			},
4109			.ivsize = AES_BLOCK_SIZE,
4110			.maxauthsize	= GHASH_DIGEST_SIZE,
4111			.setkey = chcr_aead_ccm_setkey,
4112			.setauthsize	= chcr_ccm_setauthsize,
4113		}
4114	},
4115	{
4116		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
4117		.is_registered = 0,
4118		.alg.aead = {
4119			.base = {
4120				.cra_name = "rfc4309(ccm(aes))",
4121				.cra_driver_name = "rfc4309-ccm-aes-chcr",
4122				.cra_blocksize	 = 1,
4123				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4124				.cra_ctxsize =	sizeof(struct chcr_context) +
4125						sizeof(struct chcr_aead_ctx),
4126
4127			},
4128			.ivsize = 8,
4129			.maxauthsize	= GHASH_DIGEST_SIZE,
4130			.setkey = chcr_aead_rfc4309_setkey,
4131			.setauthsize = chcr_4106_4309_setauthsize,
4132		}
4133	},
4134	{
4135		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4136		.is_registered = 0,
4137		.alg.aead = {
4138			.base = {
4139				.cra_name = "authenc(hmac(sha1),cbc(aes))",
4140				.cra_driver_name =
4141					"authenc-hmac-sha1-cbc-aes-chcr",
4142				.cra_blocksize	 = AES_BLOCK_SIZE,
4143				.cra_priority = CHCR_AEAD_PRIORITY,
4144				.cra_ctxsize =	sizeof(struct chcr_context) +
4145						sizeof(struct chcr_aead_ctx) +
4146						sizeof(struct chcr_authenc_ctx),
4147
4148			},
4149			.ivsize = AES_BLOCK_SIZE,
4150			.maxauthsize = SHA1_DIGEST_SIZE,
4151			.setkey = chcr_authenc_setkey,
4152			.setauthsize = chcr_authenc_setauthsize,
4153		}
4154	},
4155	{
4156		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4157		.is_registered = 0,
4158		.alg.aead = {
4159			.base = {
4160
4161				.cra_name = "authenc(hmac(sha256),cbc(aes))",
4162				.cra_driver_name =
4163					"authenc-hmac-sha256-cbc-aes-chcr",
4164				.cra_blocksize	 = AES_BLOCK_SIZE,
4165				.cra_priority = CHCR_AEAD_PRIORITY,
4166				.cra_ctxsize =	sizeof(struct chcr_context) +
4167						sizeof(struct chcr_aead_ctx) +
4168						sizeof(struct chcr_authenc_ctx),
4169
4170			},
4171			.ivsize = AES_BLOCK_SIZE,
4172			.maxauthsize	= SHA256_DIGEST_SIZE,
4173			.setkey = chcr_authenc_setkey,
4174			.setauthsize = chcr_authenc_setauthsize,
4175		}
4176	},
4177	{
4178		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4179		.is_registered = 0,
4180		.alg.aead = {
4181			.base = {
4182				.cra_name = "authenc(hmac(sha224),cbc(aes))",
4183				.cra_driver_name =
4184					"authenc-hmac-sha224-cbc-aes-chcr",
4185				.cra_blocksize	 = AES_BLOCK_SIZE,
4186				.cra_priority = CHCR_AEAD_PRIORITY,
4187				.cra_ctxsize =	sizeof(struct chcr_context) +
4188						sizeof(struct chcr_aead_ctx) +
4189						sizeof(struct chcr_authenc_ctx),
4190			},
4191			.ivsize = AES_BLOCK_SIZE,
4192			.maxauthsize = SHA224_DIGEST_SIZE,
4193			.setkey = chcr_authenc_setkey,
4194			.setauthsize = chcr_authenc_setauthsize,
4195		}
4196	},
4197	{
4198		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4199		.is_registered = 0,
4200		.alg.aead = {
4201			.base = {
4202				.cra_name = "authenc(hmac(sha384),cbc(aes))",
4203				.cra_driver_name =
4204					"authenc-hmac-sha384-cbc-aes-chcr",
4205				.cra_blocksize	 = AES_BLOCK_SIZE,
4206				.cra_priority = CHCR_AEAD_PRIORITY,
4207				.cra_ctxsize =	sizeof(struct chcr_context) +
4208						sizeof(struct chcr_aead_ctx) +
4209						sizeof(struct chcr_authenc_ctx),
4210
4211			},
4212			.ivsize = AES_BLOCK_SIZE,
4213			.maxauthsize = SHA384_DIGEST_SIZE,
4214			.setkey = chcr_authenc_setkey,
4215			.setauthsize = chcr_authenc_setauthsize,
4216		}
4217	},
4218	{
4219		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4220		.is_registered = 0,
4221		.alg.aead = {
4222			.base = {
4223				.cra_name = "authenc(hmac(sha512),cbc(aes))",
4224				.cra_driver_name =
4225					"authenc-hmac-sha512-cbc-aes-chcr",
4226				.cra_blocksize	 = AES_BLOCK_SIZE,
4227				.cra_priority = CHCR_AEAD_PRIORITY,
4228				.cra_ctxsize =	sizeof(struct chcr_context) +
4229						sizeof(struct chcr_aead_ctx) +
4230						sizeof(struct chcr_authenc_ctx),
4231
4232			},
4233			.ivsize = AES_BLOCK_SIZE,
4234			.maxauthsize = SHA512_DIGEST_SIZE,
4235			.setkey = chcr_authenc_setkey,
4236			.setauthsize = chcr_authenc_setauthsize,
4237		}
4238	},
4239	{
4240		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4241		.is_registered = 0,
4242		.alg.aead = {
4243			.base = {
4244				.cra_name = "authenc(digest_null,cbc(aes))",
4245				.cra_driver_name =
4246					"authenc-digest_null-cbc-aes-chcr",
4247				.cra_blocksize	 = AES_BLOCK_SIZE,
4248				.cra_priority = CHCR_AEAD_PRIORITY,
4249				.cra_ctxsize =	sizeof(struct chcr_context) +
4250						sizeof(struct chcr_aead_ctx) +
4251						sizeof(struct chcr_authenc_ctx),
4252
4253			},
4254			.ivsize  = AES_BLOCK_SIZE,
4255			.maxauthsize = 0,
4256			.setkey  = chcr_aead_digest_null_setkey,
4257			.setauthsize = chcr_authenc_null_setauthsize,
4258		}
4259	},
4260	{
4261		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4262		.is_registered = 0,
4263		.alg.aead = {
4264			.base = {
4265				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4266				.cra_driver_name =
4267				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4268				.cra_blocksize	 = 1,
4269				.cra_priority = CHCR_AEAD_PRIORITY,
4270				.cra_ctxsize =	sizeof(struct chcr_context) +
4271						sizeof(struct chcr_aead_ctx) +
4272						sizeof(struct chcr_authenc_ctx),
4273
4274			},
4275			.ivsize = CTR_RFC3686_IV_SIZE,
4276			.maxauthsize = SHA1_DIGEST_SIZE,
4277			.setkey = chcr_authenc_setkey,
4278			.setauthsize = chcr_authenc_setauthsize,
4279		}
4280	},
4281	{
4282		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4283		.is_registered = 0,
4284		.alg.aead = {
4285			.base = {
4286
4287				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4288				.cra_driver_name =
4289				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4290				.cra_blocksize	 = 1,
4291				.cra_priority = CHCR_AEAD_PRIORITY,
4292				.cra_ctxsize =	sizeof(struct chcr_context) +
4293						sizeof(struct chcr_aead_ctx) +
4294						sizeof(struct chcr_authenc_ctx),
4295
4296			},
4297			.ivsize = CTR_RFC3686_IV_SIZE,
4298			.maxauthsize	= SHA256_DIGEST_SIZE,
4299			.setkey = chcr_authenc_setkey,
4300			.setauthsize = chcr_authenc_setauthsize,
4301		}
4302	},
4303	{
4304		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4305		.is_registered = 0,
4306		.alg.aead = {
4307			.base = {
4308				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4309				.cra_driver_name =
4310				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4311				.cra_blocksize	 = 1,
4312				.cra_priority = CHCR_AEAD_PRIORITY,
4313				.cra_ctxsize =	sizeof(struct chcr_context) +
4314						sizeof(struct chcr_aead_ctx) +
4315						sizeof(struct chcr_authenc_ctx),
4316			},
4317			.ivsize = CTR_RFC3686_IV_SIZE,
4318			.maxauthsize = SHA224_DIGEST_SIZE,
4319			.setkey = chcr_authenc_setkey,
4320			.setauthsize = chcr_authenc_setauthsize,
4321		}
4322	},
4323	{
4324		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4325		.is_registered = 0,
4326		.alg.aead = {
4327			.base = {
4328				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4329				.cra_driver_name =
4330				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4331				.cra_blocksize	 = 1,
4332				.cra_priority = CHCR_AEAD_PRIORITY,
4333				.cra_ctxsize =	sizeof(struct chcr_context) +
4334						sizeof(struct chcr_aead_ctx) +
4335						sizeof(struct chcr_authenc_ctx),
4336
4337			},
4338			.ivsize = CTR_RFC3686_IV_SIZE,
4339			.maxauthsize = SHA384_DIGEST_SIZE,
4340			.setkey = chcr_authenc_setkey,
4341			.setauthsize = chcr_authenc_setauthsize,
4342		}
4343	},
4344	{
4345		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4346		.is_registered = 0,
4347		.alg.aead = {
4348			.base = {
4349				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4350				.cra_driver_name =
4351				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4352				.cra_blocksize	 = 1,
4353				.cra_priority = CHCR_AEAD_PRIORITY,
4354				.cra_ctxsize =	sizeof(struct chcr_context) +
4355						sizeof(struct chcr_aead_ctx) +
4356						sizeof(struct chcr_authenc_ctx),
4357
4358			},
4359			.ivsize = CTR_RFC3686_IV_SIZE,
4360			.maxauthsize = SHA512_DIGEST_SIZE,
4361			.setkey = chcr_authenc_setkey,
4362			.setauthsize = chcr_authenc_setauthsize,
4363		}
4364	},
4365	{
4366		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4367		.is_registered = 0,
4368		.alg.aead = {
4369			.base = {
4370				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4371				.cra_driver_name =
4372				"authenc-digest_null-rfc3686-ctr-aes-chcr",
4373				.cra_blocksize	 = 1,
4374				.cra_priority = CHCR_AEAD_PRIORITY,
4375				.cra_ctxsize =	sizeof(struct chcr_context) +
4376						sizeof(struct chcr_aead_ctx) +
4377						sizeof(struct chcr_authenc_ctx),
4378
4379			},
4380			.ivsize  = CTR_RFC3686_IV_SIZE,
4381			.maxauthsize = 0,
4382			.setkey  = chcr_aead_digest_null_setkey,
4383			.setauthsize = chcr_authenc_null_setauthsize,
4384		}
4385	},
4386};
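/*
 * The table above is consumed by chcr_register_alg() below; these
 * implementations are never called directly but are reached through the
 * generic kernel crypto API.  A minimal caller sketch (illustrative
 * only: the key buffer, its length and the surrounding error handling
 * are assumptions, not part of this driver):
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	// "gcm-aes-chcr" wins the "gcm(aes)" lookup whenever its
 *	// cra_priority (CHCR_AEAD_PRIORITY) outranks the other
 *	// registered gcm(aes) implementations.
 *	err = crypto_aead_setkey(tfm, key, keylen);
 *	if (!err)
 *		err = crypto_aead_setauthsize(tfm, 16);
 *	...
 *	crypto_free_aead(tfm);
 */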
4387
4388/*
4389 *	chcr_unregister_alg - Deregister crypto algorithms with the
4390 *	kernel framework.
4391 */
4392static int chcr_unregister_alg(void)
4393{
4394	int i;
4395
4396	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4397		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4398		case CRYPTO_ALG_TYPE_SKCIPHER:
4399			if (driver_algs[i].is_registered && refcount_read(
4400			    &driver_algs[i].alg.skcipher.base.cra_refcnt)
4401			    == 1) {
4402				crypto_unregister_skcipher(
4403						&driver_algs[i].alg.skcipher);
4404				driver_algs[i].is_registered = 0;
4405			}
4406			break;
4407		case CRYPTO_ALG_TYPE_AEAD:
4408			if (driver_algs[i].is_registered && refcount_read(
4409			    &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
4410				crypto_unregister_aead(
4411						&driver_algs[i].alg.aead);
4412				driver_algs[i].is_registered = 0;
4413			}
4414			break;
4415		case CRYPTO_ALG_TYPE_AHASH:
4416			if (driver_algs[i].is_registered && refcount_read(
4417			    &driver_algs[i].alg.hash.halg.base.cra_refcnt)
4418			    == 1) {
4419				crypto_unregister_ahash(
4420						&driver_algs[i].alg.hash);
4421				driver_algs[i].is_registered = 0;
4422			}
4423			break;
4424		}
4425	}
4426	return 0;
4427}
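/*
 * Each algorithm is only unregistered while its cra_refcnt is 1, i.e. no
 * transforms are still holding it; entries with live users keep
 * is_registered set and stay registered.
 */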
4428
4429#define SZ_AHASH_CTX sizeof(struct chcr_context)
4430#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4431#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4432
4433/*
4434 *	chcr_register_alg - Register crypto algorithms with kernel framework.
4435 */
4436static int chcr_register_alg(void)
4437{
4438	struct crypto_alg ai;
4439	struct ahash_alg *a_hash;
4440	int err = 0, i;
4441	char *name = NULL;
4442
4443	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4444		if (driver_algs[i].is_registered)
4445			continue;
4446		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4447		case CRYPTO_ALG_TYPE_SKCIPHER:
4448			driver_algs[i].alg.skcipher.base.cra_priority =
4449				CHCR_CRA_PRIORITY;
4450			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
4451			driver_algs[i].alg.skcipher.base.cra_flags =
4452				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
4453				CRYPTO_ALG_ALLOCATES_MEMORY |
4454				CRYPTO_ALG_NEED_FALLBACK;
4455			driver_algs[i].alg.skcipher.base.cra_ctxsize =
4456				sizeof(struct chcr_context) +
4457				sizeof(struct ablk_ctx);
4458			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
4459
4460			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
4461			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
4462			break;
4463		case CRYPTO_ALG_TYPE_AEAD:
4464			driver_algs[i].alg.aead.base.cra_flags =
4465				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
4466				CRYPTO_ALG_ALLOCATES_MEMORY;
4467			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4468			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4469			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4470			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4471			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4472			err = crypto_register_aead(&driver_algs[i].alg.aead);
4473			name = driver_algs[i].alg.aead.base.cra_driver_name;
4474			break;
4475		case CRYPTO_ALG_TYPE_AHASH:
4476			a_hash = &driver_algs[i].alg.hash;
4477			a_hash->update = chcr_ahash_update;
4478			a_hash->final = chcr_ahash_final;
4479			a_hash->finup = chcr_ahash_finup;
4480			a_hash->digest = chcr_ahash_digest;
4481			a_hash->export = chcr_ahash_export;
4482			a_hash->import = chcr_ahash_import;
4483			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4484			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4485			a_hash->halg.base.cra_module = THIS_MODULE;
4486			a_hash->halg.base.cra_flags =
4487				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4488			a_hash->halg.base.cra_alignmask = 0;
4489			a_hash->halg.base.cra_exit = NULL;
4490
4491			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4492				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4493				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4494				a_hash->init = chcr_hmac_init;
4495				a_hash->setkey = chcr_ahash_setkey;
4496				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4497			} else {
4498				a_hash->init = chcr_sha_init;
4499				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4500				a_hash->halg.base.cra_init = chcr_sha_cra_init;
4501			}
4502			err = crypto_register_ahash(&driver_algs[i].alg.hash);
4503			ai = driver_algs[i].alg.hash.halg.base;
4504			name = ai.cra_driver_name;
4505			break;
4506		}
4507		if (err) {
4508			pr_err("%s : Algorithm registration failed\n", name);
4509			goto register_err;
4510		} else {
4511			driver_algs[i].is_registered = 1;
4512		}
4513	}
4514	return 0;
4515
4516register_err:
4517	chcr_unregister_alg();
4518	return err;
4519}
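/*
 * Everything here is registered CRYPTO_ALG_ASYNC, since completions
 * arrive from the adapter, and the skcipher/AEAD entries additionally
 * carry CRYPTO_ALG_NEED_FALLBACK: each transform keeps a software
 * sibling (the sw_cipher that the setkey handlers above propagate keys
 * into) for requests the hardware path cannot service.
 */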
4520
4521/*
4522 *	start_crypto - Register the crypto algorithms.
4523 *	This should be called once when the first device comes up. After
4524 *	this the kernel will start calling driver APIs for crypto operations.
4525 */
4526int start_crypto(void)
4527{
4528	return chcr_register_alg();
4529}
4530
4531/*
4532 *	stop_crypto - Deregister all the crypto algorithms with the kernel.
4533 *	This should be called once when the last device goes down. After
4534 *	this the kernel will not call the driver API for crypto operations.
4535 */
4536int stop_crypto(void)
4537{
4538	chcr_unregister_alg();
4539	return 0;
4540}
  99static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
 100{
 101	return ctx->crypto_ctx->aeadctx;
 102}
 103
 104static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
 105{
 106	return ctx->crypto_ctx->ablkctx;
 107}
 108
 109static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
 110{
 111	return ctx->crypto_ctx->hmacctx;
 112}
 113
 114static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
 115{
 116	return gctx->ctx->gcm;
 117}
 118
 119static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
 120{
 121	return gctx->ctx->authenc;
 122}
 123
 124static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
 125{
 126	return container_of(ctx->dev, struct uld_ctx, dev);
 127}
 128
 129static inline int is_ofld_imm(const struct sk_buff *skb)
 130{
 131	return (skb->len <= SGE_MAX_WR_LEN);
 132}
 133
 134static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
 135{
 136	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
 137}
 138
 139static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
 140			 unsigned int entlen,
 141			 unsigned int skip)
 142{
 143	int nents = 0;
 144	unsigned int less;
 145	unsigned int skip_len = 0;
 146
 147	while (sg && skip) {
 148		if (sg_dma_len(sg) <= skip) {
 149			skip -= sg_dma_len(sg);
 150			skip_len = 0;
 151			sg = sg_next(sg);
 152		} else {
 153			skip_len = skip;
 154			skip = 0;
 155		}
 156	}
 157
 158	while (sg && reqlen) {
 159		less = min(reqlen, sg_dma_len(sg) - skip_len);
 160		nents += DIV_ROUND_UP(less, entlen);
 161		reqlen -= less;
 162		skip_len = 0;
 163		sg = sg_next(sg);
 164	}
 165	return nents;
 166}
 167
 168static inline int get_aead_subtype(struct crypto_aead *aead)
 169{
 170	struct aead_alg *alg = crypto_aead_alg(aead);
 171	struct chcr_alg_template *chcr_crypto_alg =
 172		container_of(alg, struct chcr_alg_template, alg.aead);
 173	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 174}
 175
 176void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
 177{
 178	u8 temp[SHA512_DIGEST_SIZE];
 179	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 180	int authsize = crypto_aead_authsize(tfm);
 181	struct cpl_fw6_pld *fw6_pld;
 182	int cmp = 0;
 183
 184	fw6_pld = (struct cpl_fw6_pld *)input;
 185	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
 186	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
 187		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
 188	} else {
 189
 190		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
 191				authsize, req->assoclen +
 192				req->cryptlen - authsize);
 193		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
 194	}
 195	if (cmp)
 196		*err = -EBADMSG;
 197	else
 198		*err = 0;
 199}
 200
 201static int chcr_inc_wrcount(struct chcr_dev *dev)
 202{
 203	if (dev->state == CHCR_DETACH)
 204		return 1;
 205	atomic_inc(&dev->inflight);
 206	return 0;
 207}
 208
 209static inline void chcr_dec_wrcount(struct chcr_dev *dev)
 210{
 211	atomic_dec(&dev->inflight);
 212}
 213
 214static inline int chcr_handle_aead_resp(struct aead_request *req,
 215					 unsigned char *input,
 216					 int err)
 217{
 218	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 219	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 220	struct chcr_dev *dev = a_ctx(tfm)->dev;
 221
 222	chcr_aead_common_exit(req);
 223	if (reqctx->verify == VERIFY_SW) {
 224		chcr_verify_tag(req, input, &err);
 225		reqctx->verify = VERIFY_HW;
 226	}
 227	chcr_dec_wrcount(dev);
 228	req->base.complete(&req->base, err);
 229
 230	return err;
 231}
 232
 233static void get_aes_decrypt_key(unsigned char *dec_key,
 234				       const unsigned char *key,
 235				       unsigned int keylength)
 236{
 237	u32 temp;
 238	u32 w_ring[MAX_NK];
 239	int i, j, k;
 240	u8  nr, nk;
 241
 242	switch (keylength) {
 243	case AES_KEYLENGTH_128BIT:
 244		nk = KEYLENGTH_4BYTES;
 245		nr = NUMBER_OF_ROUNDS_10;
 246		break;
 247	case AES_KEYLENGTH_192BIT:
 248		nk = KEYLENGTH_6BYTES;
 249		nr = NUMBER_OF_ROUNDS_12;
 250		break;
 251	case AES_KEYLENGTH_256BIT:
 252		nk = KEYLENGTH_8BYTES;
 253		nr = NUMBER_OF_ROUNDS_14;
 254		break;
 255	default:
 256		return;
 257	}
 258	for (i = 0; i < nk; i++)
 259		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
 260
 261	i = 0;
 262	temp = w_ring[nk - 1];
 263	while (i + nk < (nr + 1) * 4) {
 264		if (!(i % nk)) {
 265			/* RotWord(temp) */
 266			temp = (temp << 8) | (temp >> 24);
 267			temp = aes_ks_subword(temp);
 268			temp ^= round_constant[i / nk];
 269		} else if (nk == 8 && (i % 4 == 0)) {
 270			temp = aes_ks_subword(temp);
 271		}
 272		w_ring[i % nk] ^= temp;
 273		temp = w_ring[i % nk];
 274		i++;
 275	}
 276	i--;
 277	for (k = 0, j = i % nk; k < nk; k++) {
 278		*((u32 *)dec_key + k) = htonl(w_ring[j]);
 279		j--;
 280		if (j < 0)
 281			j += nk;
 282	}
 283}
 284
 285static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
 286{
 287	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
 288
 289	switch (ds) {
 290	case SHA1_DIGEST_SIZE:
 291		base_hash = crypto_alloc_shash("sha1", 0, 0);
 292		break;
 293	case SHA224_DIGEST_SIZE:
 294		base_hash = crypto_alloc_shash("sha224", 0, 0);
 295		break;
 296	case SHA256_DIGEST_SIZE:
 297		base_hash = crypto_alloc_shash("sha256", 0, 0);
 298		break;
 299	case SHA384_DIGEST_SIZE:
 300		base_hash = crypto_alloc_shash("sha384", 0, 0);
 301		break;
 302	case SHA512_DIGEST_SIZE:
 303		base_hash = crypto_alloc_shash("sha512", 0, 0);
 304		break;
 305	}
 306
 307	return base_hash;
 308}
 309
 310static int chcr_compute_partial_hash(struct shash_desc *desc,
 311				     char *iopad, char *result_hash,
 312				     int digest_size)
 313{
 314	struct sha1_state sha1_st;
 315	struct sha256_state sha256_st;
 316	struct sha512_state sha512_st;
 317	int error;
 318
 319	if (digest_size == SHA1_DIGEST_SIZE) {
 320		error = crypto_shash_init(desc) ?:
 321			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
 322			crypto_shash_export(desc, (void *)&sha1_st);
 323		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
 324	} else if (digest_size == SHA224_DIGEST_SIZE) {
 325		error = crypto_shash_init(desc) ?:
 326			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
 327			crypto_shash_export(desc, (void *)&sha256_st);
 328		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
 329
 330	} else if (digest_size == SHA256_DIGEST_SIZE) {
 331		error = crypto_shash_init(desc) ?:
 332			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
 333			crypto_shash_export(desc, (void *)&sha256_st);
 334		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
 335
 336	} else if (digest_size == SHA384_DIGEST_SIZE) {
 337		error = crypto_shash_init(desc) ?:
 338			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
 339			crypto_shash_export(desc, (void *)&sha512_st);
 340		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
 341
 342	} else if (digest_size == SHA512_DIGEST_SIZE) {
 343		error = crypto_shash_init(desc) ?:
 344			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
 345			crypto_shash_export(desc, (void *)&sha512_st);
 346		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
 347	} else {
 348		error = -EINVAL;
 349		pr_err("Unknown digest size %d\n", digest_size);
 350	}
 351	return error;
 352}
 353
 354static void chcr_change_order(char *buf, int ds)
 355{
 356	int i;
 357
 358	if (ds == SHA512_DIGEST_SIZE) {
 359		for (i = 0; i < (ds / sizeof(u64)); i++)
 360			*((__be64 *)buf + i) =
 361				cpu_to_be64(*((u64 *)buf + i));
 362	} else {
 363		for (i = 0; i < (ds / sizeof(u32)); i++)
 364			*((__be32 *)buf + i) =
 365				cpu_to_be32(*((u32 *)buf + i));
 366	}
 367}
 368
 369static inline int is_hmac(struct crypto_tfm *tfm)
 370{
 371	struct crypto_alg *alg = tfm->__crt_alg;
 372	struct chcr_alg_template *chcr_crypto_alg =
 373		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
 374			     alg.hash);
 375	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
 376		return 1;
 377	return 0;
 378}
 379
 380static inline void dsgl_walk_init(struct dsgl_walk *walk,
 381				   struct cpl_rx_phys_dsgl *dsgl)
 382{
 383	walk->dsgl = dsgl;
 384	walk->nents = 0;
 385	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 386}
 387
 388static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
 389				 int pci_chan_id)
 390{
 391	struct cpl_rx_phys_dsgl *phys_cpl;
 392
 393	phys_cpl = walk->dsgl;
 394
 395	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
 396				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
 397	phys_cpl->pcirlxorder_to_noofsgentr =
 398		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
 399		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
 400		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
 401		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
 402		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
 403		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
 404	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
 405	phys_cpl->rss_hdr_int.qid = htons(qid);
 406	phys_cpl->rss_hdr_int.hash_val = 0;
 407	phys_cpl->rss_hdr_int.channel = pci_chan_id;
 408}
 409
 410static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
 411					size_t size,
 412					dma_addr_t addr)
 413{
 414	int j;
 415
 416	if (!size)
 417		return;
 418	j = walk->nents;
 419	walk->to->len[j % 8] = htons(size);
 420	walk->to->addr[j % 8] = cpu_to_be64(addr);
 421	j++;
 422	if ((j % 8) == 0)
 423		walk->to++;
 424	walk->nents = j;
 425}
 426
 427static void  dsgl_walk_add_sg(struct dsgl_walk *walk,
 428			   struct scatterlist *sg,
 429			      unsigned int slen,
 430			      unsigned int skip)
 431{
 432	int skip_len = 0;
 433	unsigned int left_size = slen, len = 0;
 434	unsigned int j = walk->nents;
 435	int offset, ent_len;
 436
 437	if (!slen)
 438		return;
 439	while (sg && skip) {
 440		if (sg_dma_len(sg) <= skip) {
 441			skip -= sg_dma_len(sg);
 442			skip_len = 0;
 443			sg = sg_next(sg);
 444		} else {
 445			skip_len = skip;
 446			skip = 0;
 447		}
 448	}
 449
 450	while (left_size && sg) {
 451		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
 452		offset = 0;
 453		while (len) {
 454			ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
 455			walk->to->len[j % 8] = htons(ent_len);
 456			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
 457						      offset + skip_len);
 458			offset += ent_len;
 459			len -= ent_len;
 460			j++;
 461			if ((j % 8) == 0)
 462				walk->to++;
 463		}
 464		walk->last_sg = sg;
 465		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
 466					  skip_len) + skip_len;
 467		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
 468		skip_len = 0;
 469		sg = sg_next(sg);
 470	}
 471	walk->nents = j;
 472}
 473
 474static inline void ulptx_walk_init(struct ulptx_walk *walk,
 475				   struct ulptx_sgl *ulp)
 476{
 477	walk->sgl = ulp;
 478	walk->nents = 0;
 479	walk->pair_idx = 0;
 480	walk->pair = ulp->sge;
 481	walk->last_sg = NULL;
 482	walk->last_sg_len = 0;
 483}
 484
 485static inline void ulptx_walk_end(struct ulptx_walk *walk)
 486{
 487	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
 488			      ULPTX_NSGE_V(walk->nents));
 489}
 490
 491
 492static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
 493					size_t size,
 494					dma_addr_t addr)
 495{
 496	if (!size)
 497		return;
 498
 499	if (walk->nents == 0) {
 500		walk->sgl->len0 = cpu_to_be32(size);
 501		walk->sgl->addr0 = cpu_to_be64(addr);
 502	} else {
 503		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
 504		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
 505		walk->pair_idx = !walk->pair_idx;
 506		if (!walk->pair_idx)
 507			walk->pair++;
 508	}
 509	walk->nents++;
 510}
 511
 512static void  ulptx_walk_add_sg(struct ulptx_walk *walk,
 513					struct scatterlist *sg,
 514			       unsigned int len,
 515			       unsigned int skip)
 516{
 517	int small;
 518	int skip_len = 0;
 519	unsigned int sgmin;
 520
 521	if (!len)
 522		return;
 523	while (sg && skip) {
 524		if (sg_dma_len(sg) <= skip) {
 525			skip -= sg_dma_len(sg);
 526			skip_len = 0;
 527			sg = sg_next(sg);
 528		} else {
 529			skip_len = skip;
 530			skip = 0;
 531		}
 532	}
 533	WARN(!sg, "SG should not be null here\n");
 534	if (sg && (walk->nents == 0)) {
 535		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
 536		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
 537		walk->sgl->len0 = cpu_to_be32(sgmin);
 538		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
 539		walk->nents++;
 540		len -= sgmin;
 541		walk->last_sg = sg;
 542		walk->last_sg_len = sgmin + skip_len;
 543		skip_len += sgmin;
 544		if (sg_dma_len(sg) == skip_len) {
 545			sg = sg_next(sg);
 546			skip_len = 0;
 547		}
 548	}
 549
 550	while (sg && len) {
 551		small = min(sg_dma_len(sg) - skip_len, len);
 552		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
 553		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
 554		walk->pair->addr[walk->pair_idx] =
 555			cpu_to_be64(sg_dma_address(sg) + skip_len);
 556		walk->pair_idx = !walk->pair_idx;
 557		walk->nents++;
 558		if (!walk->pair_idx)
 559			walk->pair++;
 560		len -= sgmin;
 561		skip_len += sgmin;
 562		walk->last_sg = sg;
 563		walk->last_sg_len = skip_len;
 564		if (sg_dma_len(sg) == skip_len) {
 565			sg = sg_next(sg);
 566			skip_len = 0;
 567		}
 568	}
 569}
 570
 571static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
 572{
 573	struct crypto_alg *alg = tfm->__crt_alg;
 574	struct chcr_alg_template *chcr_crypto_alg =
 575		container_of(alg, struct chcr_alg_template, alg.crypto);
 576
 577	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 578}
 579
 580static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
 581{
 582	struct adapter *adap = netdev2adap(dev);
 583	struct sge_uld_txq_info *txq_info =
 584		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
 585	struct sge_uld_txq *txq;
 586	int ret = 0;
 587
 588	local_bh_disable();
 589	txq = &txq_info->uldtxq[idx];
 590	spin_lock(&txq->sendq.lock);
 591	if (txq->full)
 592		ret = -1;
 593	spin_unlock(&txq->sendq.lock);
 594	local_bh_enable();
 595	return ret;
 596}
 597
 598static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
 599			       struct _key_ctx *key_ctx)
 600{
 601	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
 602		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
 603	} else {
 604		memcpy(key_ctx->key,
 605		       ablkctx->key + (ablkctx->enckey_len >> 1),
 606		       ablkctx->enckey_len >> 1);
 607		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
 608		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
 609	}
 610	return 0;
 611}
 612
 613static int chcr_hash_ent_in_wr(struct scatterlist *src,
 614			     unsigned int minsg,
 615			     unsigned int space,
 616			     unsigned int srcskip)
 617{
 618	int srclen = 0;
 619	int srcsg = minsg;
 620	int soffset = 0, sless;
 621
 622	if (sg_dma_len(src) == srcskip) {
 623		src = sg_next(src);
 624		srcskip = 0;
 625	}
 626	while (src && space > (sgl_ent_len[srcsg + 1])) {
 627		sless = min_t(unsigned int, sg_dma_len(src) - soffset -	srcskip,
 628							CHCR_SRC_SG_SIZE);
 629		srclen += sless;
 630		soffset += sless;
 631		srcsg++;
 632		if (sg_dma_len(src) == (soffset + srcskip)) {
 633			src = sg_next(src);
 634			soffset = 0;
 635			srcskip = 0;
 636		}
 637	}
 638	return srclen;
 639}
 640
 641static int chcr_sg_ent_in_wr(struct scatterlist *src,
 642			     struct scatterlist *dst,
 643			     unsigned int minsg,
 644			     unsigned int space,
 645			     unsigned int srcskip,
 646			     unsigned int dstskip)
 647{
 648	int srclen = 0, dstlen = 0;
 649	int srcsg = minsg, dstsg = minsg;
 650	int offset = 0, soffset = 0, less, sless = 0;
 651
 652	if (sg_dma_len(src) == srcskip) {
 653		src = sg_next(src);
 654		srcskip = 0;
 655	}
 656	if (sg_dma_len(dst) == dstskip) {
 657		dst = sg_next(dst);
 658		dstskip = 0;
 659	}
 660
 661	while (src && dst &&
 662	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
 663		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
 664				CHCR_SRC_SG_SIZE);
 665		srclen += sless;
 666		srcsg++;
 667		offset = 0;
 668		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
 669		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
 670			if (srclen <= dstlen)
 671				break;
 672			less = min_t(unsigned int, sg_dma_len(dst) - offset -
 673				     dstskip, CHCR_DST_SG_SIZE);
 674			dstlen += less;
 675			offset += less;
 676			if ((offset + dstskip) == sg_dma_len(dst)) {
 677				dst = sg_next(dst);
 678				offset = 0;
 679			}
 680			dstsg++;
 681			dstskip = 0;
 682		}
 683		soffset += sless;
 684		if ((soffset + srcskip) == sg_dma_len(src)) {
 685			src = sg_next(src);
 686			srcskip = 0;
 687			soffset = 0;
 688		}
 689
 690	}
 691	return min(srclen, dstlen);
 692}
 693
 694static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
 695				u32 flags,
 696				struct scatterlist *src,
 697				struct scatterlist *dst,
 698				unsigned int nbytes,
 699				u8 *iv,
 700				unsigned short op_type)
 701{
 
 702	int err;
 703
 704	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
 
 
 
 
 705
 706	skcipher_request_set_sync_tfm(subreq, cipher);
 707	skcipher_request_set_callback(subreq, flags, NULL, NULL);
 708	skcipher_request_set_crypt(subreq, src, dst,
 709				   nbytes, iv);
 710
 711	err = op_type ? crypto_skcipher_decrypt(subreq) :
 712		crypto_skcipher_encrypt(subreq);
 713	skcipher_request_zero(subreq);
 714
 715	return err;
 716
 717}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 718static inline void create_wreq(struct chcr_context *ctx,
 719			       struct chcr_wr *chcr_req,
 720			       struct crypto_async_request *req,
 721			       unsigned int imm,
 722			       int hash_sz,
 723			       unsigned int len16,
 724			       unsigned int sc_len,
 725			       unsigned int lcb)
 726{
 727	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 728	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
 729
 730
 731	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
 732	chcr_req->wreq.pld_size_hash_size =
 733		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
 734	chcr_req->wreq.len16_pkd =
 735		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
 736	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
 737	chcr_req->wreq.rx_chid_to_rx_q_id =
 738		FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
 739				!!lcb, ctx->tx_qidx);
 740
 741	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
 742						       qid);
 743	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
 744				     ((sizeof(chcr_req->wreq)) >> 4)));
 745
 746	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
 747	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
 748					   sizeof(chcr_req->key_ctx) + sc_len);
 749}
 750
 751/**
 752 *	create_cipher_wr - form the WR for a cipher operation
 753 *	@wrparam: work request parameters, carrying the cipher request
 754 *		  itself, the number of bytes to process in this WR, and
 755 *		  the ingress qid where the response of this WR should be
 756 *		  received.
 757 */
 758static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 759{
 760	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
 761	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
 762	struct sk_buff *skb = NULL;
 763	struct chcr_wr *chcr_req;
 764	struct cpl_rx_phys_dsgl *phys_cpl;
 765	struct ulptx_sgl *ulptx;
 766	struct chcr_blkcipher_req_ctx *reqctx =
 767		ablkcipher_request_ctx(wrparam->req);
 768	unsigned int temp = 0, transhdr_len, dst_size;
 769	int error;
 770	int nents;
 771	unsigned int kctx_len;
 772	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 773			GFP_KERNEL : GFP_ATOMIC;
 774	struct adapter *adap = padap(c_ctx(tfm)->dev);
 775
 776	nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
 777			      reqctx->dst_ofst);
 778	dst_size = get_space_for_phys_dsgl(nents);
 779	kctx_len = roundup(ablkctx->enckey_len, 16);
 780	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
 781	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
 782				  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
 783	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
 784				     (sgl_len(nents) * 8);
 785	transhdr_len += temp;
 786	transhdr_len = roundup(transhdr_len, 16);
 787	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
 788	if (!skb) {
 789		error = -ENOMEM;
 790		goto err;
 791	}
 792	chcr_req = __skb_put_zero(skb, transhdr_len);
 793	chcr_req->sec_cpl.op_ivinsrtofst =
 794		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);
 795
 796	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
 797	chcr_req->sec_cpl.aadstart_cipherstop_hi =
 798			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
 799
 800	chcr_req->sec_cpl.cipherstop_lo_authinsert =
 801			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
 802	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
 803							 ablkctx->ciph_mode,
 804							 0, 0, IV >> 1);
 805	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
 806							  0, 1, dst_size);
 807
 808	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
 809	if ((reqctx->op == CHCR_DECRYPT_OP) &&
 810	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
 811	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
 812	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
 813	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
 814		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
 815	} else {
 816		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
 817		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
 818			memcpy(chcr_req->key_ctx.key, ablkctx->key,
 819			       ablkctx->enckey_len);
 820		} else {
 821			memcpy(chcr_req->key_ctx.key, ablkctx->key +
 822			       (ablkctx->enckey_len >> 1),
 823			       ablkctx->enckey_len >> 1);
 824			memcpy(chcr_req->key_ctx.key +
 825			       (ablkctx->enckey_len >> 1),
 826			       ablkctx->key,
 827			       ablkctx->enckey_len >> 1);
 828		}
 829	}
 830	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
 831	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
 832	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
 833	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
 834
 835	atomic_inc(&adap->chcr_stats.cipher_rqst);
 836	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
 837		+ (reqctx->imm ? (wrparam->bytes) : 0);
 838	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
 839		    transhdr_len, temp,
 840			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
 841	reqctx->skb = skb;
 842
 843	if (reqctx->op && (ablkctx->ciph_mode ==
 844			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
 845		sg_pcopy_to_buffer(wrparam->req->src,
 846			sg_nents(wrparam->req->src), wrparam->req->info, 16,
 847			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
 848
 849	return skb;
 850err:
 851	return ERR_PTR(error);
 852}
 853
 854static inline int chcr_keyctx_ck_size(unsigned int keylen)
 855{
 856	int ck_size = 0;
 857
 858	if (keylen == AES_KEYSIZE_128)
 859		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
 860	else if (keylen == AES_KEYSIZE_192)
 861		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
 862	else if (keylen == AES_KEYSIZE_256)
 863		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
 864	else
 865		ck_size = 0;
 866
 867	return ck_size;
 868}
 869static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
 870				       const u8 *key,
 871				       unsigned int keylen)
 872{
 873	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
 874	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 875	int err = 0;
 876
 877	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
 878				CRYPTO_TFM_REQ_MASK);
 879	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
 880				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
 881	err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
 882	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
 883	tfm->crt_flags |=
 884		crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
 885		CRYPTO_TFM_RES_MASK;
 886	return err;
 887}
 888
 889static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
 890			       const u8 *key,
 891			       unsigned int keylen)
 892{
 893	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 894	unsigned int ck_size, context_size;
 895	u16 alignment = 0;
 896	int err;
 897
 898	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 899	if (err)
 900		goto badkey_err;
 901
 902	ck_size = chcr_keyctx_ck_size(keylen);
 903	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
 904	memcpy(ablkctx->key, key, keylen);
 905	ablkctx->enckey_len = keylen;
 906	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
 907	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 908			keylen + alignment) >> 4;
 909
 910	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 911						0, 0, context_size);
 912	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
 913	return 0;
 914badkey_err:
 915	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 916	ablkctx->enckey_len = 0;
 917
 918	return err;
 919}
 920
 921static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
 922				   const u8 *key,
 923				   unsigned int keylen)
 924{
 925	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 926	unsigned int ck_size, context_size;
 927	u16 alignment = 0;
 928	int err;
 929
 930	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 931	if (err)
 932		goto badkey_err;
 933	ck_size = chcr_keyctx_ck_size(keylen);
 934	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
 935	memcpy(ablkctx->key, key, keylen);
 936	ablkctx->enckey_len = keylen;
 937	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 938			keylen + alignment) >> 4;
 939
 940	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 941						0, 0, context_size);
 942	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
 943
 944	return 0;
 945badkey_err:
 946	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 947	ablkctx->enckey_len = 0;
 948
 949	return err;
 950}
 951
 952static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
 953				   const u8 *key,
 954				   unsigned int keylen)
 955{
 956	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 957	unsigned int ck_size, context_size;
 958	u16 alignment = 0;
 959	int err;
 960
 961	if (keylen < CTR_RFC3686_NONCE_SIZE)
 962		return -EINVAL;
 963	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
 964	       CTR_RFC3686_NONCE_SIZE);
 965
 966	keylen -= CTR_RFC3686_NONCE_SIZE;
 967	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 968	if (err)
 969		goto badkey_err;
 970
 971	ck_size = chcr_keyctx_ck_size(keylen);
 972	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
 973	memcpy(ablkctx->key, key, keylen);
 974	ablkctx->enckey_len = keylen;
 975	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 976			keylen + alignment) >> 4;
 977
 978	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 979						0, 0, context_size);
 980	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
 981
 982	return 0;
 983badkey_err:
 984	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 985	ablkctx->enckey_len = 0;
 986
 987	return err;
 988}
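/* Add "add" to the 128-bit big-endian counter block in srciv, writing
 * the result to dstiv. The addition is done 32 bits at a time starting
 * from the least significant word, carrying 1 into the next word on
 * wrap-around: e.g. with the low word at 0xffffffff and add == 1, the
 * low word becomes 0 and the next word is incremented.
 */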
 989static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
 990{
 991	unsigned int size = AES_BLOCK_SIZE;
 992	__be32 *b = (__be32 *)(dstiv + size);
 993	u32 c, prev;
 994
 995	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
 996	for (; size >= 4; size -= 4) {
 997		prev = be32_to_cpu(*--b);
 998		c = prev + add;
 999		*b = cpu_to_be32(c);
1000		if (prev < c)
1001			break;
1002		add = 1;
1003	}
1004
1005}
1006
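/* Clamp "bytes" so that the low 32-bit word of the big-endian CTR
 * counter cannot wrap within a single request: c = ~temp + 1 is the
 * number of blocks left before that word overflows. E.g. if the low
 * word is 0xfffffffe then c == 2, so at most two AES blocks go into
 * this WR and the rest is resubmitted with an updated IV.
 */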
1007static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
1008{
1009	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
1010	u64 c;
1011	u32 temp = be32_to_cpu(*--b);
1012
1013	temp = ~temp;
1014	c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
1015	if ((bytes / AES_BLOCK_SIZE) > c)
1016		bytes = c * AES_BLOCK_SIZE;
1017	return bytes;
1018}
1019
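/* Recompute the XTS tweak for the next chunk: encrypt the original IV
 * with the second half of the key to get tweak 0, then multiply by x in
 * GF(2^128) once per block already processed (eight blocks at a time
 * via gf128mul_x8_ble()). For intermediate chunks the result is
 * decrypted again, apparently so that the hardware, which re-encrypts
 * the supplied IV, derives the correct tweak.
 */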
1020static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
1021			     u32 isfinal)
1022{
1023	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1024	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1025	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1026	struct crypto_aes_ctx aes;
1027	int ret, i;
1028	u8 *key;
1029	unsigned int keylen;
1030	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1031	int round8 = round / 8;
1032
1033	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1034
1035	keylen = ablkctx->enckey_len / 2;
1036	key = ablkctx->key + keylen;
1037	ret = aes_expandkey(&aes, key, keylen);
1038	if (ret)
1039		return ret;
1040	aes_encrypt(&aes, iv, iv);
1041	for (i = 0; i < round8; i++)
1042		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1043
1044	for (i = 0; i < (round % 8); i++)
1045		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1046
1047	if (!isfinal)
1048		aes_decrypt(&aes, iv, iv);
1049
1050	memzero_explicit(&aes, sizeof(aes));
1051	return 0;
1052}
1053
1054static int chcr_update_cipher_iv(struct ablkcipher_request *req,
1055				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1056{
1057	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1058	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1059	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1060	int ret = 0;
1061
1062	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1063		ctr_add_iv(iv, req->info, (reqctx->processed /
1064			   AES_BLOCK_SIZE));
1065	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1066		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1067			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1068						AES_BLOCK_SIZE) + 1);
1069	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1070		ret = chcr_update_tweak(req, iv, 0);
1071	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1072		if (reqctx->op)
1073			/* Updated before sending last WR */
1074			memcpy(iv, req->info, AES_BLOCK_SIZE);
1075		else
1076			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1077	}
1078
1079	return ret;
1080
1081}
1082
1083/* We need a separate function for the final IV because in RFC3686 the
1084 * initial counter starts from 1, and the IV buffer is only 8 bytes, which
1085 * remains constant across subsequent update requests.
1086 */
1087
1088static int chcr_final_cipher_iv(struct ablkcipher_request *req,
1089				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1090{
1091	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1092	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1093	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1094	int ret = 0;
1095
1096	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1097		ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
1098						       AES_BLOCK_SIZE));
1099	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1100		ret = chcr_update_tweak(req, iv, 1);
1101	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1102		/* Already updated for decrypt */
1103		if (!reqctx->op)
1104			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1105
1106	}
1107	return ret;
1108
1109}
1110
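/* Completion handler for one cipher WR. Requests larger than a single
 * WR are processed in chunks: when everything is done, compute the
 * final IV and complete; otherwise update the IV and either issue the
 * next WR or, if no further bytes fit (bytes == 0), finish the whole
 * request through the software fallback.
 */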
1111static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
1112				   unsigned char *input, int err)
1113{
1114	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1115	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1116	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1117	struct sk_buff *skb;
1118	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1119	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1120	struct  cipher_wr_param wrparam;
1121	struct chcr_dev *dev = c_ctx(tfm)->dev;
1122	int bytes;
1123
1124	if (err)
1125		goto unmap;
1126	if (req->nbytes == reqctx->processed) {
1127		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1128				      req);
1129		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
1130		goto complete;
1131	}
1132
1133	if (!reqctx->imm) {
1134		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1135					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1136					  reqctx->src_ofst, reqctx->dst_ofst);
1137		if ((bytes + reqctx->processed) >= req->nbytes)
1138			bytes  = req->nbytes - reqctx->processed;
1139		else
1140			bytes = rounddown(bytes, 16);
1141	} else {
1142		/* CTR mode counter overflow */
1143		bytes  = req->nbytes - reqctx->processed;
1144	}
1145	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1146	if (err)
1147		goto unmap;
1148
1149	if (unlikely(bytes == 0)) {
1150		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1151				      req);
1152		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1153				     req->base.flags,
1154				     req->src,
1155				     req->dst,
1156				     req->nbytes,
1157				     req->info,
1158				     reqctx->op);
1159		goto complete;
1160	}
1161
1162	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1163	    CRYPTO_ALG_SUB_TYPE_CTR)
1164		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1165	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
1166	wrparam.req = req;
1167	wrparam.bytes = bytes;
1168	skb = create_cipher_wr(&wrparam);
1169	if (IS_ERR(skb)) {
1170		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
1171		err = PTR_ERR(skb);
1172		goto unmap;
1173	}
1174	skb->dev = u_ctx->lldi.ports[0];
1175	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1176	chcr_send_wr(skb);
1177	reqctx->last_req_len = bytes;
1178	reqctx->processed += bytes;
1179	return 0;
1180unmap:
1181	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1182complete:
1183	chcr_dec_wrcount(dev);
1184	req->base.complete(&req->base, err);
1185	return err;
1186}
1187
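/* Build the first WR for a cipher request. Small requests whose header,
 * IV and payload fit in SGE_MAX_WR_LEN are sent as immediate data
 * (reqctx->imm); larger ones are chunked by chcr_sg_ent_in_wr() and
 * continued from chcr_handle_cipher_resp().
 */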
1188static int process_cipher(struct ablkcipher_request *req,
1189				  unsigned short qid,
1190				  struct sk_buff **skb,
1191				  unsigned short op_type)
1192{
1193	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1194	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
1195	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1196	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1197	struct	cipher_wr_param wrparam;
1198	int bytes, err = -EINVAL;
1199
1200	reqctx->processed = 0;
1201	if (!req->info)
1202		goto error;
1203	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1204	    (req->nbytes == 0) ||
1205	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
1206		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1207		       ablkctx->enckey_len, req->nbytes, ivsize);
1208		goto error;
1209	}
1210
1211	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1212	if (err)
1213		goto error;
1214	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1215					    AES_MIN_KEY_SIZE +
1216					    sizeof(struct cpl_rx_phys_dsgl) +
1217					/* minimum dsgl size */
1218					    32))) {
1219		/* Can be sent as immediate data */
1220		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1221
1222		dnents = sg_nents_xlen(req->dst, req->nbytes,
1223				       CHCR_DST_SG_SIZE, 0);
1224		phys_dsgl = get_space_for_phys_dsgl(dnents);
1225		kctx_len = roundup(ablkctx->enckey_len, 16);
1226		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1227		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
1228			SGE_MAX_WR_LEN;
1229		bytes = IV + req->nbytes;
1230
1231	} else {
1232		reqctx->imm = 0;
1233	}
1234
1235	if (!reqctx->imm) {
1236		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1237					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1238					  0, 0);
1239		if ((bytes + reqctx->processed) >= req->nbytes)
1240			bytes  = req->nbytes - reqctx->processed;
1241		else
1242			bytes = rounddown(bytes, 16);
1243	} else {
1244		bytes = req->nbytes;
1245	}
1246	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1247	    CRYPTO_ALG_SUB_TYPE_CTR) {
1248		bytes = adjust_ctr_overflow(req->info, bytes);
1249	}
1250	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1251	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1252		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1253		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
1254				CTR_RFC3686_IV_SIZE);
1255
1256		/* initialize counter portion of counter block */
1257		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1258			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1259
1260	} else {
1261
1262		memcpy(reqctx->iv, req->info, IV);
1263	}
1264	if (unlikely(bytes == 0)) {
1265		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1266				      req);
1267		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1268					   req->base.flags,
1269					   req->src,
1270					   req->dst,
1271					   req->nbytes,
1272					   reqctx->iv,
1273					   op_type);
1274		goto error;
1275	}
1276	reqctx->op = op_type;
1277	reqctx->srcsg = req->src;
1278	reqctx->dstsg = req->dst;
1279	reqctx->src_ofst = 0;
1280	reqctx->dst_ofst = 0;
1281	wrparam.qid = qid;
1282	wrparam.req = req;
1283	wrparam.bytes = bytes;
1284	*skb = create_cipher_wr(&wrparam);
1285	if (IS_ERR(*skb)) {
1286		err = PTR_ERR(*skb);
1287		goto unmap;
1288	}
1289	reqctx->processed = bytes;
1290	reqctx->last_req_len = bytes;
1291
1292	return 0;
1293unmap:
1294	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1295error:
1296	return err;
1297}
1298
1299static int chcr_aes_encrypt(struct ablkcipher_request *req)
1300{
1301	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1302	struct chcr_dev *dev = c_ctx(tfm)->dev;
1303	struct sk_buff *skb = NULL;
1304	int err, isfull = 0;
1305	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1306
1307	err = chcr_inc_wrcount(dev);
1308	if (err)
1309		return -ENXIO;
1310	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1311					    c_ctx(tfm)->tx_qidx))) {
1312		isfull = 1;
1313		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1314			err = -ENOSPC;
1315			goto error;
1316		}
1317	}
1318
1319	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1320			     &skb, CHCR_ENCRYPT_OP);
1321	if (err || !skb)
1322		goto error;
1323	skb->dev = u_ctx->lldi.ports[0];
1324	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1325	chcr_send_wr(skb);
1326	return isfull ? -EBUSY : -EINPROGRESS;
1327error:
1328	chcr_dec_wrcount(dev);
1329	return err;
1330}
1331
1332static int chcr_aes_decrypt(struct ablkcipher_request *req)
1333{
1334	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1335	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1336	struct chcr_dev *dev = c_ctx(tfm)->dev;
1337	struct sk_buff *skb = NULL;
1338	int err, isfull = 0;
1339
1340	err = chcr_inc_wrcount(dev);
1341	if (err)
1342		return -ENXIO;
1343
1344	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1345					    c_ctx(tfm)->tx_qidx))) {
1346		isfull = 1;
1347		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			/* drop the inflight count taken above, as on the encrypt path */
			chcr_dec_wrcount(dev);
1348			return -ENOSPC;
		}
1349	}
1350
1351	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1352			     &skb, CHCR_DECRYPT_OP);
1353	if (err || !skb) {
		chcr_dec_wrcount(dev);
1354		return err;
	}
1355	skb->dev = u_ctx->lldi.ports[0];
1356	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1357	chcr_send_wr(skb);
1358	return isfull ? -EBUSY : -EINPROGRESS;
1359}
1360
1361static int chcr_device_init(struct chcr_context *ctx)
1362{
1363	struct uld_ctx *u_ctx = NULL;
1364	unsigned int id;
1365	int txq_perchan, txq_idx, ntxq;
1366	int err = 0, rxq_perchan, rxq_idx;
1367
1368	id = smp_processor_id();
1369	if (!ctx->dev) {
1370		u_ctx = assign_chcr_device();
1371		if (!u_ctx) {
1372			err = -ENXIO;
1373			pr_err("chcr device assignment failed\n");
1374			goto out;
1375		}
1376		ctx->dev = &u_ctx->dev;
1377		ntxq = u_ctx->lldi.ntxq;
1378		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1379		txq_perchan = ntxq / u_ctx->lldi.nchan;
1380		spin_lock(&ctx->dev->lock_chcr_dev);
1381		ctx->tx_chan_id = ctx->dev->tx_channel_id;
1382		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
1383		spin_unlock(&ctx->dev->lock_chcr_dev);
1384		rxq_idx = ctx->tx_chan_id * rxq_perchan;
1385		rxq_idx += id % rxq_perchan;
1386		txq_idx = ctx->tx_chan_id * txq_perchan;
1387		txq_idx += id % txq_perchan;
1388		ctx->rx_qidx = rxq_idx;
1389		ctx->tx_qidx = txq_idx;
1390		/* Channel ID used by the SGE to forward packets to the host.
1391		 * The same value should be used by the FW in the cpl_fw6_pld
1392		 * RSS_CH field. The driver programs the PCI channel ID to be
1393		 * used in FW at queue-allocation time, via "pi->tx_chan".
1394		 */
1395		ctx->pci_chan_id = txq_idx / txq_perchan;
1396	}
1397out:
1398	return err;
1399}
1400
1401static int chcr_cra_init(struct crypto_tfm *tfm)
1402{
1403	struct crypto_alg *alg = tfm->__crt_alg;
1404	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1405	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1406
1407	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
1408				CRYPTO_ALG_NEED_FALLBACK);
1409	if (IS_ERR(ablkctx->sw_cipher)) {
1410		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1411		return PTR_ERR(ablkctx->sw_cipher);
1412	}
1413
1414	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
1415	return chcr_device_init(crypto_tfm_ctx(tfm));
1416}
1417
1418static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1419{
1420	struct crypto_alg *alg = tfm->__crt_alg;
1421	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1422	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1423
1424	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1425	 * cannot be used as the fallback in chcr_handle_cipher_resp().
1426	 */
1427	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
1428				CRYPTO_ALG_NEED_FALLBACK);
1429	if (IS_ERR(ablkctx->sw_cipher)) {
1430		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1431		return PTR_ERR(ablkctx->sw_cipher);
1432	}
1433	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
1434	return chcr_device_init(crypto_tfm_ctx(tfm));
1435}
1436
1437
1438static void chcr_cra_exit(struct crypto_tfm *tfm)
1439{
1440	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1441	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1442
1443	crypto_free_sync_skcipher(ablkctx->sw_cipher);
1444}
1445
1446static int get_alg_config(struct algo_param *params,
1447			  unsigned int auth_size)
1448{
1449	switch (auth_size) {
1450	case SHA1_DIGEST_SIZE:
1451		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1452		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1453		params->result_size = SHA1_DIGEST_SIZE;
1454		break;
1455	case SHA224_DIGEST_SIZE:
1456		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1457		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1458		params->result_size = SHA256_DIGEST_SIZE;
1459		break;
1460	case SHA256_DIGEST_SIZE:
1461		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1462		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1463		params->result_size = SHA256_DIGEST_SIZE;
1464		break;
1465	case SHA384_DIGEST_SIZE:
1466		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1467		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1468		params->result_size = SHA512_DIGEST_SIZE;
1469		break;
1470	case SHA512_DIGEST_SIZE:
1471		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1472		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1473		params->result_size = SHA512_DIGEST_SIZE;
1474		break;
1475	default:
1476		pr_err("chcr : ERROR, unsupported digest size\n");
1477		return -EINVAL;
1478	}
1479	return 0;
1480}
1481
1482static inline void chcr_free_shash(struct crypto_shash *base_hash)
1483{
1484	crypto_free_shash(base_hash);
1485}
1486
1487/**
1488 *	create_hash_wr - Create a hash work request
1489 *	@req: hash request
 *	@param: per-WR hash parameters (lengths, flags, key-context size)
1490 */
1491static struct sk_buff *create_hash_wr(struct ahash_request *req,
1492				      struct hash_wr_param *param)
1493{
1494	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1495	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1496	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
1497	struct sk_buff *skb = NULL;
1498	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1499	struct chcr_wr *chcr_req;
1500	struct ulptx_sgl *ulptx;
1501	unsigned int nents = 0, transhdr_len;
1502	unsigned int temp = 0;
1503	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1504		GFP_ATOMIC;
1505	struct adapter *adap = padap(h_ctx(tfm)->dev);
1506	int error = 0;
1507
1508	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1509	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1510				param->sg_len) <= SGE_MAX_WR_LEN;
1511	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1512		      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1513	nents += param->bfr_len ? 1 : 0;
1514	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1515				param->sg_len, 16) : (sgl_len(nents) * 8);
1516	transhdr_len = roundup(transhdr_len, 16);
1517
1518	skb = alloc_skb(transhdr_len, flags);
1519	if (!skb)
1520		return ERR_PTR(-ENOMEM);
1521	chcr_req = __skb_put_zero(skb, transhdr_len);
1522
1523	chcr_req->sec_cpl.op_ivinsrtofst =
1524		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
1525	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1526
1527	chcr_req->sec_cpl.aadstart_cipherstop_hi =
1528		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1529	chcr_req->sec_cpl.cipherstop_lo_authinsert =
1530		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1531	chcr_req->sec_cpl.seqno_numivs =
1532		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1533					 param->opad_needed, 0);
1534
1535	chcr_req->sec_cpl.ivgen_hdrlen =
1536		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1537
1538	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1539	       param->alg_prm.result_size);
1540
1541	if (param->opad_needed)
1542		memcpy(chcr_req->key_ctx.key +
1543		       ((param->alg_prm.result_size <= 32) ? 32 :
1544			CHCR_HASH_MAX_DIGEST_SIZE),
1545		       hmacctx->opad, param->alg_prm.result_size);
1546
1547	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1548					    param->alg_prm.mk_size, 0,
1549					    param->opad_needed,
1550					    ((param->kctx_len +
1551					     sizeof(chcr_req->key_ctx)) >> 4));
1552	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1553	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1554				     DUMMY_BYTES);
1555	if (param->bfr_len != 0) {
1556		req_ctx->hctx_wr.dma_addr =
1557			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1558				       param->bfr_len, DMA_TO_DEVICE);
1559		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1560				       req_ctx->hctx_wr.dma_addr)) {
1561			error = -ENOMEM;
1562			goto err;
1563		}
1564		req_ctx->hctx_wr.dma_len = param->bfr_len;
1565	} else {
1566		req_ctx->hctx_wr.dma_addr = 0;
1567	}
1568	chcr_add_hash_src_ent(req, ulptx, param);
1569	/* Request up to the max WR size */
1570	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1571				(param->sg_len + param->bfr_len) : 0);
1572	atomic_inc(&adap->chcr_stats.digest_rqst);
1573	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1574		    param->hash_size, transhdr_len,
1575		    temp,  0);
1576	req_ctx->hctx_wr.skb = skb;
1577	return skb;
1578err:
1579	kfree_skb(skb);
1580	return  ERR_PTR(error);
1581}
1582
1583static int chcr_ahash_update(struct ahash_request *req)
1584{
1585	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1586	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1587	struct uld_ctx *u_ctx = NULL;
1588	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1589	struct sk_buff *skb;
1590	u8 remainder = 0, bs;
1591	unsigned int nbytes = req->nbytes;
1592	struct hash_wr_param params;
1593	int error, isfull = 0;
1594
1595	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1596	u_ctx = ULD_CTX(h_ctx(rtfm));
1597
1598	if (nbytes + req_ctx->reqlen >= bs) {
1599		remainder = (nbytes + req_ctx->reqlen) % bs;
1600		nbytes = nbytes + req_ctx->reqlen - remainder;
1601	} else {
1602		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1603				   + req_ctx->reqlen, nbytes, 0);
1604		req_ctx->reqlen += nbytes;
1605		return 0;
1606	}
1607	error = chcr_inc_wrcount(dev);
1608	if (error)
1609		return -ENXIO;
1610	/* Detach state for CHCR means lldi or padap is freed. Increasing the
1611	 * inflight count for the dev guarantees that lldi and padap remain valid.
1612	 */
1613	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1614					    h_ctx(rtfm)->tx_qidx))) {
1615		isfull = 1;
1616		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1617			error = -ENOSPC;
1618			goto err;
1619		}
1620	}
1621
1622	chcr_init_hctx_per_wr(req_ctx);
1623	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1624	if (error) {
1625		error = -ENOMEM;
1626		goto err;
1627	}
1628	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1629	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1630	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1631				     HASH_SPACE_LEFT(params.kctx_len), 0);
1632	if (params.sg_len > req->nbytes)
1633		params.sg_len = req->nbytes;
1634	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1635			req_ctx->reqlen;
1636	params.opad_needed = 0;
1637	params.more = 1;
1638	params.last = 0;
1639	params.bfr_len = req_ctx->reqlen;
1640	params.scmd1 = 0;
1641	req_ctx->hctx_wr.srcsg = req->src;
1642
1643	params.hash_size = params.alg_prm.result_size;
1644	req_ctx->data_len += params.sg_len + params.bfr_len;
1645	skb = create_hash_wr(req, &params);
1646	if (IS_ERR(skb)) {
1647		error = PTR_ERR(skb);
1648		goto unmap;
1649	}
1650
1651	req_ctx->hctx_wr.processed += params.sg_len;
1652	if (remainder) {
1653		/* Swap buffers */
1654		swap(req_ctx->reqbfr, req_ctx->skbfr);
1655		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1656				   req_ctx->reqbfr, remainder, req->nbytes -
1657				   remainder);
1658	}
1659	req_ctx->reqlen = remainder;
1660	skb->dev = u_ctx->lldi.ports[0];
1661	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1662	chcr_send_wr(skb);
1663
1664	return isfull ? -EBUSY : -EINPROGRESS;
1665unmap:
1666	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1667err:
1668	chcr_dec_wrcount(dev);
1669	return error;
1670}
1671
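/* Build a final MD-style padding block by hand: a 0x80 byte, zero fill,
 * and the message length in bits (scmd1 << 3) as a big-endian 64-bit
 * value in the last 8 bytes (offset 56 for 64-byte blocks, offset 120
 * for 128-byte blocks).
 */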
1672static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1673{
1674	memset(bfr_ptr, 0, bs);
1675	*bfr_ptr = 0x80;
1676	if (bs == 64)
1677		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1678	else
1679		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1680}
1681
1682static int chcr_ahash_final(struct ahash_request *req)
1683{
1684	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1685	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1686	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1687	struct hash_wr_param params;
1688	struct sk_buff *skb;
1689	struct uld_ctx *u_ctx = NULL;
1690	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1691	int error = -EINVAL;
1692
1693	error = chcr_inc_wrcount(dev);
1694	if (error)
1695		return -ENXIO;
1696
1697	chcr_init_hctx_per_wr(req_ctx);
1698	u_ctx = ULD_CTX(h_ctx(rtfm));
1699	if (is_hmac(crypto_ahash_tfm(rtfm)))
1700		params.opad_needed = 1;
1701	else
1702		params.opad_needed = 0;
1703	params.sg_len = 0;
1704	req_ctx->hctx_wr.isfinal = 1;
1705	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1706	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1707	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1708		params.opad_needed = 1;
1709		params.kctx_len *= 2;
1710	} else {
1711		params.opad_needed = 0;
1712	}
1713
1714	req_ctx->hctx_wr.result = 1;
1715	params.bfr_len = req_ctx->reqlen;
1716	req_ctx->data_len += params.bfr_len + params.sg_len;
1717	req_ctx->hctx_wr.srcsg = req->src;
1718	if (req_ctx->reqlen == 0) {
1719		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1720		params.last = 0;
1721		params.more = 1;
1722		params.scmd1 = 0;
1723		params.bfr_len = bs;
1724
1725	} else {
1726		params.scmd1 = req_ctx->data_len;
1727		params.last = 1;
1728		params.more = 0;
1729	}
1730	params.hash_size = crypto_ahash_digestsize(rtfm);
1731	skb = create_hash_wr(req, &params);
1732	if (IS_ERR(skb)) {
1733		error = PTR_ERR(skb);
1734		goto err;
1735	}
1736	req_ctx->reqlen = 0;
1737	skb->dev = u_ctx->lldi.ports[0];
1738	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1739	chcr_send_wr(skb);
1740	return -EINPROGRESS;
1741err:
1742	chcr_dec_wrcount(dev);
1743	return error;
1744}
1745
1746static int chcr_ahash_finup(struct ahash_request *req)
1747{
1748	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1749	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1750	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1751	struct uld_ctx *u_ctx = NULL;
1752	struct sk_buff *skb;
1753	struct hash_wr_param params;
1754	u8  bs;
1755	int error, isfull = 0;
1756
1757	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1758	u_ctx = ULD_CTX(h_ctx(rtfm));
1759	error = chcr_inc_wrcount(dev);
1760	if (error)
1761		return -ENXIO;
1762
1763	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1764					    h_ctx(rtfm)->tx_qidx))) {
1765		isfull = 1;
1766		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1767			error = -ENOSPC;
1768			goto err;
1769		}
1770	}
1771	chcr_init_hctx_per_wr(req_ctx);
1772	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1773	if (error) {
1774		error = -ENOMEM;
1775		goto err;
1776	}
1777
1778	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1779	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1780	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1781		params.kctx_len *= 2;
1782		params.opad_needed = 1;
1783	} else {
1784		params.opad_needed = 0;
1785	}
1786
1787	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1788				    HASH_SPACE_LEFT(params.kctx_len), 0);
1789	if (params.sg_len < req->nbytes) {
1790		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1791			params.kctx_len /= 2;
1792			params.opad_needed = 0;
1793		}
1794		params.last = 0;
1795		params.more = 1;
1796		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1797					- req_ctx->reqlen;
1798		params.hash_size = params.alg_prm.result_size;
1799		params.scmd1 = 0;
1800	} else {
1801		params.last = 1;
1802		params.more = 0;
1803		params.sg_len = req->nbytes;
1804		params.hash_size = crypto_ahash_digestsize(rtfm);
1805		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1806				params.sg_len;
1807	}
1808	params.bfr_len = req_ctx->reqlen;
1809	req_ctx->data_len += params.bfr_len + params.sg_len;
1810	req_ctx->hctx_wr.result = 1;
1811	req_ctx->hctx_wr.srcsg = req->src;
1812	if ((req_ctx->reqlen + req->nbytes) == 0) {
1813		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1814		params.last = 0;
1815		params.more = 1;
1816		params.scmd1 = 0;
1817		params.bfr_len = bs;
1818	}
1819	skb = create_hash_wr(req, &params);
1820	if (IS_ERR(skb)) {
1821		error = PTR_ERR(skb);
1822		goto unmap;
1823	}
1824	req_ctx->reqlen = 0;
1825	req_ctx->hctx_wr.processed += params.sg_len;
1826	skb->dev = u_ctx->lldi.ports[0];
1827	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1828	chcr_send_wr(skb);
1829
1830	return isfull ? -EBUSY : -EINPROGRESS;
1831unmap:
1832	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1833err:
1834	chcr_dec_wrcount(dev);
1835	return error;
1836}
1837
1838static int chcr_ahash_digest(struct ahash_request *req)
1839{
1840	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1841	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1842	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1843	struct uld_ctx *u_ctx = NULL;
1844	struct sk_buff *skb;
1845	struct hash_wr_param params;
1846	u8  bs;
1847	int error, isfull = 0;
1848
1849	rtfm->init(req);
1850	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1851	error = chcr_inc_wrcount(dev);
1852	if (error)
1853		return -ENXIO;
1854
1855	u_ctx = ULD_CTX(h_ctx(rtfm));
1856	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1857					    h_ctx(rtfm)->tx_qidx))) {
1858		isfull = 1;
1859		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1860			error = -ENOSPC;
1861			goto err;
1862		}
1863	}
1864
1865	chcr_init_hctx_per_wr(req_ctx);
1866	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1867	if (error) {
1868		error = -ENOMEM;
1869		goto err;
1870	}
1871
1872	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1873	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1874	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1875		params.kctx_len *= 2;
1876		params.opad_needed = 1;
1877	} else {
1878		params.opad_needed = 0;
1879	}
1880	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1881				HASH_SPACE_LEFT(params.kctx_len), 0);
1882	if (params.sg_len < req->nbytes) {
1883		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1884			params.kctx_len /= 2;
1885			params.opad_needed = 0;
1886		}
1887		params.last = 0;
1888		params.more = 1;
1889		params.scmd1 = 0;
1890		params.sg_len = rounddown(params.sg_len, bs);
1891		params.hash_size = params.alg_prm.result_size;
1892	} else {
1893		params.sg_len = req->nbytes;
1894		params.hash_size = crypto_ahash_digestsize(rtfm);
1895		params.last = 1;
1896		params.more = 0;
1897		params.scmd1 = req->nbytes + req_ctx->data_len;
1898
1899	}
1900	params.bfr_len = 0;
1901	req_ctx->hctx_wr.result = 1;
1902	req_ctx->hctx_wr.srcsg = req->src;
1903	req_ctx->data_len += params.bfr_len + params.sg_len;
1904
1905	if (req->nbytes == 0) {
1906		create_last_hash_block(req_ctx->reqbfr, bs, 0);
1907		params.more = 1;
1908		params.bfr_len = bs;
1909	}
1910
1911	skb = create_hash_wr(req, &params);
1912	if (IS_ERR(skb)) {
1913		error = PTR_ERR(skb);
1914		goto unmap;
1915	}
1916	req_ctx->hctx_wr.processed += params.sg_len;
1917	skb->dev = u_ctx->lldi.ports[0];
1918	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1919	chcr_send_wr(skb);
1920	return isfull ? -EBUSY : -EINPROGRESS;
1921unmap:
1922	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1923err:
1924	chcr_dec_wrcount(dev);
1925	return error;
1926}
1927
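/* Issue the next WR for a partially processed hash request; called from
 * chcr_handle_ahash_resp() until all of req->src has been submitted.
 */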
1928static int chcr_ahash_continue(struct ahash_request *req)
1929{
1930	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1931	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1932	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1933	struct uld_ctx *u_ctx = NULL;
1934	struct sk_buff *skb;
1935	struct hash_wr_param params;
1936	u8  bs;
1937	int error;
1938
1939	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1940	u_ctx = ULD_CTX(h_ctx(rtfm));
1941	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1942	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1943	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1944		params.kctx_len *= 2;
1945		params.opad_needed = 1;
1946	} else {
1947		params.opad_needed = 0;
1948	}
1949	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
1950					    HASH_SPACE_LEFT(params.kctx_len),
1951					    hctx_wr->src_ofst);
1952	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
1953		params.sg_len = req->nbytes - hctx_wr->processed;
1954	if (!hctx_wr->result ||
1955	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
1956		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1957			params.kctx_len /= 2;
1958			params.opad_needed = 0;
1959		}
1960		params.last = 0;
1961		params.more = 1;
1962		params.sg_len = rounddown(params.sg_len, bs);
1963		params.hash_size = params.alg_prm.result_size;
1964		params.scmd1 = 0;
1965	} else {
1966		params.last = 1;
1967		params.more = 0;
1968		params.hash_size = crypto_ahash_digestsize(rtfm);
1969		params.scmd1 = reqctx->data_len + params.sg_len;
1970	}
1971	params.bfr_len = 0;
1972	reqctx->data_len += params.sg_len;
1973	skb = create_hash_wr(req, &params);
1974	if (IS_ERR(skb)) {
1975		error = PTR_ERR(skb);
1976		goto err;
1977	}
1978	hctx_wr->processed += params.sg_len;
1979	skb->dev = u_ctx->lldi.ports[0];
1980	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1981	chcr_send_wr(skb);
1982	return 0;
1983err:
1984	return error;
1985}
1986
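/* Completion handler for one hash WR: copy out either the final digest
 * or the running partial hash (stored at SHA-256/SHA-512 width for
 * SHA-224/SHA-384), then continue with the next chunk if data remains.
 */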
1987static inline void chcr_handle_ahash_resp(struct ahash_request *req,
1988					  unsigned char *input,
1989					  int err)
1990{
1991	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1992	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1993	int digestsize, updated_digestsize;
1994	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1995	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1996	struct chcr_dev *dev = h_ctx(tfm)->dev;
1997
1998	if (input == NULL)
1999		goto out;
2000	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2001	updated_digestsize = digestsize;
2002	if (digestsize == SHA224_DIGEST_SIZE)
2003		updated_digestsize = SHA256_DIGEST_SIZE;
2004	else if (digestsize == SHA384_DIGEST_SIZE)
2005		updated_digestsize = SHA512_DIGEST_SIZE;
2006
2007	if (hctx_wr->dma_addr) {
2008		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2009				 hctx_wr->dma_len, DMA_TO_DEVICE);
2010		hctx_wr->dma_addr = 0;
2011	}
2012	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2013				 req->nbytes)) {
2014		if (hctx_wr->result == 1) {
2015			hctx_wr->result = 0;
2016			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2017			       digestsize);
2018		} else {
2019			memcpy(reqctx->partial_hash,
2020			       input + sizeof(struct cpl_fw6_pld),
2021			       updated_digestsize);
2022
2023		}
2024		goto unmap;
2025	}
2026	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2027	       updated_digestsize);
2028
2029	err = chcr_ahash_continue(req);
2030	if (err)
2031		goto unmap;
2032	return;
2033unmap:
2034	if (hctx_wr->is_sg_map)
2035		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2036
2037
2038out:
2039	chcr_dec_wrcount(dev);
2040	req->base.complete(&req->base, err);
2041}
2042
2043/*
2044 *	chcr_handle_resp - dispatch a completed WR to the matching AEAD,
 *	cipher or hash handler, which unmaps the request's DMA buffers
2045 *	@req: crypto request
2046 */
2047int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2048			 int err)
2049{
2050	struct crypto_tfm *tfm = req->tfm;
2051	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2052	struct adapter *adap = padap(ctx->dev);
2053
2054	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2055	case CRYPTO_ALG_TYPE_AEAD:
2056		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2057		break;
2058
2059	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2060		chcr_handle_cipher_resp(ablkcipher_request_cast(req),
2061					input, err);
2062		break;
2063	case CRYPTO_ALG_TYPE_AHASH:
2064		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2065	}
2066	atomic_inc(&adap->chcr_stats.complete);
2067	return err;
2068}
2069static int chcr_ahash_export(struct ahash_request *areq, void *out)
2070{
2071	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2072	struct chcr_ahash_req_ctx *state = out;
2073
2074	state->reqlen = req_ctx->reqlen;
2075	state->data_len = req_ctx->data_len;
2076	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2077	memcpy(state->partial_hash, req_ctx->partial_hash,
2078	       CHCR_HASH_MAX_DIGEST_SIZE);
2079	chcr_init_hctx_per_wr(state);
2080	return 0;
2081}
2082
2083static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2084{
2085	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2086	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2087
2088	req_ctx->reqlen = state->reqlen;
2089	req_ctx->data_len = state->data_len;
2090	req_ctx->reqbfr = req_ctx->bfr1;
2091	req_ctx->skbfr = req_ctx->bfr2;
2092	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2093	memcpy(req_ctx->partial_hash, state->partial_hash,
2094	       CHCR_HASH_MAX_DIGEST_SIZE);
2095	chcr_init_hctx_per_wr(req_ctx);
2096	return 0;
2097}
2098
2099static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2100			     unsigned int keylen)
2101{
2102	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2103	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2104	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2105	unsigned int i, err = 0, updated_digestsize;
2106
2107	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2108
2109	/* Use the key to calculate the ipad and opad. The ipad will be sent
2110	 * with the first request's data, and the opad with the final hash
2111	 * result; they are kept in hmacctx->ipad and hmacctx->opad.
2112	 */
2113	shash->tfm = hmacctx->base_hash;
2114	if (keylen > bs) {
2115		err = crypto_shash_digest(shash, key, keylen,
2116					  hmacctx->ipad);
2117		if (err)
2118			goto out;
2119		keylen = digestsize;
2120	} else {
2121		memcpy(hmacctx->ipad, key, keylen);
2122	}
2123	memset(hmacctx->ipad + keylen, 0, bs - keylen);
2124	memcpy(hmacctx->opad, hmacctx->ipad, bs);
2125
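	/* Per RFC 2104, ipad/opad are the zero-padded key XORed with
	 * repeated 0x36/0x5c bytes, so that
	 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
	 */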
2126	for (i = 0; i < bs / sizeof(int); i++) {
2127		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2128		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2129	}
2130
2131	updated_digestsize = digestsize;
2132	if (digestsize == SHA224_DIGEST_SIZE)
2133		updated_digestsize = SHA256_DIGEST_SIZE;
2134	else if (digestsize == SHA384_DIGEST_SIZE)
2135		updated_digestsize = SHA512_DIGEST_SIZE;
2136	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2137					hmacctx->ipad, digestsize);
2138	if (err)
2139		goto out;
2140	chcr_change_order(hmacctx->ipad, updated_digestsize);
2141
2142	err = chcr_compute_partial_hash(shash, hmacctx->opad,
2143					hmacctx->opad, digestsize);
2144	if (err)
2145		goto out;
2146	chcr_change_order(hmacctx->opad, updated_digestsize);
2147out:
2148	return err;
2149}
2150
2151static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
2152			       unsigned int key_len)
2153{
2154	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2155	unsigned short context_size = 0;
2156	int err;
2157
2158	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2159	if (err)
2160		goto badkey_err;
2161
2162	memcpy(ablkctx->key, key, key_len);
2163	ablkctx->enckey_len = key_len;
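	/* XTS keys are two AES keys back to back; the reverse-round key is
	 * derived from the first half, whose size in bits is
	 * (key_len / 2) * 8 == key_len << 2.
	 */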
2164	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2165	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2166	ablkctx->key_ctx_hdr =
2167		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2168				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2169				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2170				 CHCR_KEYCTX_NO_KEY, 1,
2171				 0, context_size);
2172	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2173	return 0;
2174badkey_err:
2175	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2176	ablkctx->enckey_len = 0;
2177
2178	return err;
2179}
2180
2181static int chcr_sha_init(struct ahash_request *areq)
2182{
2183	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2184	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2185	int digestsize =  crypto_ahash_digestsize(tfm);
2186
2187	req_ctx->data_len = 0;
2188	req_ctx->reqlen = 0;
2189	req_ctx->reqbfr = req_ctx->bfr1;
2190	req_ctx->skbfr = req_ctx->bfr2;
2191	copy_hash_init_values(req_ctx->partial_hash, digestsize);
2192
2193	return 0;
2194}
2195
2196static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2197{
2198	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2199				 sizeof(struct chcr_ahash_req_ctx));
2200	return chcr_device_init(crypto_tfm_ctx(tfm));
2201}
2202
2203static int chcr_hmac_init(struct ahash_request *areq)
2204{
2205	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2206	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2207	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2208	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2209	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2210
2211	chcr_sha_init(areq);
2212	req_ctx->data_len = bs;
2213	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2214		if (digestsize == SHA224_DIGEST_SIZE)
2215			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2216			       SHA256_DIGEST_SIZE);
2217		else if (digestsize == SHA384_DIGEST_SIZE)
2218			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2219			       SHA512_DIGEST_SIZE);
2220		else
2221			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2222			       digestsize);
2223	}
2224	return 0;
2225}
2226
2227static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2228{
2229	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2230	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2231	unsigned int digestsize =
2232		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2233
2234	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2235				 sizeof(struct chcr_ahash_req_ctx));
2236	hmacctx->base_hash = chcr_alloc_shash(digestsize);
2237	if (IS_ERR(hmacctx->base_hash))
2238		return PTR_ERR(hmacctx->base_hash);
2239	return chcr_device_init(crypto_tfm_ctx(tfm));
2240}
2241
2242static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2243{
2244	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2245	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2246
2247	if (hmacctx->base_hash) {
2248		chcr_free_shash(hmacctx->base_hash);
2249		hmacctx->base_hash = NULL;
2250	}
2251}
2252
2253inline void chcr_aead_common_exit(struct aead_request *req)
2254{
2255	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2256	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2257	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2258
2259	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2260}
2261
2262static int chcr_aead_common_init(struct aead_request *req)
2263{
2264	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2265	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2266	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2267	unsigned int authsize = crypto_aead_authsize(tfm);
2268	int error = -EINVAL;
2269
2270	/* validate key size */
2271	if (aeadctx->enckey_len == 0)
2272		goto err;
2273	if (reqctx->op && req->cryptlen < authsize)
2274		goto err;
2275	if (reqctx->b0_len)
2276		reqctx->scratch_pad = reqctx->iv + IV;
2277	else
2278		reqctx->scratch_pad = NULL;
2279
2280	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2281				  reqctx->op);
2282	if (error) {
2283		error = -ENOMEM;
2284		goto err;
2285	}
2286
2287	return 0;
2288err:
2289	return error;
2290}
2291
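/* Fall back to software when the request cannot be expressed in one WR:
 * an effectively empty payload, too many destination SG entries, AAD
 * beyond what the hardware accepts, or a WR longer than SGE_MAX_WR_LEN.
 */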
2292static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2293				   int aadmax, int wrlen,
2294				   unsigned short op_type)
2295{
2296	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2297
2298	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2299	    dst_nents > MAX_DSGL_ENT ||
2300	    (req->assoclen > aadmax) ||
2301	    (wrlen > SGE_MAX_WR_LEN))
2302		return 1;
2303	return 0;
2304}
2305
2306static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2307{
2308	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2309	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2310	struct aead_request *subreq = aead_request_ctx(req);
2311
2312	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2313	aead_request_set_callback(subreq, req->base.flags,
2314				  req->base.complete, req->base.data);
2315	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2316				 req->iv);
2317	aead_request_set_ad(subreq, req->assoclen);
2318	return op_type ? crypto_aead_decrypt(subreq) :
2319		crypto_aead_encrypt(subreq);
2320}
2321
2322static struct sk_buff *create_authenc_wr(struct aead_request *req,
2323					 unsigned short qid,
2324					 int size)
2325{
2326	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2327	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2328	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2329	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2330	struct sk_buff *skb = NULL;
2331	struct chcr_wr *chcr_req;
2332	struct cpl_rx_phys_dsgl *phys_cpl;
2333	struct ulptx_sgl *ulptx;
2334	unsigned int transhdr_len;
2335	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2336	unsigned int   kctx_len = 0, dnents, snents;
2337	unsigned int  authsize = crypto_aead_authsize(tfm);
2338	int error = -EINVAL;
2339	u8 *ivptr;
2340	int null = 0;
2341	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2342		GFP_ATOMIC;
2343	struct adapter *adap = padap(a_ctx(tfm)->dev);
2344
2345	if (req->cryptlen == 0)
2346		return NULL;
2347
2348	reqctx->b0_len = 0;
2349	error = chcr_aead_common_init(req);
2350	if (error)
2351		return ERR_PTR(error);
2352
2353	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2354		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2355		null = 1;
2356	}
2357	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2358		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2359	dnents += MIN_AUTH_SG; /* for the IV */
2360	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2361			       CHCR_SRC_SG_SIZE, 0);
2362	dst_size = get_space_for_phys_dsgl(dnents);
2363	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2364		- sizeof(chcr_req->key_ctx);
2365	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2366	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2367			SGE_MAX_WR_LEN;
2368	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2369			: (sgl_len(snents) * 8);
2370	transhdr_len += temp;
2371	transhdr_len = roundup(transhdr_len, 16);
2372
2373	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2374				    transhdr_len, reqctx->op)) {
2375		atomic_inc(&adap->chcr_stats.fallback);
2376		chcr_aead_common_exit(req);
2377		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2378	}
2379	skb = alloc_skb(transhdr_len, flags);
2380	if (!skb) {
2381		error = -ENOMEM;
2382		goto err;
2383	}
2384
2385	chcr_req = __skb_put_zero(skb, transhdr_len);
2386
2387	temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2388
2389	/*
2390	 * Input order is AAD, IV and payload, where the IV should be included
2391	 * as part of the authdata. All other fields should be filled according
2392	 * to the hardware spec.
2393	 */
2394	chcr_req->sec_cpl.op_ivinsrtofst =
2395		FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
2396	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2397	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2398					null ? 0 : 1 + IV,
2399					null ? 0 : IV + req->assoclen,
2400					req->assoclen + IV + 1,
2401					(temp & 0x1F0) >> 4);
2402	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2403					temp & 0xF,
2404					null ? 0 : req->assoclen + IV + 1,
2405					temp, temp);
2406	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2407	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2408		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2409	else
2410		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2411	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2412					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2413					temp,
2414					actx->auth_mode, aeadctx->hmac_ctrl,
2415					IV >> 1);
2416	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2417					 0, 0, dst_size);
2418
2419	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2420	if (reqctx->op == CHCR_ENCRYPT_OP ||
2421		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2422		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2423		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2424		       aeadctx->enckey_len);
2425	else
2426		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2427		       aeadctx->enckey_len);
2428
2429	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2430	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2431	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2432	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2433	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2434	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2435	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2436		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2437		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2438				CTR_RFC3686_IV_SIZE);
2439		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2440			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2441	} else {
2442		memcpy(ivptr, req->iv, IV);
2443	}
2444	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2445	chcr_add_aead_src_ent(req, ulptx);
2446	atomic_inc(&adap->chcr_stats.cipher_rqst);
2447	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2448		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2449	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2450		   transhdr_len, temp, 0);
2451	reqctx->skb = skb;
2452
2453	return skb;
2454err:
2455	chcr_aead_common_exit(req);
2456
2457	return ERR_PTR(error);
2458}
2459
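/*
 * DMA-map everything the hardware touches for an AEAD request: the IV
 * (plus the CCM B0 block, when present) as a single mapping, and the
 * source/destination scatterlists.  In-place requests (src == dst) are
 * mapped once, bidirectionally.
 */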
2460int chcr_aead_dma_map(struct device *dev,
2461		      struct aead_request *req,
2462		      unsigned short op_type)
2463{
2464	int error;
2465	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2466	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2467	unsigned int authsize = crypto_aead_authsize(tfm);
2468	int dst_size;
2469
2470	dst_size = req->assoclen + req->cryptlen + (op_type ?
2471				-authsize : authsize);
2472	if (!req->cryptlen || !dst_size)
2473		return 0;
2474	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2475					DMA_BIDIRECTIONAL);
2476	if (dma_mapping_error(dev, reqctx->iv_dma))
2477		return -ENOMEM;
2478	if (reqctx->b0_len)
2479		reqctx->b0_dma = reqctx->iv_dma + IV;
2480	else
2481		reqctx->b0_dma = 0;
2482	if (req->src == req->dst) {
2483		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2484				   DMA_BIDIRECTIONAL);
2485		if (!error)
2486			goto err;
2487	} else {
2488		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2489				   DMA_TO_DEVICE);
2490		if (!error)
2491			goto err;
2492		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2493				   DMA_FROM_DEVICE);
2494		if (!error) {
2495			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2496				   DMA_TO_DEVICE);
2497			goto err;
2498		}
2499	}
2500
2501	return 0;
2502err:
2503	dma_unmap_single(dev, reqctx->iv_dma, IV + reqctx->b0_len, DMA_BIDIRECTIONAL);
2504	return -ENOMEM;
2505}
2506
2507void chcr_aead_dma_unmap(struct device *dev,
2508			 struct aead_request *req,
2509			 unsigned short op_type)
2510{
2511	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2512	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2513	unsigned int authsize = crypto_aead_authsize(tfm);
2514	int dst_size;
2515
2516	dst_size = req->assoclen + req->cryptlen + (op_type ?
2517					-authsize : authsize);
2518	if (!req->cryptlen || !dst_size)
2519		return;
2520
2521	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2522					DMA_BIDIRECTIONAL);
2523	if (req->src == req->dst) {
2524		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2525				   DMA_BIDIRECTIONAL);
2526	} else {
2527		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2528				   DMA_TO_DEVICE);
2529		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2530				   DMA_FROM_DEVICE);
2531	}
2532}
2533
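/*
 * Attach the source data to the WR.  Small requests are copied inline
 * into the WR as immediate data (reqctx->imm); larger ones are described
 * by a ULPTX scatter/gather list built with the ulptx_walk helpers.
 */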
2534void chcr_add_aead_src_ent(struct aead_request *req,
2535			   struct ulptx_sgl *ulptx)
2536{
2537	struct ulptx_walk ulp_walk;
2538	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2539
2540	if (reqctx->imm) {
2541		u8 *buf = (u8 *)ulptx;
2542
2543		if (reqctx->b0_len) {
2544			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2545			buf += reqctx->b0_len;
2546		}
2547		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2548				   buf, req->cryptlen + req->assoclen, 0);
2549	} else {
2550		ulptx_walk_init(&ulp_walk, ulptx);
2551		if (reqctx->b0_len)
2552			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2553					    reqctx->b0_dma);
2554		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2555				  req->assoclen,  0);
2556		ulptx_walk_end(&ulp_walk);
2557	}
2558}
2559
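/*
 * Build the destination DSGL.  The first entry points at the DMA-mapped
 * IV (and B0, for CCM); the rest covers req->dst, extended by the tag
 * length on encryption and shortened by it on decryption.
 */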
2560void chcr_add_aead_dst_ent(struct aead_request *req,
2561			   struct cpl_rx_phys_dsgl *phys_cpl,
2562			   unsigned short qid)
2563{
2564	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2565	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2566	struct dsgl_walk dsgl_walk;
2567	unsigned int authsize = crypto_aead_authsize(tfm);
2568	struct chcr_context *ctx = a_ctx(tfm);
2569	u32 temp;
2570
2571	dsgl_walk_init(&dsgl_walk, phys_cpl);
2572	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2573	temp = req->assoclen + req->cryptlen +
2574		(reqctx->op ? -authsize : authsize);
2575	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2576	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2577}
2578
2579void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2580			     void *ulptx,
2581			     struct  cipher_wr_param *wrparam)
2582{
2583	struct ulptx_walk ulp_walk;
2584	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2585	u8 *buf = ulptx;
2586
2587	memcpy(buf, reqctx->iv, IV);
2588	buf += IV;
2589	if (reqctx->imm) {
2590		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2591				   buf, wrparam->bytes, reqctx->processed);
2592	} else {
2593		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2594		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2595				  reqctx->src_ofst);
2596		reqctx->srcsg = ulp_walk.last_sg;
2597		reqctx->src_ofst = ulp_walk.last_sg_len;
2598		ulptx_walk_end(&ulp_walk);
2599	}
2600}
2601
2602void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2603			     struct cpl_rx_phys_dsgl *phys_cpl,
2604			     struct  cipher_wr_param *wrparam,
2605			     unsigned short qid)
2606{
2607	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2608	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
2609	struct chcr_context *ctx = c_ctx(tfm);
2610	struct dsgl_walk dsgl_walk;
2611
2612	dsgl_walk_init(&dsgl_walk, phys_cpl);
2613	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2614			 reqctx->dst_ofst);
2615	reqctx->dstsg = dsgl_walk.last_sg;
2616	reqctx->dst_ofst = dsgl_walk.last_sg_len;
2617
2618	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2619}
2620
2621void chcr_add_hash_src_ent(struct ahash_request *req,
2622			   struct ulptx_sgl *ulptx,
2623			   struct hash_wr_param *param)
2624{
2625	struct ulptx_walk ulp_walk;
2626	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2627
2628	if (reqctx->hctx_wr.imm) {
2629		u8 *buf = (u8 *)ulptx;
2630
2631		if (param->bfr_len) {
2632			memcpy(buf, reqctx->reqbfr, param->bfr_len);
2633			buf += param->bfr_len;
2634		}
2635
2636		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2637				   sg_nents(reqctx->hctx_wr.srcsg), buf,
2638				   param->sg_len, 0);
2639	} else {
2640		ulptx_walk_init(&ulp_walk, ulptx);
2641		if (param->bfr_len)
2642			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2643					    reqctx->hctx_wr.dma_addr);
2644		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2645				  param->sg_len, reqctx->hctx_wr.src_ofst);
2646		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2647		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2648		ulptx_walk_end(&ulp_walk);
2649	}
2650}
2651
2652int chcr_hash_dma_map(struct device *dev,
2653		      struct ahash_request *req)
2654{
2655	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2656	int error = 0;
2657
2658	if (!req->nbytes)
2659		return 0;
2660	error = dma_map_sg(dev, req->src, sg_nents(req->src),
2661			   DMA_TO_DEVICE);
2662	if (!error)
2663		return -ENOMEM;
2664	req_ctx->hctx_wr.is_sg_map = 1;
2665	return 0;
2666}
2667
2668void chcr_hash_dma_unmap(struct device *dev,
2669			 struct ahash_request *req)
2670{
2671	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2672
2673	if (!req->nbytes)
2674		return;
2675
2676	dma_unmap_sg(dev, req->src, sg_nents(req->src),
2677			   DMA_TO_DEVICE);
2678	req_ctx->hctx_wr.is_sg_map = 0;
2679
2680}
2681
2682int chcr_cipher_dma_map(struct device *dev,
2683			struct ablkcipher_request *req)
2684{
2685	int error;
2686
2687	if (req->src == req->dst) {
2688		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2689				   DMA_BIDIRECTIONAL);
2690		if (!error)
2691			goto err;
2692	} else {
2693		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2694				   DMA_TO_DEVICE);
2695		if (!error)
2696			goto err;
2697		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2698				   DMA_FROM_DEVICE);
2699		if (!error) {
2700			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2701				   DMA_TO_DEVICE);
2702			goto err;
2703		}
2704	}
2705
2706	return 0;
2707err:
2708	return -ENOMEM;
2709}
2710
2711void chcr_cipher_dma_unmap(struct device *dev,
2712			   struct ablkcipher_request *req)
2713{
2714	if (req->src == req->dst) {
2715		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2716				   DMA_BIDIRECTIONAL);
2717	} else {
2718		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2719				   DMA_TO_DEVICE);
2720		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2721				   DMA_FROM_DEVICE);
2722	}
2723}
2724
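/*
 * Encode the message length into the last 'csize' bytes of 'block', as
 * the CCM B0 block requires (RFC 3610): big-endian, zero-padded on the
 * left.  A length that does not fit in csize bytes is rejected.
 */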
2725static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2726{
2727	__be32 data;
2728
2729	memset(block, 0, csize);
2730	block += csize;
2731
2732	if (csize >= 4)
2733		csize = 4;
2734	else if (msglen > (unsigned int)(1 << (8 * csize)))
2735		return -EOVERFLOW;
2736
2737	data = cpu_to_be32(msglen);
2738	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2739
2740	return 0;
2741}
2742
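/*
 * Construct the CCM B0 block (RFC 3610) in the scratch pad.  The flags
 * byte already holds L' (copied from the IV); fold in the encoded tag
 * length M and the Adata bit, then append the message length in the
 * final L bytes.
 */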
2743static int generate_b0(struct aead_request *req, u8 *ivptr,
2744			unsigned short op_type)
2745{
2746	unsigned int l, lp, m;
2747	int rc;
2748	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2749	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2750	u8 *b0 = reqctx->scratch_pad;
2751
2752	m = crypto_aead_authsize(aead);
2753
2754	memcpy(b0, ivptr, 16);
2755
2756	lp = b0[0];
2757	l = lp + 1;
2758
2759	/* set m, bits 3-5 */
2760	*b0 |= (8 * ((m - 2) / 2));
2761
2762	/* set adata, bit 6, if associated data is used */
2763	if (req->assoclen)
2764		*b0 |= 64;
2765	rc = set_msg_len(b0 + 16 - l,
2766			 (op_type == CHCR_DECRYPT_OP) ?
2767			 req->cryptlen - m : req->cryptlen, l);
2768
2769	return rc;
2770}
2771
2772static inline int crypto_ccm_check_iv(const u8 *iv)
2773{
2774	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2775	if (iv[0] < 1 || iv[0] > 7)
2776		return -EINVAL;
2777
2778	return 0;
2779}
2780
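/*
 * Lay out the 16-byte counter block.  For RFC 4309 the flags byte is
 * fixed at 3 (so L = 4), followed by the 3-byte salt from setkey and
 * the 8-byte per-request IV; plain CCM uses the caller's IV verbatim.
 * The AAD length, when present, is stored just after B0 in the scratch
 * pad, and the counter bytes are zeroed before submission.
 */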
2781static int ccm_format_packet(struct aead_request *req,
2782			     u8 *ivptr,
2783			     unsigned int sub_type,
2784			     unsigned short op_type,
2785			     unsigned int assoclen)
2786{
2787	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2788	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2789	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2790	int rc = 0;
2791
2792	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2793		ivptr[0] = 3;
2794		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2795		memcpy(ivptr + 4, req->iv, 8);
2796		memset(ivptr + 12, 0, 4);
2797	} else {
2798		memcpy(ivptr, req->iv, 16);
2799	}
2800	if (assoclen)
2801		*(__be16 *)(reqctx->scratch_pad + 16) =
2802				htons(assoclen);
2803
2804	rc = generate_b0(req, ivptr, op_type);
2805	/* zero the ctr value */
2806	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2807	return rc;
2808}
2809
2810static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2811				  unsigned int dst_size,
2812				  struct aead_request *req,
2813				  unsigned short op_type)
2814{
2815	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2816	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2817	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2818	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2819	unsigned int c_id = a_ctx(tfm)->tx_chan_id;
2820	unsigned int ccm_xtra;
2821	unsigned char tag_offset = 0, auth_offset = 0;
2822	unsigned int assoclen;
2823
2824	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2825		assoclen = req->assoclen - 8;
2826	else
2827		assoclen = req->assoclen;
2828	ccm_xtra = CCM_B0_SIZE +
2829		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2830
2831	auth_offset = req->cryptlen ?
2832		(req->assoclen + IV + 1 + ccm_xtra) : 0;
2833	if (op_type == CHCR_DECRYPT_OP) {
2834		if (crypto_aead_authsize(tfm) != req->cryptlen)
2835			tag_offset = crypto_aead_authsize(tfm);
2836		else
2837			auth_offset = 0;
2838	}
2839
2840
2841	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2842					 2, 1);
2843	sec_cpl->pldlen =
2844		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2845	/* For CCM there will always be a B0, so AAD start is always 1 */
2846	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2847				1 + IV,	IV + assoclen + ccm_xtra,
2848				req->assoclen + IV + 1 + ccm_xtra, 0);
2849
2850	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2851					auth_offset, tag_offset,
2852					(op_type == CHCR_ENCRYPT_OP) ? 0 :
2853					crypto_aead_authsize(tfm));
2854	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2855					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2856					cipher_mode, mac_mode,
2857					aeadctx->hmac_ctrl, IV >> 1);
2858
2859	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2860					0, dst_size);
2861}
2862
2863static int aead_ccm_validate_input(unsigned short op_type,
2864				   struct aead_request *req,
2865				   struct chcr_aead_ctx *aeadctx,
2866				   unsigned int sub_type)
2867{
2868	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2869		if (crypto_ccm_check_iv(req->iv)) {
2870			pr_err("CCM: IV check failed\n");
2871			return -EINVAL;
2872		}
2873	} else {
2874		if (req->assoclen != 16 && req->assoclen != 20) {
2875			pr_err("RFC4309: Invalid AAD length %u\n",
2876			       req->assoclen);
2877			return -EINVAL;
2878		}
2879	}
2880	return 0;
2881}
2882
2883static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2884					  unsigned short qid,
2885					  int size)
2886{
2887	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2888	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2889	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2890	struct sk_buff *skb = NULL;
2891	struct chcr_wr *chcr_req;
2892	struct cpl_rx_phys_dsgl *phys_cpl;
2893	struct ulptx_sgl *ulptx;
2894	unsigned int transhdr_len;
2895	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
2896	unsigned int sub_type, assoclen = req->assoclen;
2897	unsigned int authsize = crypto_aead_authsize(tfm);
2898	int error = -EINVAL;
2899	u8 *ivptr;
2900	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2901		GFP_ATOMIC;
2902	struct adapter *adap = padap(a_ctx(tfm)->dev);
2903
2904	sub_type = get_aead_subtype(tfm);
2905	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2906		assoclen -= 8;
2907	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2908	error = chcr_aead_common_init(req);
2909	if (error)
2910		return ERR_PTR(error);
2911
2912	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
2913	if (error)
2914		goto err;
2915	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
2916			+ (reqctx->op ? -authsize : authsize),
2917			CHCR_DST_SG_SIZE, 0);
2918	dnents += MIN_CCM_SG; // For IV and B0
2919	dst_size = get_space_for_phys_dsgl(dnents);
2920	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2921			       CHCR_SRC_SG_SIZE, 0);
2922	snents += MIN_CCM_SG; // For B0
2923	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2924	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2925	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
2926		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
2927	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
2928				     reqctx->b0_len, 16) :
2929		(sgl_len(snents) *  8);
2930	transhdr_len += temp;
2931	transhdr_len = roundup(transhdr_len, 16);
2932
2933	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2934				reqctx->b0_len, transhdr_len, reqctx->op)) {
2935		atomic_inc(&adap->chcr_stats.fallback);
2936		chcr_aead_common_exit(req);
2937		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2938	}
2939	skb = alloc_skb(transhdr_len, flags);
2940
2941	if (!skb) {
2942		error = -ENOMEM;
2943		goto err;
2944	}
2945
2946	chcr_req = __skb_put_zero(skb, transhdr_len);
2947
2948	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
2949
2950	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2951	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2952	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2953			aeadctx->key, aeadctx->enckey_len);
2954
2955	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2956	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2957	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2958	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
2959	if (error)
2960		goto dstmap_fail;
2961	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2962	chcr_add_aead_src_ent(req, ulptx);
2963
2964	atomic_inc(&adap->chcr_stats.aead_rqst);
2965	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2966		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
2967		reqctx->b0_len) : 0);
2968	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2969		    transhdr_len, temp, 0);
2970	reqctx->skb = skb;
2971
2972	return skb;
2973dstmap_fail:
2974	kfree_skb(skb);
2975err:
2976	chcr_aead_common_exit(req);
2977	return ERR_PTR(error);
2978}
2979
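/*
 * Build the work request for GCM and RFC 4106.  The key context holds
 * the AES key followed by the GHASH hash subkey H computed at setkey
 * time; the IV is expanded to the 16-byte counter block
 * SALT | IV | 0x00000001 expected by the hardware.
 */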
2980static struct sk_buff *create_gcm_wr(struct aead_request *req,
2981				     unsigned short qid,
2982				     int size)
2983{
2984	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2985	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2986	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2987	struct sk_buff *skb = NULL;
2988	struct chcr_wr *chcr_req;
2989	struct cpl_rx_phys_dsgl *phys_cpl;
2990	struct ulptx_sgl *ulptx;
2991	unsigned int transhdr_len, dnents = 0, snents;
2992	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
2993	unsigned int authsize = crypto_aead_authsize(tfm);
2994	int error = -EINVAL;
2995	u8 *ivptr;
2996	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2997		GFP_ATOMIC;
2998	struct adapter *adap = padap(a_ctx(tfm)->dev);
2999
3000	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3001		assoclen = req->assoclen - 8;
3002
3003	reqctx->b0_len = 0;
3004	error = chcr_aead_common_init(req);
3005	if (error)
3006		return ERR_PTR(error);
3007	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3008				(reqctx->op ? -authsize : authsize),
3009				CHCR_DST_SG_SIZE, 0);
3010	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3011			       CHCR_SRC_SG_SIZE, 0);
3012	dnents += MIN_GCM_SG; // For IV
3013	dst_size = get_space_for_phys_dsgl(dnents);
3014	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3015	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3016	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3017			SGE_MAX_WR_LEN;
3018	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3019		(sgl_len(snents) * 8);
3020	transhdr_len += temp;
3021	transhdr_len = roundup(transhdr_len, 16);
3022	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3023			    transhdr_len, reqctx->op)) {
3024
3025		atomic_inc(&adap->chcr_stats.fallback);
3026		chcr_aead_common_exit(req);
3027		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3028	}
3029	skb = alloc_skb(transhdr_len, flags);
3030	if (!skb) {
3031		error = -ENOMEM;
3032		goto err;
3033	}
3034
3035	chcr_req = __skb_put_zero(skb, transhdr_len);
3036
3037	// Offset of the tag from the end
3038	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3039	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3040					a_ctx(tfm)->tx_chan_id, 2, 1);
3041	chcr_req->sec_cpl.pldlen =
3042		htonl(req->assoclen + IV + req->cryptlen);
3043	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3044					assoclen ? 1 + IV : 0,
3045					assoclen ? IV + assoclen : 0,
3046					req->assoclen + IV + 1, 0);
3047	chcr_req->sec_cpl.cipherstop_lo_authinsert =
3048			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3049						temp, temp);
3050	chcr_req->sec_cpl.seqno_numivs =
3051			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3052					CHCR_ENCRYPT_OP) ? 1 : 0,
3053					CHCR_SCMD_CIPHER_MODE_AES_GCM,
3054					CHCR_SCMD_AUTH_MODE_GHASH,
3055					aeadctx->hmac_ctrl, IV >> 1);
3056	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3057					0, 0, dst_size);
3058	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3059	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3060	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3061	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3062
3063	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3064	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3065	/* prepare a 16 byte iv */
3066	/* S   A   L  T |  IV | 0x00000001 */
3067	if (get_aead_subtype(tfm) ==
3068	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3069		memcpy(ivptr, aeadctx->salt, 4);
3070		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3071	} else {
3072		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3073	}
3074	*(__be32 *)(ivptr + 12) = cpu_to_be32(1);
3075
3076	ulptx = (struct ulptx_sgl *)(ivptr + 16);
3077
3078	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3079	chcr_add_aead_src_ent(req, ulptx);
3080	atomic_inc(&adap->chcr_stats.aead_rqst);
3081	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3082		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3083	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3084		    transhdr_len, temp, reqctx->verify);
3085	reqctx->skb = skb;
3086	return skb;
3087
3088err:
3089	chcr_aead_common_exit(req);
3090	return ERR_PTR(error);
3091}
3092
3093
3094
3095static int chcr_aead_cra_init(struct crypto_aead *tfm)
3096{
3097	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3098	struct aead_alg *alg = crypto_aead_alg(tfm);
3099
3100	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3101					       CRYPTO_ALG_NEED_FALLBACK |
3102					       CRYPTO_ALG_ASYNC);
3103	if  (IS_ERR(aeadctx->sw_cipher))
3104		return PTR_ERR(aeadctx->sw_cipher);
3105	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3106				 sizeof(struct aead_request) +
3107				 crypto_aead_reqsize(aeadctx->sw_cipher)));
3108	return chcr_device_init(a_ctx(tfm));
3109}
3110
3111static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3112{
3113	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3114
3115	crypto_free_aead(aeadctx->sw_cipher);
3116}
3117
3118static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3119					unsigned int authsize)
3120{
3121	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3122
3123	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3124	aeadctx->mayverify = VERIFY_HW;
3125	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3126}
3127static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3128				    unsigned int authsize)
3129{
3130	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3131	u32 maxauth = crypto_aead_maxauthsize(tfm);
3132
3133	/* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2 is
3134	 * not true for SHA1. The authsize == 12 condition should come before
3135	 * authsize == (maxauth >> 1).
3136	 */
3137	if (authsize == ICV_4) {
3138		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3139		aeadctx->mayverify = VERIFY_HW;
3140	} else if (authsize == ICV_6) {
3141		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3142		aeadctx->mayverify = VERIFY_HW;
3143	} else if (authsize == ICV_10) {
3144		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3145		aeadctx->mayverify = VERIFY_HW;
3146	} else if (authsize == ICV_12) {
3147		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3148		aeadctx->mayverify = VERIFY_HW;
3149	} else if (authsize == ICV_14) {
3150		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3151		aeadctx->mayverify = VERIFY_HW;
3152	} else if (authsize == (maxauth >> 1)) {
3153		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3154		aeadctx->mayverify = VERIFY_HW;
3155	} else if (authsize == maxauth) {
3156		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3157		aeadctx->mayverify = VERIFY_HW;
3158	} else {
3159		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3160		aeadctx->mayverify = VERIFY_SW;
3161	}
3162	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3163}
3164
3165
3166static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3167{
3168	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3169
3170	switch (authsize) {
3171	case ICV_4:
3172		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3173		aeadctx->mayverify = VERIFY_HW;
3174		break;
3175	case ICV_8:
3176		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3177		aeadctx->mayverify = VERIFY_HW;
3178		break;
3179	case ICV_12:
3180		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3181		aeadctx->mayverify = VERIFY_HW;
3182		break;
3183	case ICV_14:
3184		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3185		aeadctx->mayverify = VERIFY_HW;
3186		break;
3187	case ICV_16:
3188		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3189		aeadctx->mayverify = VERIFY_HW;
3190		break;
3191	case ICV_13:
3192	case ICV_15:
3193		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3194		aeadctx->mayverify = VERIFY_SW;
3195		break;
3196	default:
3197
3198		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3199				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3200		return -EINVAL;
3201	}
3202	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3203}
3204
3205static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3206					  unsigned int authsize)
3207{
3208	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3209
3210	switch (authsize) {
3211	case ICV_8:
3212		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3213		aeadctx->mayverify = VERIFY_HW;
3214		break;
3215	case ICV_12:
3216		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3217		aeadctx->mayverify = VERIFY_HW;
3218		break;
3219	case ICV_16:
3220		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3221		aeadctx->mayverify = VERIFY_HW;
3222		break;
3223	default:
3224		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3225				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3226		return -EINVAL;
3227	}
3228	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3229}
3230
3231static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3232				unsigned int authsize)
3233{
3234	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3235
3236	switch (authsize) {
3237	case ICV_4:
3238		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3239		aeadctx->mayverify = VERIFY_HW;
3240		break;
3241	case ICV_6:
3242		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3243		aeadctx->mayverify = VERIFY_HW;
3244		break;
3245	case ICV_8:
3246		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3247		aeadctx->mayverify = VERIFY_HW;
3248		break;
3249	case ICV_10:
3250		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3251		aeadctx->mayverify = VERIFY_HW;
3252		break;
3253	case ICV_12:
3254		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3255		aeadctx->mayverify = VERIFY_HW;
3256		break;
3257	case ICV_14:
3258		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3259		aeadctx->mayverify = VERIFY_HW;
3260		break;
3261	case ICV_16:
3262		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3263		aeadctx->mayverify = VERIFY_HW;
3264		break;
3265	default:
3266		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3267				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3268		return -EINVAL;
3269	}
3270	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3271}
3272
3273static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3274				const u8 *key,
3275				unsigned int keylen)
3276{
3277	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3278	unsigned char ck_size, mk_size;
3279	int key_ctx_size = 0;
3280
3281	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3282	if (keylen == AES_KEYSIZE_128) {
3283		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3284		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3285	} else if (keylen == AES_KEYSIZE_192) {
3286		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3287		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3288	} else if (keylen == AES_KEYSIZE_256) {
3289		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3290		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3291	} else {
3292		crypto_tfm_set_flags((struct crypto_tfm *)aead,
3293				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3294		aeadctx->enckey_len = 0;
3295		return	-EINVAL;
3296	}
3297	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3298						key_ctx_size >> 4);
3299	memcpy(aeadctx->key, key, keylen);
3300	aeadctx->enckey_len = keylen;
3301
3302	return 0;
3303}
3304
3305static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3306				const u8 *key,
3307				unsigned int keylen)
3308{
3309	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3310	int error;
3311
3312	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3313	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3314			      CRYPTO_TFM_REQ_MASK);
3315	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3316	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3317	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3318			      CRYPTO_TFM_RES_MASK);
3319	if (error)
3320		return error;
3321	return chcr_ccm_common_setkey(aead, key, keylen);
3322}
3323
3324static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3325				    unsigned int keylen)
3326{
3327	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3328	int error;
3329
3330	if (keylen < 3) {
3331		crypto_tfm_set_flags((struct crypto_tfm *)aead,
3332				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3333		aeadctx->enckey_len = 0;
3334		return	-EINVAL;
3335	}
3336	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3337	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3338			      CRYPTO_TFM_REQ_MASK);
3339	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3340	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3341	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3342			      CRYPTO_TFM_RES_MASK);
3343	if (error)
3344		return error;
3345	keylen -= 3;
3346	memcpy(aeadctx->salt, key + keylen, 3);
3347	return chcr_ccm_common_setkey(aead, key, keylen);
3348}
3349
3350static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3351			   unsigned int keylen)
3352{
3353	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3354	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3355	unsigned int ck_size;
3356	int ret = 0, key_ctx_size = 0;
3357	struct crypto_aes_ctx aes;
3358
3359	aeadctx->enckey_len = 0;
3360	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3361	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3362			      & CRYPTO_TFM_REQ_MASK);
3363	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3364	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3365	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3366			      CRYPTO_TFM_RES_MASK);
3367	if (ret)
3368		goto out;
3369
3370	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3371	    keylen > 3) {
3372		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3373		memcpy(aeadctx->salt, key + keylen, 4);
3374	}
3375	if (keylen == AES_KEYSIZE_128) {
3376		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3377	} else if (keylen == AES_KEYSIZE_192) {
3378		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3379	} else if (keylen == AES_KEYSIZE_256) {
3380		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3381	} else {
3382		crypto_tfm_set_flags((struct crypto_tfm *)aead,
3383				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3384		pr_err("GCM: Invalid key length %d\n", keylen);
3385		ret = -EINVAL;
3386		goto out;
3387	}
3388
3389	memcpy(aeadctx->key, key, keylen);
3390	aeadctx->enckey_len = keylen;
3391	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3392		AEAD_H_SIZE;
3393	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3394						CHCR_KEYCTX_MAC_KEY_SIZE_128,
3395						0, 0,
3396						key_ctx_size >> 4);
3397	/* Calculate the H = CIPH(K, 0 repeated 16 times).
3398	 * It will go in key context
3399	 */
3400	ret = aes_expandkey(&aes, key, keylen);
3401	if (ret) {
3402		aeadctx->enckey_len = 0;
3403		goto out;
3404	}
3405	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3406	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3407	memzero_explicit(&aes, sizeof(aes));
3408
3409out:
3410	return ret;
3411}
3412
3413static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3414				   unsigned int keylen)
3415{
3416	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3417	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3418	/* 'key' carries both the auth and the cipher key */
3419	struct crypto_authenc_keys keys;
3420	unsigned int bs, subtype;
3421	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3422	int err = 0, i, key_ctx_len = 0;
3423	unsigned char ck_size = 0;
3424	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3425	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3426	struct algo_param param;
3427	int align;
3428	u8 *o_ptr = NULL;
3429
3430	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3431	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3432			      & CRYPTO_TFM_REQ_MASK);
3433	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3434	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3435	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3436			      & CRYPTO_TFM_RES_MASK);
3437	if (err)
3438		goto out;
3439
3440	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3441		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3442		goto out;
3443	}
3444
3445	if (get_alg_config(&param, max_authsize)) {
3446		pr_err("Unsupported digest size\n");
3447		goto out;
3448	}
3449	subtype = get_aead_subtype(authenc);
3450	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3451		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3452		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3453			goto out;
3454		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3455		- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3456		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3457	}
3458	if (keys.enckeylen == AES_KEYSIZE_128) {
3459		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3460	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3461		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3462	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3463		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3464	} else {
3465		pr_err("Unsupported cipher key\n");
3466		goto out;
3467	}
3468
3469	/* Copy only the encryption key. We use the authkey to generate
3470	 * h(ipad) and h(opad), so the authkey is not needed again.
3471	 * authkeylen is the size of the hash digest.
3472	 */
3473	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3474	aeadctx->enckey_len = keys.enckeylen;
3475	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3476		subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3477
3478		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3479			    aeadctx->enckey_len << 3);
3480	}
3481	base_hash  = chcr_alloc_shash(max_authsize);
3482	if (IS_ERR(base_hash)) {
3483		pr_err("Base driver cannot be loaded\n");
3484		aeadctx->enckey_len = 0;
3485		memzero_explicit(&keys, sizeof(keys));
3486		return -EINVAL;
3487	}
3488	{
3489		SHASH_DESC_ON_STACK(shash, base_hash);
3490
3491		shash->tfm = base_hash;
3492		bs = crypto_shash_blocksize(base_hash);
3493		align = KEYCTX_ALIGN_PAD(max_authsize);
3494		o_ptr =  actx->h_iopad + param.result_size + align;
3495
3496		if (keys.authkeylen > bs) {
3497			err = crypto_shash_digest(shash, keys.authkey,
3498						  keys.authkeylen,
3499						  o_ptr);
3500			if (err) {
3501				pr_err("Hashing of the auth key failed\n");
3502				goto out;
3503			}
3504			keys.authkeylen = max_authsize;
3505		} else
3506			memcpy(o_ptr, keys.authkey, keys.authkeylen);
3507
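		/* Precompute the HMAC inner and outer partial hashes: hash
		 * one block of the key XORed with the 0x36 (ipad) and 0x5c
		 * (opad) patterns, so the hardware can resume from the
		 * states saved in the key context.
		 */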
3508		/* Compute the ipad digest */
3509		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3510		memcpy(pad, o_ptr, keys.authkeylen);
3511		for (i = 0; i < bs >> 2; i++)
3512			*((unsigned int *)pad + i) ^= IPAD_DATA;
3513
3514		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3515					      max_authsize))
3516			goto out;
3517		/* Compute the opad-digest */
3518		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3519		memcpy(pad, o_ptr, keys.authkeylen);
3520		for (i = 0; i < bs >> 2; i++)
3521			*((unsigned int *)pad + i) ^= OPAD_DATA;
3522
3523		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3524			goto out;
3525
3526		/* convert the ipad and opad digest to network order */
3527		chcr_change_order(actx->h_iopad, param.result_size);
3528		chcr_change_order(o_ptr, param.result_size);
3529		key_ctx_len = sizeof(struct _key_ctx) +
3530			roundup(keys.enckeylen, 16) +
3531			(param.result_size + align) * 2;
3532		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3533						0, 1, key_ctx_len >> 4);
3534		actx->auth_mode = param.auth_mode;
3535		chcr_free_shash(base_hash);
3536
3537		memzero_explicit(&keys, sizeof(keys));
3538		return 0;
3539	}
3540out:
3541	aeadctx->enckey_len = 0;
3542	memzero_explicit(&keys, sizeof(keys));
3543	if (!IS_ERR(base_hash))
3544		chcr_free_shash(base_hash);
3545	return -EINVAL;
3546}
3547
3548static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3549					const u8 *key, unsigned int keylen)
3550{
3551	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3552	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3553	struct crypto_authenc_keys keys;
3554	int err;
3555	/* 'key' carries both the auth and the cipher key */
3556	unsigned int subtype;
3557	int key_ctx_len = 0;
3558	unsigned char ck_size = 0;
3559
3560	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3561	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3562			      & CRYPTO_TFM_REQ_MASK);
3563	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3564	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3565	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3566			      & CRYPTO_TFM_RES_MASK);
3567	if (err)
3568		goto out;
3569
3570	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3571		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3572		goto out;
3573	}
3574	subtype = get_aead_subtype(authenc);
3575	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3576	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3577		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3578			goto out;
3579		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3580			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3581		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3582	}
3583	if (keys.enckeylen == AES_KEYSIZE_128) {
3584		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3585	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3586		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3587	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3588		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3589	} else {
3590		pr_err("Unsupported cipher key length %u\n", keys.enckeylen);
3591		goto out;
3592	}
3593	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3594	aeadctx->enckey_len = keys.enckeylen;
3595	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3596	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3597		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3598				aeadctx->enckey_len << 3);
3599	}
3600	key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3601
3602	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3603						0, key_ctx_len >> 4);
3604	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3605	memzero_explicit(&keys, sizeof(keys));
3606	return 0;
3607out:
3608	aeadctx->enckey_len = 0;
3609	memzero_explicit(&keys, sizeof(keys));
3610	return -EINVAL;
3611}
3612
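/*
 * Common submission path for all AEAD requests: look up the device,
 * account the in-flight WR, fall back to software if the device has
 * been detached, apply backpressure when the TX queue is full, then
 * build and send the work request.
 */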
3613static int chcr_aead_op(struct aead_request *req,
3614			int size,
3615			create_wr_t create_wr_fn)
3616{
3617	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3618	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3619	struct uld_ctx *u_ctx;
3620	struct sk_buff *skb;
3621	int isfull = 0;
3622	struct chcr_dev *cdev;
3623
3624	cdev = a_ctx(tfm)->dev;
3625	if (!cdev) {
3626		pr_err("chcr : %s : No crypto device.\n", __func__);
3627		return -ENXIO;
3628	}
3629
3630	if (chcr_inc_wrcount(cdev)) {
3631		/* A detached CHCR device means the lldi or padap has been
3632		 * freed; we cannot bump the fallback counter here.
3633		 */
3634		return chcr_aead_fallback(req, reqctx->op);
3635	}
3636
3637	u_ctx = ULD_CTX(a_ctx(tfm));
3638	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3639				   a_ctx(tfm)->tx_qidx)) {
3640		isfull = 1;
3641		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
3642			chcr_dec_wrcount(cdev);
3643			return -ENOSPC;
3644		}
3645	}
3646
3647	/* Form a WR from req */
3648	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
3649
3650	if (IS_ERR_OR_NULL(skb)) {
3651		chcr_dec_wrcount(cdev);
3652		return PTR_ERR_OR_ZERO(skb);
3653	}
3654
3655	skb->dev = u_ctx->lldi.ports[0];
3656	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
3657	chcr_send_wr(skb);
3658	return isfull ? -EBUSY : -EINPROGRESS;
3659}
3660
3661static int chcr_aead_encrypt(struct aead_request *req)
3662{
3663	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3664	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3665
3666	reqctx->verify = VERIFY_HW;
3667	reqctx->op = CHCR_ENCRYPT_OP;
3668
3669	switch (get_aead_subtype(tfm)) {
3670	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3671	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3672	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3673	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3674		return chcr_aead_op(req, 0, create_authenc_wr);
3675	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3676	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3677		return chcr_aead_op(req, 0, create_aead_ccm_wr);
3678	default:
3679		return chcr_aead_op(req, 0, create_gcm_wr);
3680	}
3681}
3682
3683static int chcr_aead_decrypt(struct aead_request *req)
3684{
3685	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3686	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3687	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3688	int size;
3689
3690	if (aeadctx->mayverify == VERIFY_SW) {
3691		size = crypto_aead_maxauthsize(tfm);
3692		reqctx->verify = VERIFY_SW;
3693	} else {
3694		size = 0;
3695		reqctx->verify = VERIFY_HW;
3696	}
3697	reqctx->op = CHCR_DECRYPT_OP;
3698	switch (get_aead_subtype(tfm)) {
3699	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3700	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3701	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3702	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3703		return chcr_aead_op(req, size, create_authenc_wr);
3704	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3705	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3706		return chcr_aead_op(req, size, create_aead_ccm_wr);
3707	default:
3708		return chcr_aead_op(req, size, create_gcm_wr);
3709	}
3710}
3711
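/*
 * Template table of every cipher, hash, HMAC and AEAD algorithm this
 * driver exposes to the crypto API; is_registered tracks which entries
 * were successfully registered so they can be backed out on unload.
 */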
3712static struct chcr_alg_template driver_algs[] = {
3713	/* AES-CBC */
3714	{
3715		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3716		.is_registered = 0,
3717		.alg.crypto = {
3718			.cra_name		= "cbc(aes)",
3719			.cra_driver_name	= "cbc-aes-chcr",
3720			.cra_blocksize		= AES_BLOCK_SIZE,
3721			.cra_init		= chcr_cra_init,
3722			.cra_exit		= chcr_cra_exit,
3723			.cra_u.ablkcipher	= {
3724				.min_keysize	= AES_MIN_KEY_SIZE,
3725				.max_keysize	= AES_MAX_KEY_SIZE,
3726				.ivsize		= AES_BLOCK_SIZE,
3727				.setkey			= chcr_aes_cbc_setkey,
3728				.encrypt		= chcr_aes_encrypt,
3729				.decrypt		= chcr_aes_decrypt,
3730			}
3731		}
3732	},
3733	{
3734		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3735		.is_registered = 0,
3736		.alg.crypto =   {
3737			.cra_name		= "xts(aes)",
3738			.cra_driver_name	= "xts-aes-chcr",
3739			.cra_blocksize		= AES_BLOCK_SIZE,
3740			.cra_init		= chcr_cra_init,
3741			.cra_exit		= NULL,
3742			.cra_u .ablkcipher = {
3743					.min_keysize	= 2 * AES_MIN_KEY_SIZE,
3744					.max_keysize	= 2 * AES_MAX_KEY_SIZE,
3745					.ivsize		= AES_BLOCK_SIZE,
3746					.setkey		= chcr_aes_xts_setkey,
3747					.encrypt	= chcr_aes_encrypt,
3748					.decrypt	= chcr_aes_decrypt,
3749				}
3750			}
3751	},
3752	{
3753		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3754		.is_registered = 0,
3755		.alg.crypto = {
3756			.cra_name		= "ctr(aes)",
3757			.cra_driver_name	= "ctr-aes-chcr",
3758			.cra_blocksize		= 1,
3759			.cra_init		= chcr_cra_init,
3760			.cra_exit		= chcr_cra_exit,
3761			.cra_u.ablkcipher	= {
3762				.min_keysize	= AES_MIN_KEY_SIZE,
3763				.max_keysize	= AES_MAX_KEY_SIZE,
3764				.ivsize		= AES_BLOCK_SIZE,
3765				.setkey		= chcr_aes_ctr_setkey,
3766				.encrypt	= chcr_aes_encrypt,
3767				.decrypt	= chcr_aes_decrypt,
3768			}
3769		}
3770	},
3771	{
3772		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3773			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3774		.is_registered = 0,
3775		.alg.crypto = {
3776			.cra_name		= "rfc3686(ctr(aes))",
3777			.cra_driver_name	= "rfc3686-ctr-aes-chcr",
3778			.cra_blocksize		= 1,
3779			.cra_init		= chcr_rfc3686_init,
3780			.cra_exit		= chcr_cra_exit,
3781			.cra_u.ablkcipher	= {
3782				.min_keysize	= AES_MIN_KEY_SIZE +
3783					CTR_RFC3686_NONCE_SIZE,
3784				.max_keysize	= AES_MAX_KEY_SIZE +
3785					CTR_RFC3686_NONCE_SIZE,
3786				.ivsize		= CTR_RFC3686_IV_SIZE,
3787				.setkey		= chcr_aes_rfc3686_setkey,
3788				.encrypt	= chcr_aes_encrypt,
3789				.decrypt	= chcr_aes_decrypt,
3790			}
3791		}
3792	},
3793	/* SHA */
3794	{
3795		.type = CRYPTO_ALG_TYPE_AHASH,
3796		.is_registered = 0,
3797		.alg.hash = {
3798			.halg.digestsize = SHA1_DIGEST_SIZE,
3799			.halg.base = {
3800				.cra_name = "sha1",
3801				.cra_driver_name = "sha1-chcr",
3802				.cra_blocksize = SHA1_BLOCK_SIZE,
3803			}
3804		}
3805	},
3806	{
3807		.type = CRYPTO_ALG_TYPE_AHASH,
3808		.is_registered = 0,
3809		.alg.hash = {
3810			.halg.digestsize = SHA256_DIGEST_SIZE,
3811			.halg.base = {
3812				.cra_name = "sha256",
3813				.cra_driver_name = "sha256-chcr",
3814				.cra_blocksize = SHA256_BLOCK_SIZE,
3815			}
3816		}
3817	},
3818	{
3819		.type = CRYPTO_ALG_TYPE_AHASH,
3820		.is_registered = 0,
3821		.alg.hash = {
3822			.halg.digestsize = SHA224_DIGEST_SIZE,
3823			.halg.base = {
3824				.cra_name = "sha224",
3825				.cra_driver_name = "sha224-chcr",
3826				.cra_blocksize = SHA224_BLOCK_SIZE,
3827			}
3828		}
3829	},
3830	{
3831		.type = CRYPTO_ALG_TYPE_AHASH,
3832		.is_registered = 0,
3833		.alg.hash = {
3834			.halg.digestsize = SHA384_DIGEST_SIZE,
3835			.halg.base = {
3836				.cra_name = "sha384",
3837				.cra_driver_name = "sha384-chcr",
3838				.cra_blocksize = SHA384_BLOCK_SIZE,
3839			}
3840		}
3841	},
3842	{
3843		.type = CRYPTO_ALG_TYPE_AHASH,
3844		.is_registered = 0,
3845		.alg.hash = {
3846			.halg.digestsize = SHA512_DIGEST_SIZE,
3847			.halg.base = {
3848				.cra_name = "sha512",
3849				.cra_driver_name = "sha512-chcr",
3850				.cra_blocksize = SHA512_BLOCK_SIZE,
3851			}
3852		}
3853	},
3854	/* HMAC */
3855	{
3856		.type = CRYPTO_ALG_TYPE_HMAC,
3857		.is_registered = 0,
3858		.alg.hash = {
3859			.halg.digestsize = SHA1_DIGEST_SIZE,
3860			.halg.base = {
3861				.cra_name = "hmac(sha1)",
3862				.cra_driver_name = "hmac-sha1-chcr",
3863				.cra_blocksize = SHA1_BLOCK_SIZE,
3864			}
3865		}
3866	},
3867	{
3868		.type = CRYPTO_ALG_TYPE_HMAC,
3869		.is_registered = 0,
3870		.alg.hash = {
3871			.halg.digestsize = SHA224_DIGEST_SIZE,
3872			.halg.base = {
3873				.cra_name = "hmac(sha224)",
3874				.cra_driver_name = "hmac-sha224-chcr",
3875				.cra_blocksize = SHA224_BLOCK_SIZE,
3876			}
3877		}
3878	},
3879	{
3880		.type = CRYPTO_ALG_TYPE_HMAC,
3881		.is_registered = 0,
3882		.alg.hash = {
3883			.halg.digestsize = SHA256_DIGEST_SIZE,
3884			.halg.base = {
3885				.cra_name = "hmac(sha256)",
3886				.cra_driver_name = "hmac-sha256-chcr",
3887				.cra_blocksize = SHA256_BLOCK_SIZE,
3888			}
3889		}
3890	},
3891	{
3892		.type = CRYPTO_ALG_TYPE_HMAC,
3893		.is_registered = 0,
3894		.alg.hash = {
3895			.halg.digestsize = SHA384_DIGEST_SIZE,
3896			.halg.base = {
3897				.cra_name = "hmac(sha384)",
3898				.cra_driver_name = "hmac-sha384-chcr",
3899				.cra_blocksize = SHA384_BLOCK_SIZE,
3900			}
3901		}
3902	},
3903	{
3904		.type = CRYPTO_ALG_TYPE_HMAC,
3905		.is_registered = 0,
3906		.alg.hash = {
3907			.halg.digestsize = SHA512_DIGEST_SIZE,
3908			.halg.base = {
3909				.cra_name = "hmac(sha512)",
3910				.cra_driver_name = "hmac-sha512-chcr",
3911				.cra_blocksize = SHA512_BLOCK_SIZE,
3912			}
3913		}
3914	},
3915	/* Add AEAD Algorithms */
3916	{
3917		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3918		.is_registered = 0,
3919		.alg.aead = {
3920			.base = {
3921				.cra_name = "gcm(aes)",
3922				.cra_driver_name = "gcm-aes-chcr",
3923				.cra_blocksize	= 1,
3924				.cra_priority = CHCR_AEAD_PRIORITY,
3925				.cra_ctxsize =	sizeof(struct chcr_context) +
3926						sizeof(struct chcr_aead_ctx) +
3927						sizeof(struct chcr_gcm_ctx),
3928			},
3929			.ivsize = GCM_AES_IV_SIZE,
3930			.maxauthsize = GHASH_DIGEST_SIZE,
3931			.setkey = chcr_gcm_setkey,
3932			.setauthsize = chcr_gcm_setauthsize,
3933		}
3934	},
3935	{
3936		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3937		.is_registered = 0,
3938		.alg.aead = {
3939			.base = {
3940				.cra_name = "rfc4106(gcm(aes))",
3941				.cra_driver_name = "rfc4106-gcm-aes-chcr",
3942				.cra_blocksize	 = 1,
3943				.cra_priority = CHCR_AEAD_PRIORITY + 1,
3944				.cra_ctxsize =	sizeof(struct chcr_context) +
3945						sizeof(struct chcr_aead_ctx) +
3946						sizeof(struct chcr_gcm_ctx),
3947
3948			},
3949			.ivsize = GCM_RFC4106_IV_SIZE,
3950			.maxauthsize	= GHASH_DIGEST_SIZE,
3951			.setkey = chcr_gcm_setkey,
3952			.setauthsize	= chcr_4106_4309_setauthsize,
3953		}
3954	},
3955	{
3956		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3957		.is_registered = 0,
3958		.alg.aead = {
3959			.base = {
3960				.cra_name = "ccm(aes)",
3961				.cra_driver_name = "ccm-aes-chcr",
3962				.cra_blocksize	 = 1,
3963				.cra_priority = CHCR_AEAD_PRIORITY,
3964				.cra_ctxsize =	sizeof(struct chcr_context) +
3965						sizeof(struct chcr_aead_ctx),
3966
3967			},
3968			.ivsize = AES_BLOCK_SIZE,
3969			.maxauthsize	= GHASH_DIGEST_SIZE,
3970			.setkey = chcr_aead_ccm_setkey,
3971			.setauthsize	= chcr_ccm_setauthsize,
3972		}
3973	},
3974	{
3975		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
3976		.is_registered = 0,
3977		.alg.aead = {
3978			.base = {
3979				.cra_name = "rfc4309(ccm(aes))",
3980				.cra_driver_name = "rfc4309-ccm-aes-chcr",
3981				.cra_blocksize	 = 1,
3982				.cra_priority = CHCR_AEAD_PRIORITY + 1,
3983				.cra_ctxsize =	sizeof(struct chcr_context) +
3984						sizeof(struct chcr_aead_ctx),
3985
3986			},
3987			.ivsize = 8,
3988			.maxauthsize	= GHASH_DIGEST_SIZE,
3989			.setkey = chcr_aead_rfc4309_setkey,
3990			.setauthsize = chcr_4106_4309_setauthsize,
3991		}
3992	},
3993	{
3994		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3995		.is_registered = 0,
3996		.alg.aead = {
3997			.base = {
3998				.cra_name = "authenc(hmac(sha1),cbc(aes))",
3999				.cra_driver_name =
4000					"authenc-hmac-sha1-cbc-aes-chcr",
4001				.cra_blocksize	 = AES_BLOCK_SIZE,
4002				.cra_priority = CHCR_AEAD_PRIORITY,
4003				.cra_ctxsize =	sizeof(struct chcr_context) +
4004						sizeof(struct chcr_aead_ctx) +
4005						sizeof(struct chcr_authenc_ctx),
4006
4007			},
4008			.ivsize = AES_BLOCK_SIZE,
4009			.maxauthsize = SHA1_DIGEST_SIZE,
4010			.setkey = chcr_authenc_setkey,
4011			.setauthsize = chcr_authenc_setauthsize,
4012		}
4013	},
4014	{
	.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
	.is_registered = 0,
	.alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha256),cbc(aes))",
			.cra_driver_name =
				"authenc-hmac-sha256-cbc-aes-chcr",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_priority = CHCR_AEAD_PRIORITY,
			.cra_ctxsize = sizeof(struct chcr_context) +
				       sizeof(struct chcr_aead_ctx) +
				       sizeof(struct chcr_authenc_ctx),
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA256_DIGEST_SIZE,
		.setkey = chcr_authenc_setkey,
		.setauthsize = chcr_authenc_setauthsize,
	}
},
{
	.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
	.is_registered = 0,
	.alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha224),cbc(aes))",
			.cra_driver_name =
				"authenc-hmac-sha224-cbc-aes-chcr",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_priority = CHCR_AEAD_PRIORITY,
			.cra_ctxsize = sizeof(struct chcr_context) +
				       sizeof(struct chcr_aead_ctx) +
				       sizeof(struct chcr_authenc_ctx),
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA224_DIGEST_SIZE,
		.setkey = chcr_authenc_setkey,
		.setauthsize = chcr_authenc_setauthsize,
	}
},
{
	.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
	.is_registered = 0,
	.alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha384),cbc(aes))",
			.cra_driver_name =
				"authenc-hmac-sha384-cbc-aes-chcr",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_priority = CHCR_AEAD_PRIORITY,
			.cra_ctxsize = sizeof(struct chcr_context) +
				       sizeof(struct chcr_aead_ctx) +
				       sizeof(struct chcr_authenc_ctx),
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA384_DIGEST_SIZE,
		.setkey = chcr_authenc_setkey,
		.setauthsize = chcr_authenc_setauthsize,
	}
},
{
	.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
	.is_registered = 0,
	.alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha512),cbc(aes))",
			.cra_driver_name =
				"authenc-hmac-sha512-cbc-aes-chcr",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_priority = CHCR_AEAD_PRIORITY,
			.cra_ctxsize = sizeof(struct chcr_context) +
				       sizeof(struct chcr_aead_ctx) +
				       sizeof(struct chcr_authenc_ctx),
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA512_DIGEST_SIZE,
		.setkey = chcr_authenc_setkey,
		.setauthsize = chcr_authenc_setauthsize,
	}
},
{
	.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
	.is_registered = 0,
	.alg.aead = {
		.base = {
			.cra_name = "authenc(digest_null,cbc(aes))",
			.cra_driver_name =
				"authenc-digest_null-cbc-aes-chcr",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_priority = CHCR_AEAD_PRIORITY,
			.cra_ctxsize = sizeof(struct chcr_context) +
				       sizeof(struct chcr_aead_ctx) +
				       sizeof(struct chcr_authenc_ctx),
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = 0,
		.setkey = chcr_aead_digest_null_setkey,
		.setauthsize = chcr_authenc_null_setauthsize,
	}
},
{
	.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
	.is_registered = 0,
	.alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
			.cra_driver_name =
				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
			.cra_blocksize = 1,
			.cra_priority = CHCR_AEAD_PRIORITY,
			.cra_ctxsize = sizeof(struct chcr_context) +
				       sizeof(struct chcr_aead_ctx) +
				       sizeof(struct chcr_authenc_ctx),
		},
		.ivsize = CTR_RFC3686_IV_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
		.setkey = chcr_authenc_setkey,
		.setauthsize = chcr_authenc_setauthsize,
	}
},
{
	.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
	.is_registered = 0,
	.alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
			.cra_driver_name =
				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
			.cra_blocksize = 1,
			.cra_priority = CHCR_AEAD_PRIORITY,
			.cra_ctxsize = sizeof(struct chcr_context) +
				       sizeof(struct chcr_aead_ctx) +
				       sizeof(struct chcr_authenc_ctx),
		},
		.ivsize = CTR_RFC3686_IV_SIZE,
		.maxauthsize = SHA256_DIGEST_SIZE,
		.setkey = chcr_authenc_setkey,
		.setauthsize = chcr_authenc_setauthsize,
	}
},
{
	.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
	.is_registered = 0,
	.alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
			.cra_driver_name =
				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
			.cra_blocksize = 1,
			.cra_priority = CHCR_AEAD_PRIORITY,
			.cra_ctxsize = sizeof(struct chcr_context) +
				       sizeof(struct chcr_aead_ctx) +
				       sizeof(struct chcr_authenc_ctx),
		},
		.ivsize = CTR_RFC3686_IV_SIZE,
		.maxauthsize = SHA224_DIGEST_SIZE,
		.setkey = chcr_authenc_setkey,
		.setauthsize = chcr_authenc_setauthsize,
	}
},
{
	.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
	.is_registered = 0,
	.alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
			.cra_driver_name =
				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
			.cra_blocksize = 1,
			.cra_priority = CHCR_AEAD_PRIORITY,
			.cra_ctxsize = sizeof(struct chcr_context) +
				       sizeof(struct chcr_aead_ctx) +
				       sizeof(struct chcr_authenc_ctx),
		},
		.ivsize = CTR_RFC3686_IV_SIZE,
		.maxauthsize = SHA384_DIGEST_SIZE,
		.setkey = chcr_authenc_setkey,
		.setauthsize = chcr_authenc_setauthsize,
	}
},
{
	.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
	.is_registered = 0,
	.alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
			.cra_driver_name =
				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
			.cra_blocksize = 1,
			.cra_priority = CHCR_AEAD_PRIORITY,
			.cra_ctxsize = sizeof(struct chcr_context) +
				       sizeof(struct chcr_aead_ctx) +
				       sizeof(struct chcr_authenc_ctx),
		},
		.ivsize = CTR_RFC3686_IV_SIZE,
		.maxauthsize = SHA512_DIGEST_SIZE,
		.setkey = chcr_authenc_setkey,
		.setauthsize = chcr_authenc_setauthsize,
	}
},
{
	.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
	.is_registered = 0,
	.alg.aead = {
		.base = {
			.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
			.cra_driver_name =
				"authenc-digest_null-rfc3686-ctr-aes-chcr",
			.cra_blocksize = 1,
			.cra_priority = CHCR_AEAD_PRIORITY,
			.cra_ctxsize = sizeof(struct chcr_context) +
				       sizeof(struct chcr_aead_ctx) +
				       sizeof(struct chcr_authenc_ctx),
		},
		.ivsize = CTR_RFC3686_IV_SIZE,
		.maxauthsize = 0,
		.setkey = chcr_aead_digest_null_setkey,
		.setauthsize = chcr_authenc_null_setauthsize,
	}
},
};

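/*
 * Illustrative sketch only, not part of the original driver: once the
 * table above is registered, any kernel consumer can reach these
 * implementations through the generic AEAD API by cra_name (the crypto
 * core selects this driver when CHCR_AEAD_PRIORITY wins). The function
 * below is hypothetical and kept out of the build.
 */
#if 0
static int chcr_aead_alloc_example(void)
{
	struct crypto_aead *tfm;
	int err;

	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Tag length must not exceed .maxauthsize (SHA256_DIGEST_SIZE here). */
	err = crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);

	/*
	 * Note: an authenc() key is not a plain buffer; it is an
	 * rtattr-packed blob carrying the MAC key, the cipher key and the
	 * encryption key length (see crypto_authenc_extractkeys()).
	 */

	crypto_free_aead(tfm);
	return err;
}
#endif
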
/*
 *	chcr_unregister_alg - Deregister crypto algorithms from the
 *	kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
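		/*
		 * Clear the flag unconditionally so a subsequent
		 * chcr_register_alg() attempts registration for every
		 * entry again.
		 */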
		driver_algs[i].is_registered = 0;
	}
	return 0;
}

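/*
 * Context footprints advertised to the ahash framework: plain SHA
 * transforms need only the driver context, HMAC transforms additionally
 * carry a struct hmac_ctx, and the exportable per-request state is the
 * whole chcr_ahash_req_ctx.
 */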
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)

/*
 *	chcr_register_alg - Register crypto algorithms with the kernel
 *	framework.
 */
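/*
 *	Registration is all-or-nothing: if any algorithm fails to register,
 *	everything registered so far is unwound via chcr_unregister_alg()
 *	before the error is returned.
 */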
static int chcr_register_alg(void)
{
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			driver_algs[i].alg.crypto.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
			driver_algs[i].alg.crypto.cra_flags =
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.crypto.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.crypto.cra_alignmask = 0;
			driver_algs[i].alg.crypto.cra_type =
				&crypto_ablkcipher_type;
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			name = a_hash->halg.base.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("%s : Algorithm registration failed\n", name);
			goto register_err;
		}
		driver_algs[i].is_registered = 1;
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}

/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once when the first device comes up. After
 *	this the kernel will start calling into the driver for crypto
 *	operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 *	stop_crypto - Deregister all crypto algorithms from the kernel.
 *	This should be called once when the last device goes down. After
 *	this the kernel will no longer call into the driver for crypto
 *	operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}
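
/*
 * Illustrative sketch only, not part of the original driver: the core
 * driver is expected to pair these entry points with device arrival and
 * removal roughly as below. chcr_dev_count, chcr_device_up() and
 * chcr_device_down() are hypothetical names standing in for whatever
 * bookkeeping chcr_core.c actually performs; kept out of the build.
 */
#if 0
static atomic_t chcr_dev_count;

static int chcr_device_up(void)
{
	/* Register the algorithms when the first device appears. */
	if (atomic_inc_return(&chcr_dev_count) == 1)
		return start_crypto();
	return 0;
}

static void chcr_device_down(void)
{
	/* Deregister everything when the last device goes away. */
	if (atomic_dec_and_test(&chcr_dev_count))
		stop_crypto();
}
#endif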