/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE

static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};

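/*
 * AES key-schedule round constants (Rcon), kept in the top byte of
 * each word; consumed by get_aes_decrypt_key() below when it expands
 * the cipher key in software.
 */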
static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return &ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return &ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return &ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return container_of(ctx->dev, struct uld_ctx, dev);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}

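/*
 * sg_nents_xlen - count the hardware SGL entries needed to describe
 * @reqlen bytes of a DMA-mapped scatterlist after skipping the first
 * @skip bytes.  Each contiguous DMA segment is split into chunks of at
 * most @entlen bytes, the largest size a single SGL entry can carry.
 */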
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				authsize, req->assoclen +
				req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}

static int chcr_inc_wrcount(struct chcr_dev *dev)
{
	if (dev->state == CHCR_DETACH)
		return 1;
	atomic_inc(&dev->inflight);
	return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
	atomic_dec(&dev->inflight);
}

static inline int chcr_handle_aead_resp(struct aead_request *req,
					 unsigned char *input,
					 int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_dev *dev = a_ctx(tfm)->dev;

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	chcr_dec_wrcount(dev);
	aead_request_complete(req, err);

	return err;
}

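/*
 * get_aes_decrypt_key - run the AES key schedule in software and write
 * the last Nk round-key words out in reverse order.  This "reversed
 * round key" (ablkctx->rrkey) lets the hardware start decryption from
 * the final round key without expanding the key itself.
 */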
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = get_unaligned_be32(&key[i * 4]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
		j--;
		if (j < 0)
			j += nk;
	}
}

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}

static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}

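/*
 * chcr_change_order - byte-swap the exported partial-hash state words
 * to big-endian, the layout the hardware expects in the key context.
 * SHA-384/512 state is an array of u64s; the other digests use u32s.
 */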
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);

	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}

static inline void dsgl_walk_init(struct dsgl_walk *walk,
				   struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
					size_t size,
					dma_addr_t addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}

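/*
 * dsgl_walk_add_sg - append @slen bytes of a DMA-mapped scatterlist to
 * the destination SGL, skipping the first @skip bytes.  Entries are
 * capped at CHCR_DST_SG_SIZE bytes and packed eight address/length
 * pairs per phys_sge_pairs block.
 */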
static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
						      offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}

static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
					size_t size,
					dma_addr_t addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}

static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents++;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}

static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.skcipher);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

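/*
 * cxgb4_is_crypto_q_full - peek at the crypto ULD TX queue under its
 * sendq lock and report whether it is already backlogged.  Returns -1
 * when full so callers can bail out before building a work request.
 */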
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}

static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
							CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}

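/*
 * chcr_sg_ent_in_wr - given @space bytes left in the work request,
 * walk the source and destination scatterlists in lock step and work
 * out how many payload bytes can be described without exceeding the
 * per-entry size caps or MAX_DSGL_ENT.  The smaller of the two byte
 * counts bounds what one WR can carry.
 */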
static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
				CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}

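/*
 * chcr_cipher_fallback - hand the request to the software skcipher
 * allocated at init time.  Used when the hardware path cannot handle
 * the request, e.g. when no payload bytes fit in a work request.
 */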
static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
				struct skcipher_request *req,
				u8 *iv,
				unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int err;

	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
				   req->cryptlen, iv);

	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
			crypto_skcipher_encrypt(&reqctx->fallback_req);

	return err;
}

static inline int get_qidxs(struct crypto_async_request *req,
			    unsigned int *txqidx, unsigned int *rxqidx)
{
	struct crypto_tfm *tfm = req->tfm;
	int ret = 0;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
	{
		struct aead_request *aead_req =
			container_of(req, struct aead_request, base);
		struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(aead_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_SKCIPHER:
	{
		struct skcipher_request *sk_req =
			container_of(req, struct skcipher_request, base);
		struct chcr_skcipher_req_ctx *reqctx =
			skcipher_request_ctx(sk_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_AHASH:
	{
		struct ahash_request *ahash_req =
			container_of(req, struct ahash_request, base);
		struct chcr_ahash_req_ctx *reqctx =
			ahash_request_ctx(ahash_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	default:
		ret = -EINVAL;
		/* should never get here */
		BUG();
		break;
	}
	return ret;
}

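/*
 * create_wreq - fill in the FW_CRYPTO_LOOKASIDE_WR header shared by
 * cipher and hash work requests: queue ids derived from the request's
 * tx/rx indices, the total length in 16-byte units, the completion
 * cookie (the request pointer itself), and the immediate-data ULPTX
 * header.
 */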
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	unsigned int tx_channel_id, rx_channel_id;
	unsigned int txqidx = 0, rxqidx = 0;
	unsigned int qid, fid, portno;

	get_qidxs(req, &txqidx, &rxqidx);
	qid = u_ctx->lldi.rxq_ids[rxqidx];
	fid = u_ctx->lldi.rxq_ids[0];
	portno = rxqidx / ctx->rxq_perchan;
	tx_channel_id = txqidx / ctx->txq_perchan;
	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
							    !!lcb, txqidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				((sizeof(chcr_req->wreq)) >> 4)));
	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@wrparam: Container for create_cipher_wr()'s parameters
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_skcipher_req_ctx *reqctx =
		skcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
				  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
				     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
			FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							 ablkctx->ciph_mode,
							 0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
							  0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));

	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
				CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ablkctx->sw_cipher,
				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}

static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	ablkctx->enckey_len = 0;
	return err;
}

static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;
	return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;
	return err;
}
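
/*
 * ctr_add_iv - add @add to the 128-bit big-endian counter in @srciv,
 * propagating the carry word by word, and store the result in @dstiv.
 * E.g. adding 1 to a low word of 0xffffffff carries into the next
 * 32-bit word.
 */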
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}

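/*
 * adjust_ctr_overflow - clamp @bytes so that the 32-bit counter in the
 * low word of the IV does not wrap within a single request; the caller
 * re-issues the remainder with an updated IV.
 */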
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
	if ((bytes / AES_BLOCK_SIZE) >= c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}

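/*
 * chcr_update_tweak - recompute the XTS tweak after the hardware has
 * consumed last_req_len bytes: re-encrypt the original IV with the
 * tweak key, then multiply by x in GF(2^128) once per processed block
 * (eight at a time via gf128mul_x8_ble()).  For a non-final chunk the
 * result is decrypted back, presumably so the hardware can re-derive
 * it when the next chunk is issued.
 */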
static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_aes_ctx aes;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	/* For a 192-bit key, drop the padding zeroes that were appended
	 * in chcr_xts_setkey
	 */
	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
			== CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
		ret = aes_expandkey(&aes, key, keylen - 8);
	else
		ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;
	aes_encrypt(&aes, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		aes_decrypt(&aes, iv, iv);

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}

static int chcr_update_cipher_iv(struct skcipher_request *req,
				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/* Updated before sending the last WR */
			memcpy(iv, req->iv, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}

/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts at 1 and the IV buffer is only 8 bytes, which
 * remains constant across subsequent update requests.
 */
static int chcr_final_cipher_iv(struct skcipher_request *req,
				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
						       AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
		if (!reqctx->partial_req)
			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
		else
			ret = chcr_update_tweak(req, iv, 1);
	} else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* Already updated for decrypt */
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}

static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct chcr_context *ctx = c_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);
	struct cipher_wr_param wrparam;
	struct sk_buff *skb;
	int bytes;

	if (err)
		goto unmap;
	if (req->cryptlen == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/* CTR mode counter overflow */
		bytes = req->cryptlen - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		memcpy(req->iv, reqctx->init_iv, IV);
		atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("%s: Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	if (get_cryptoalg_subtype(tfm) ==
		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
			CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	if (get_cryptoalg_subtype(tfm) ==
		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
			CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	chcr_dec_wrcount(dev);
	skcipher_request_complete(req, err);
	return err;
}

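/*
 * process_cipher - validate a cipher request, DMA-map it, decide
 * between immediate data and SGL mode, set up the per-request IV, and
 * build the first work request.  Requests the hardware cannot handle
 * (zero length, unaligned XTS tail, no WR space) are punted to the
 * software fallback.
 */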
static int process_cipher(struct skcipher_request *req,
				  unsigned short qid,
				  struct sk_buff **skb,
				  unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct adapter *adap = padap(c_ctx(tfm)->dev);
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;
	int subtype;

	reqctx->processed = 0;
	reqctx->partial_req = 0;
	if (!req->iv)
		goto error;
	subtype = get_cryptoalg_subtype(tfm);
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->cryptlen == 0) ||
	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->cryptlen, ivsize);
		goto error;
	}

	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (err)
		goto error;
	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					    AES_MIN_KEY_SIZE +
					    sizeof(struct cpl_rx_phys_dsgl) +
					/* Min dsgl size */
					    32))) {
		/* Can be sent as immediate data */
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->cryptlen,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->cryptlen;
	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->cryptlen;
	}
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
		bytes = adjust_ctr_overflow(req->iv, bytes);
	}
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
				CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
		memcpy(reqctx->init_iv, reqctx->iv, IV);
	} else {
		memcpy(reqctx->iv, req->iv, IV);
		memcpy(reqctx->init_iv, req->iv, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
fallback:       atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
					   subtype ==
					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
					   reqctx->iv : req->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;
	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}

static int chcr_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
						reqctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
			err = -ENOSPC;
			goto error;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	if (get_cryptoalg_subtype(tfm) ==
		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
			CRYPTO_TFM_REQ_MAY_SLEEP) {
		reqctx->partial_req = 1;
		wait_for_completion(&ctx->cbc_aes_aio_done);
	}
	return -EINPROGRESS;
error:
	chcr_dec_wrcount(dev);
	return err;
}

static int chcr_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
						reqctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
			return -ENOSPC;
	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	int txq_perchan, ntxq;
	int err = 0, rxq_perchan;

	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			err = -ENXIO;
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		ctx->dev = &u_ctx->dev;
		ntxq = u_ctx->lldi.ntxq;
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		ctx->ntxq = ntxq;
		ctx->nrxq = u_ctx->lldi.nrxq;
		ctx->rxq_perchan = rxq_perchan;
		ctx->txq_perchan = txq_perchan;
	}
out:
	return err;
}

static int chcr_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
				CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	init_completion(&ctx->cbc_aes_aio_done);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
					 crypto_skcipher_reqsize(ablkctx->sw_cipher));

	return chcr_device_init(ctx);
}

static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
	 * cannot be used as the fallback in chcr_handle_cipher_resp()
	 */
	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
				CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
				    crypto_skcipher_reqsize(ablkctx->sw_cipher));
	return chcr_device_init(ctx);
}

static void chcr_exit_tfm(struct crypto_skcipher *tfm)
{
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_skcipher(ablkctx->sw_cipher);
}

static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}

static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}

/**
 *	create_hash_wr - Create hash work request
 *	@req: Cipher req base
 *	@param: Container for create_hash_wr()'s parameters
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = h_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;
	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;

	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
		      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);

	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					    param->alg_prm.mk_size, 0,
					    param->opad_needed,
					    ((param->kctx_len +
					     sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to the max WR size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
				(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}

1656static int chcr_ahash_update(struct ahash_request *req)
1657{
1658	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1659	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1660	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1661	struct chcr_context *ctx = h_ctx(rtfm);
1662	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1663	struct sk_buff *skb;
1664	u8 remainder = 0, bs;
1665	unsigned int nbytes = req->nbytes;
1666	struct hash_wr_param params;
1667	int error;
1668	unsigned int cpu;
1669
1670	cpu = get_cpu();
1671	req_ctx->txqidx = cpu % ctx->ntxq;
1672	req_ctx->rxqidx = cpu % ctx->nrxq;
1673	put_cpu();
1674
1675	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1676
 
 
 
 
 
 
 
1677	if (nbytes + req_ctx->reqlen >= bs) {
1678		remainder = (nbytes + req_ctx->reqlen) % bs;
1679		nbytes = nbytes + req_ctx->reqlen - remainder;
1680	} else {
1681		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1682				   + req_ctx->reqlen, nbytes, 0);
1683		req_ctx->reqlen += nbytes;
1684		return 0;
1685	}
1686	error = chcr_inc_wrcount(dev);
1687	if (error)
1688		return -ENXIO;
1689	/* Detach state for CHCR means lldi or padap is freed. Increasing
1690	 * inflight count for dev guarantees that lldi and padap is valid
1691	 */
1692	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1693						req_ctx->txqidx) &&
1694		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1695			error = -ENOSPC;
1696			goto err;
1697	}
1698
1699	chcr_init_hctx_per_wr(req_ctx);
1700	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1701	if (error) {
1702		error = -ENOMEM;
1703		goto err;
1704	}
1705	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1706	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1707	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1708				     HASH_SPACE_LEFT(params.kctx_len), 0);
1709	if (params.sg_len > req->nbytes)
1710		params.sg_len = req->nbytes;
1711	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1712			req_ctx->reqlen;
1713	params.opad_needed = 0;
1714	params.more = 1;
1715	params.last = 0;
 
1716	params.bfr_len = req_ctx->reqlen;
1717	params.scmd1 = 0;
1718	req_ctx->hctx_wr.srcsg = req->src;
1719
1720	params.hash_size = params.alg_prm.result_size;
1721	req_ctx->data_len += params.sg_len + params.bfr_len;
1722	skb = create_hash_wr(req, &params);
1723	if (IS_ERR(skb)) {
1724		error = PTR_ERR(skb);
1725		goto unmap;
1726	}
1727
1728	req_ctx->hctx_wr.processed += params.sg_len;
1729	if (remainder) {
 
1730		/* Swap buffers */
1731		swap(req_ctx->reqbfr, req_ctx->skbfr);
 
 
1732		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1733				   req_ctx->reqbfr, remainder, req->nbytes -
1734				   remainder);
1735	}
1736	req_ctx->reqlen = remainder;
1737	skb->dev = u_ctx->lldi.ports[0];
1738	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1739	chcr_send_wr(skb);
 
1740	return -EINPROGRESS;
1741unmap:
1742	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1743err:
1744	chcr_dec_wrcount(dev);
1745	return error;
1746}
1747
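/*
 * Build the final Merkle-Damgaard padding block by hand: a 0x80 marker
 * byte, zero fill, and the total message length in bits stored
 * big-endian in the last eight bytes (offset 56 for 64-byte blocks,
 * offset 120 for 128-byte ones).
 */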
1748static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1749{
1750	memset(bfr_ptr, 0, bs);
1751	*bfr_ptr = 0x80;
1752	if (bs == 64)
1753		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
1754	else
1755		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
1756}
1757
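/*
 * Hash final: flush the buffered residue as the closing work request.
 * With nothing buffered, a software-padded block is submitted with
 * more=1 instead, as the padding already encodes the total length.
 */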
1758static int chcr_ahash_final(struct ahash_request *req)
1759{
1760	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1761	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1762	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1763	struct hash_wr_param params;
1764	struct sk_buff *skb;
1765	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1766	struct chcr_context *ctx = h_ctx(rtfm);
1767	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1768	int error;
1769	unsigned int cpu;
1770
1771	cpu = get_cpu();
1772	req_ctx->txqidx = cpu % ctx->ntxq;
1773	req_ctx->rxqidx = cpu % ctx->nrxq;
1774	put_cpu();
1775
1776	error = chcr_inc_wrcount(dev);
1777	if (error)
1778		return -ENXIO;
1779
1780	chcr_init_hctx_per_wr(req_ctx);
1781	if (is_hmac(crypto_ahash_tfm(rtfm)))
1782		params.opad_needed = 1;
1783	else
1784		params.opad_needed = 0;
1785	params.sg_len = 0;
1786	req_ctx->hctx_wr.isfinal = 1;
1787	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1788	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1789	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1790		params.opad_needed = 1;
1791		params.kctx_len *= 2;
1792	} else {
1793		params.opad_needed = 0;
1794	}
1795
1796	req_ctx->hctx_wr.result = 1;
1797	params.bfr_len = req_ctx->reqlen;
1798	req_ctx->data_len += params.bfr_len + params.sg_len;
1799	req_ctx->hctx_wr.srcsg = req->src;
1800	if (req_ctx->reqlen == 0) {
1801		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1802		params.last = 0;
1803		params.more = 1;
1804		params.scmd1 = 0;
1805		params.bfr_len = bs;
1806
1807	} else {
1808		params.scmd1 = req_ctx->data_len;
1809		params.last = 1;
1810		params.more = 0;
1811	}
1812	params.hash_size = crypto_ahash_digestsize(rtfm);
1813	skb = create_hash_wr(req, &params);
1814	if (IS_ERR(skb)) {
1815		error = PTR_ERR(skb);
1816		goto err;
1817	}
1818	req_ctx->reqlen = 0;
1819	skb->dev = u_ctx->lldi.ports[0];
1820	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1821	chcr_send_wr(skb);
1822	return -EINPROGRESS;
1823err:
1824	chcr_dec_wrcount(dev);
1825	return error;
1826}
1827
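/*
 * finup = update + final in a single work request where possible; when
 * the source does not fit in one WR, an intermediate request (more=1)
 * is sent and the rest is driven from the completion path.
 */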
1828static int chcr_ahash_finup(struct ahash_request *req)
1829{
1830	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1831	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1832	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1833	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1834	struct chcr_context *ctx = h_ctx(rtfm);
1835	struct sk_buff *skb;
1836	struct hash_wr_param params;
1837	u8  bs;
1838	int error;
1839	unsigned int cpu;
1840
1841	cpu = get_cpu();
1842	req_ctx->txqidx = cpu % ctx->ntxq;
1843	req_ctx->rxqidx = cpu % ctx->nrxq;
1844	put_cpu();
1845
1846	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1847	error = chcr_inc_wrcount(dev);
1848	if (error)
1849		return -ENXIO;
1850
1851	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1852						req_ctx->txqidx) &&
1853		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1854			error = -ENOSPC;
1855			goto err;
1856	}
1857	chcr_init_hctx_per_wr(req_ctx);
1858	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1859	if (error) {
1860		error = -ENOMEM;
1861		goto err;
1862	}
1863
1864	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1865	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1866	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1867		params.kctx_len *= 2;
1868		params.opad_needed = 1;
1869	} else {
1870		params.opad_needed = 0;
1871	}
1872
1873	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1874				    HASH_SPACE_LEFT(params.kctx_len), 0);
1875	if (params.sg_len < req->nbytes) {
1876		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1877			params.kctx_len /= 2;
1878			params.opad_needed = 0;
1879		}
1880		params.last = 0;
1881		params.more = 1;
1882		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1883					- req_ctx->reqlen;
1884		params.hash_size = params.alg_prm.result_size;
1885		params.scmd1 = 0;
1886	} else {
1887		params.last = 1;
1888		params.more = 0;
1889		params.sg_len = req->nbytes;
1890		params.hash_size = crypto_ahash_digestsize(rtfm);
1891		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1892				params.sg_len;
1893	}
1894	params.bfr_len = req_ctx->reqlen;
1895	req_ctx->data_len += params.bfr_len + params.sg_len;
1896	req_ctx->hctx_wr.result = 1;
1897	req_ctx->hctx_wr.srcsg = req->src;
1898	if ((req_ctx->reqlen + req->nbytes) == 0) {
1899		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1900		params.last = 0;
1901		params.more = 1;
1902		params.scmd1 = 0;
1903		params.bfr_len = bs;
1904	}
1905	skb = create_hash_wr(req, &params);
1906	if (IS_ERR(skb)) {
1907		error = PTR_ERR(skb);
1908		goto unmap;
1909	}
1910	req_ctx->reqlen = 0;
1911	req_ctx->hctx_wr.processed += params.sg_len;
1912	skb->dev = u_ctx->lldi.ports[0];
1913	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1914	chcr_send_wr(skb);
1915	return -EINPROGRESS;
1916unmap:
1917	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1918err:
1919	chcr_dec_wrcount(dev);
1920	return error;
1921}
1922
1923static int chcr_hmac_init(struct ahash_request *areq);
1924static int chcr_sha_init(struct ahash_request *areq);
1925
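/*
 * One-shot digest: reinitialise the request state, then process the
 * whole source like finup; a zero-length request is fed a
 * software-padded block so the hardware still produces a digest.
 */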
1926static int chcr_ahash_digest(struct ahash_request *req)
1927{
1928	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1929	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1930	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1931	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1932	struct chcr_context *ctx = h_ctx(rtfm);
1933	struct sk_buff *skb;
1934	struct hash_wr_param params;
1935	u8  bs;
1936	int error;
1937	unsigned int cpu;
1938
1939	cpu = get_cpu();
1940	req_ctx->txqidx = cpu % ctx->ntxq;
1941	req_ctx->rxqidx = cpu % ctx->nrxq;
1942	put_cpu();
1943
1944	if (is_hmac(crypto_ahash_tfm(rtfm)))
1945		chcr_hmac_init(req);
1946	else
1947		chcr_sha_init(req);
1948
1949	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1950	error = chcr_inc_wrcount(dev);
1951	if (error)
1952		return -ENXIO;
1953
1954	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1955						req_ctx->txqidx) &&
1956		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1957			error = -ENOSPC;
1958			goto err;
1959	}
1960
1961	chcr_init_hctx_per_wr(req_ctx);
1962	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1963	if (error) {
1964		error = -ENOMEM;
1965		goto err;
1966	}
1967
1968	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1969	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1970	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1971		params.kctx_len *= 2;
1972		params.opad_needed = 1;
1973	} else {
1974		params.opad_needed = 0;
1975	}
1976	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1977				HASH_SPACE_LEFT(params.kctx_len), 0);
1978	if (params.sg_len < req->nbytes) {
1979		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1980			params.kctx_len /= 2;
1981			params.opad_needed = 0;
1982		}
1983		params.last = 0;
1984		params.more = 1;
1985		params.scmd1 = 0;
1986		params.sg_len = rounddown(params.sg_len, bs);
1987		params.hash_size = params.alg_prm.result_size;
1988	} else {
1989		params.sg_len = req->nbytes;
1990		params.hash_size = crypto_ahash_digestsize(rtfm);
1991		params.last = 1;
1992		params.more = 0;
1993		params.scmd1 = req->nbytes + req_ctx->data_len;
1994
1995	}
1996	params.bfr_len = 0;
1997	req_ctx->hctx_wr.result = 1;
1998	req_ctx->hctx_wr.srcsg = req->src;
1999	req_ctx->data_len += params.bfr_len + params.sg_len;
2000
2001	if (req->nbytes == 0) {
2002		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
2003		params.more = 1;
2004		params.bfr_len = bs;
2005	}
2006
2007	skb = create_hash_wr(req, &params);
2008	if (IS_ERR(skb)) {
2009		error = PTR_ERR(skb);
2010		goto unmap;
2011	}
2012	req_ctx->hctx_wr.processed += params.sg_len;
2013	skb->dev = u_ctx->lldi.ports[0];
2014	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
2015	chcr_send_wr(skb);
2016	return -EINPROGRESS;
2017unmap:
2018	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2019err:
2020	chcr_dec_wrcount(dev);
2021	return error;
2022}
2023
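/*
 * Continuation step for hashes spanning several work requests: called
 * from the completion handler to push the next source chunk, resuming
 * at hctx_wr->srcsg/src_ofst where the previous WR stopped.
 */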
2024static int chcr_ahash_continue(struct ahash_request *req)
2025{
2026	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2027	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2028	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
2029	struct chcr_context *ctx = h_ctx(rtfm);
2030	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2031	struct sk_buff *skb;
2032	struct hash_wr_param params;
2033	u8  bs;
2034	int error;
2035	unsigned int cpu;
2036
2037	cpu = get_cpu();
2038	reqctx->txqidx = cpu % ctx->ntxq;
2039	reqctx->rxqidx = cpu % ctx->nrxq;
2040	put_cpu();
2041
2042	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2043	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2044	params.kctx_len = roundup(params.alg_prm.result_size, 16);
2045	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2046		params.kctx_len *= 2;
2047		params.opad_needed = 1;
2048	} else {
2049		params.opad_needed = 0;
2050	}
2051	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2052					    HASH_SPACE_LEFT(params.kctx_len),
2053					    hctx_wr->src_ofst);
2054	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2055		params.sg_len = req->nbytes - hctx_wr->processed;
2056	if (!hctx_wr->result ||
2057	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2058		if (is_hmac(crypto_ahash_tfm(rtfm))) {
2059			params.kctx_len /= 2;
2060			params.opad_needed = 0;
2061		}
2062		params.last = 0;
2063		params.more = 1;
2064		params.sg_len = rounddown(params.sg_len, bs);
2065		params.hash_size = params.alg_prm.result_size;
2066		params.scmd1 = 0;
2067	} else {
2068		params.last = 1;
2069		params.more = 0;
2070		params.hash_size = crypto_ahash_digestsize(rtfm);
2071		params.scmd1 = reqctx->data_len + params.sg_len;
2072	}
2073	params.bfr_len = 0;
2074	reqctx->data_len += params.sg_len;
2075	skb = create_hash_wr(req, &params);
2076	if (IS_ERR(skb)) {
2077		error = PTR_ERR(skb);
2078		goto err;
2079	}
2080	hctx_wr->processed += params.sg_len;
2081	skb->dev = u_ctx->lldi.ports[0];
2082	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2083	chcr_send_wr(skb);
2084	return 0;
2085err:
2086	return error;
2087}
2088
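/*
 * Completion handler for hash work requests: copy the final digest or
 * the updated partial hash out of the CPL payload, then either
 * complete the request or chain the next WR via chcr_ahash_continue().
 */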
2089static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2090					  unsigned char *input,
2091					  int err)
2092{
2093	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2094	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2095	int digestsize, updated_digestsize;
2096	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2097	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2098	struct chcr_dev *dev = h_ctx(tfm)->dev;
2099
2100	if (input == NULL)
2101		goto out;
2102	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2103	updated_digestsize = digestsize;
2104	if (digestsize == SHA224_DIGEST_SIZE)
2105		updated_digestsize = SHA256_DIGEST_SIZE;
2106	else if (digestsize == SHA384_DIGEST_SIZE)
2107		updated_digestsize = SHA512_DIGEST_SIZE;
2108
2109	if (hctx_wr->dma_addr) {
2110		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2111				 hctx_wr->dma_len, DMA_TO_DEVICE);
2112		hctx_wr->dma_addr = 0;
2113	}
2114	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2115				 req->nbytes)) {
2116		if (hctx_wr->result == 1) {
2117			hctx_wr->result = 0;
2118			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2119			       digestsize);
2120		} else {
2121			memcpy(reqctx->partial_hash,
2122			       input + sizeof(struct cpl_fw6_pld),
2123			       updated_digestsize);
2124
2125		}
2126		goto unmap;
2127	}
2128	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2129	       updated_digestsize);
2130
2131	err = chcr_ahash_continue(req);
2132	if (err)
2133		goto unmap;
2134	return;
2135unmap:
2136	if (hctx_wr->is_sg_map)
2137		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2138
2139
2140out:
2141	chcr_dec_wrcount(dev);
2142	ahash_request_complete(req, err);
2143}
2144
2145/*
2146 *	chcr_handle_resp - dispatch a completed work request to its
2147 *	type-specific handler and unmap the DMA buffers it used
2148 *	@req: crypto request
 */
2149int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2150			 int err)
2151{
2152	struct crypto_tfm *tfm = req->tfm;
2153	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2154	struct adapter *adap = padap(ctx->dev);
2155
2156	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2157	case CRYPTO_ALG_TYPE_AEAD:
2158		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2159		break;
2160
2161	case CRYPTO_ALG_TYPE_SKCIPHER:
2162		chcr_handle_cipher_resp(skcipher_request_cast(req),
2163					input, err);
2164		break;
2165	case CRYPTO_ALG_TYPE_AHASH:
2166		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2167		break;
	}
2168	atomic_inc(&adap->chcr_stats.complete);
2169	return err;
2170}
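/* Export/import serialise the software request state (buffered bytes,
 * running length and partial hash) so a hash can be suspended and
 * resumed on another request.
 */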
2171static int chcr_ahash_export(struct ahash_request *areq, void *out)
2172{
2173	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2174	struct chcr_ahash_req_ctx *state = out;
2175
2176	state->reqlen = req_ctx->reqlen;
2177	state->data_len = req_ctx->data_len;
2178	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2179	memcpy(state->partial_hash, req_ctx->partial_hash,
2180	       CHCR_HASH_MAX_DIGEST_SIZE);
2181	chcr_init_hctx_per_wr(state);
2182	return 0;
2183}
2184
2185static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2186{
2187	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2188	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2189
2190	req_ctx->reqlen = state->reqlen;
2191	req_ctx->data_len = state->data_len;
2192	req_ctx->reqbfr = req_ctx->bfr1;
2193	req_ctx->skbfr = req_ctx->bfr2;
2194	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2195	memcpy(req_ctx->partial_hash, state->partial_hash,
2196	       CHCR_HASH_MAX_DIGEST_SIZE);
2197	chcr_init_hctx_per_wr(req_ctx);
2198	return 0;
2199}
2200
2201static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2202			     unsigned int keylen)
2203{
2204	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2205	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2206	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2207	unsigned int i, err = 0, updated_digestsize;
2208
2209	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2210
2211	/* Use the key to calculate the ipad and opad. The ipad will be sent
2212	 * with the first request's data, the opad with the final hash result.
2213	 * The ipad lives in hmacctx->ipad and the opad in hmacctx->opad.
2214	 */
2215	shash->tfm = hmacctx->base_hash;
2216	if (keylen > bs) {
2217		err = crypto_shash_digest(shash, key, keylen,
2218					  hmacctx->ipad);
2219		if (err)
2220			goto out;
2221		keylen = digestsize;
2222	} else {
2223		memcpy(hmacctx->ipad, key, keylen);
2224	}
2225	memset(hmacctx->ipad + keylen, 0, bs - keylen);
2226	unsafe_memcpy(hmacctx->opad, hmacctx->ipad, bs,
2227		      "fortified memcpy causes -Wrestrict warning");
2228
2229	for (i = 0; i < bs / sizeof(int); i++) {
2230		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2231		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2232	}
2233
2234	updated_digestsize = digestsize;
2235	if (digestsize == SHA224_DIGEST_SIZE)
2236		updated_digestsize = SHA256_DIGEST_SIZE;
2237	else if (digestsize == SHA384_DIGEST_SIZE)
2238		updated_digestsize = SHA512_DIGEST_SIZE;
2239	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2240					hmacctx->ipad, digestsize);
2241	if (err)
2242		goto out;
2243	chcr_change_order(hmacctx->ipad, updated_digestsize);
2244
2245	err = chcr_compute_partial_hash(shash, hmacctx->opad,
2246					hmacctx->opad, digestsize);
2247	if (err)
2248		goto out;
2249	chcr_change_order(hmacctx->opad, updated_digestsize);
2250out:
2251	return err;
2252}
2253
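/*
 * XTS setkey: the two key halves are stored back to back; 24-byte
 * halves (AES-192) are each padded to 32 bytes so the hardware sees
 * both keys on a 16-byte boundary.
 */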
2254static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2255			       unsigned int key_len)
2256{
2257	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2258	unsigned short context_size = 0;
2259	int err;
2260
2261	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2262	if (err)
2263		goto badkey_err;
2264
2265	memcpy(ablkctx->key, key, key_len);
2266	ablkctx->enckey_len = key_len;
2267	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2268	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2269	/* Both XTS keys must be aligned to a 16-byte boundary by padding
2270	 * with zeros, so each 24-byte key gets 8 zero bytes of padding.
2271	 */
2272	if (key_len == 48) {
2273		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
2274				+ 16) >> 4;
2275		memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
2276		memset(ablkctx->key + 24, 0, 8);
2277		memset(ablkctx->key + 56, 0, 8);
2278		ablkctx->enckey_len = 64;
2279		ablkctx->key_ctx_hdr =
2280			FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
2281					 CHCR_KEYCTX_NO_KEY, 1,
2282					 0, context_size);
2283	} else {
2284		ablkctx->key_ctx_hdr =
2285		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2286				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2287				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2288				 CHCR_KEYCTX_NO_KEY, 1,
2289				 0, context_size);
2290	}
2291	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2292	return 0;
2293badkey_err:
2294	ablkctx->enckey_len = 0;
2295
2296	return err;
2297}
2298
2299static int chcr_sha_init(struct ahash_request *areq)
2300{
2301	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2302	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2303	int digestsize =  crypto_ahash_digestsize(tfm);
2304
2305	req_ctx->data_len = 0;
2306	req_ctx->reqlen = 0;
2307	req_ctx->reqbfr = req_ctx->bfr1;
2308	req_ctx->skbfr = req_ctx->bfr2;
2309	copy_hash_init_values(req_ctx->partial_hash, digestsize);
2310
2311	return 0;
2312}
2313
2314static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2315{
2316	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2317				 sizeof(struct chcr_ahash_req_ctx));
2318	return chcr_device_init(crypto_tfm_ctx(tfm));
2319}
2320
2321static int chcr_hmac_init(struct ahash_request *areq)
2322{
2323	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2324	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2325	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2326	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2327	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2328
2329	chcr_sha_init(areq);
2330	req_ctx->data_len = bs;
2331	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2332		if (digestsize == SHA224_DIGEST_SIZE)
2333			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2334			       SHA256_DIGEST_SIZE);
2335		else if (digestsize == SHA384_DIGEST_SIZE)
2336			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2337			       SHA512_DIGEST_SIZE);
2338		else
2339			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2340			       digestsize);
2341	}
2342	return 0;
2343}
2344
2345static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2346{
2347	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2348	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2349	unsigned int digestsize =
2350		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2351
2352	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2353				 sizeof(struct chcr_ahash_req_ctx));
2354	hmacctx->base_hash = chcr_alloc_shash(digestsize);
2355	if (IS_ERR(hmacctx->base_hash))
2356		return PTR_ERR(hmacctx->base_hash);
2357	return chcr_device_init(crypto_tfm_ctx(tfm));
2358}
2359
2360static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2361{
2362	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2363	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2364
2365	if (hmacctx->base_hash) {
2366		chcr_free_shash(hmacctx->base_hash);
2367		hmacctx->base_hash = NULL;
2368	}
2369}
2370
2371inline void chcr_aead_common_exit(struct aead_request *req)
2372{
2373	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2374	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2375	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2376
2377	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2378}
2379
2380static int chcr_aead_common_init(struct aead_request *req)
2381{
2382	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2383	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2384	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2385	unsigned int authsize = crypto_aead_authsize(tfm);
2386	int error = -EINVAL;
2387
2388	/* validate key size */
2389	if (aeadctx->enckey_len == 0)
2390		goto err;
2391	if (reqctx->op && req->cryptlen < authsize)
2392		goto err;
2393	if (reqctx->b0_len)
2394		reqctx->scratch_pad = reqctx->iv + IV;
2395	else
2396		reqctx->scratch_pad = NULL;
2397
2398	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2399				  reqctx->op);
2400	if (error) {
2401		error = -ENOMEM;
2402		goto err;
2403	}
2404
2405	return 0;
2406err:
2407	return error;
2408}
2409
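/*
 * Decide whether to punt to the software fallback: zero-length
 * payload, too many destination SGL entries, AAD larger than the
 * hardware accepts, or a work request exceeding SGE_MAX_WR_LEN.
 */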
2410static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2411				   int aadmax, int wrlen,
2412				   unsigned short op_type)
2413{
2414	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2415
2416	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2417	    dst_nents > MAX_DSGL_ENT ||
2418	    (req->assoclen > aadmax) ||
2419	    (wrlen > SGE_MAX_WR_LEN))
2420		return 1;
2421	return 0;
2422}
2423
2424static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2425{
2426	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2427	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2428	struct aead_request *subreq = aead_request_ctx_dma(req);
2429
2430	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2431	aead_request_set_callback(subreq, req->base.flags,
2432				  req->base.complete, req->base.data);
2433	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2434				 req->iv);
2435	aead_request_set_ad(subreq, req->assoclen);
2436	return op_type ? crypto_aead_decrypt(subreq) :
2437		crypto_aead_encrypt(subreq);
2438}
2439
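/*
 * Build the work request for the authenc (cipher + hash) AEADs: SEC
 * CPL header, key context carrying the cipher key plus the precomputed
 * ipad/opad partial hashes, destination DSGL, IV and source SGL.
 */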
2440static struct sk_buff *create_authenc_wr(struct aead_request *req,
2441					 unsigned short qid,
2442					 int size)
2443{
2444	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2445	struct chcr_context *ctx = a_ctx(tfm);
2446	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2447	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2448	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2449	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2450	struct sk_buff *skb = NULL;
2451	struct chcr_wr *chcr_req;
2452	struct cpl_rx_phys_dsgl *phys_cpl;
2453	struct ulptx_sgl *ulptx;
2454	unsigned int transhdr_len;
2455	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2456	unsigned int   kctx_len = 0, dnents, snents;
2457	unsigned int  authsize = crypto_aead_authsize(tfm);
2458	int error = -EINVAL;
2459	u8 *ivptr;
2460	int null = 0;
2461	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2462		GFP_ATOMIC;
2463	struct adapter *adap = padap(ctx->dev);
2464	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2465
2466	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2467	if (req->cryptlen == 0)
2468		return NULL;
2469
2470	reqctx->b0_len = 0;
2471	error = chcr_aead_common_init(req);
2472	if (error)
2473		return ERR_PTR(error);
2474
2475	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2476		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2477		null = 1;
2478	}
2479	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2480		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2481	dnents += MIN_AUTH_SG; // For IV
2482	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2483			       CHCR_SRC_SG_SIZE, 0);
2484	dst_size = get_space_for_phys_dsgl(dnents);
2485	kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2486		- sizeof(chcr_req->key_ctx);
2487	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2488	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2489			SGE_MAX_WR_LEN;
2490	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2491			: (sgl_len(snents) * 8);
2492	transhdr_len += temp;
2493	transhdr_len = roundup(transhdr_len, 16);
2494
2495	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2496				    transhdr_len, reqctx->op)) {
2497		atomic_inc(&adap->chcr_stats.fallback);
2498		chcr_aead_common_exit(req);
2499		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2500	}
2501	skb = alloc_skb(transhdr_len, flags);
2502	if (!skb) {
2503		error = -ENOMEM;
2504		goto err;
2505	}
2506
2507	chcr_req = __skb_put_zero(skb, transhdr_len);
2508
2509	temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2510
2511	/*
2512	 * Input order is AAD, IV and payload, where the IV is included as
2513	 * part of the authdata. All other fields are filled according to
2514	 * the hardware spec.
2515	 */
2516	chcr_req->sec_cpl.op_ivinsrtofst =
2517				FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2518	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2519	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2520					null ? 0 : 1 + IV,
2521					null ? 0 : IV + req->assoclen,
2522					req->assoclen + IV + 1,
2523					(temp & 0x1F0) >> 4);
2524	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2525					temp & 0xF,
2526					null ? 0 : req->assoclen + IV + 1,
2527					temp, temp);
2528	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2529	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2530		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2531	else
2532		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2533	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2534					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2535					temp,
2536					actx->auth_mode, aeadctx->hmac_ctrl,
2537					IV >> 1);
2538	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2539					 0, 0, dst_size);
2540
2541	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2542	if (reqctx->op == CHCR_ENCRYPT_OP ||
2543		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2544		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2545		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2546		       aeadctx->enckey_len);
2547	else
2548		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2549		       aeadctx->enckey_len);
2550
2551	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2552	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2553	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2554	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2555	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2556	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2557	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2558		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2559		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2560				CTR_RFC3686_IV_SIZE);
2561		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2562			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2563	} else {
2564		memcpy(ivptr, req->iv, IV);
2565	}
2566	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2567	chcr_add_aead_src_ent(req, ulptx);
2568	atomic_inc(&adap->chcr_stats.cipher_rqst);
2569	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2570		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2571	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2572		   transhdr_len, temp, 0);
2573	reqctx->skb = skb;
2574
2575	return skb;
2576err:
2577	chcr_aead_common_exit(req);
2578
2579	return ERR_PTR(error);
2580}
2581
2582int chcr_aead_dma_map(struct device *dev,
2583		      struct aead_request *req,
2584		      unsigned short op_type)
2585{
2586	int error;
2587	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2588	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2589	unsigned int authsize = crypto_aead_authsize(tfm);
2590	int src_len, dst_len;
2591
2592	/* Calculate and handle the src and dst sg lengths separately for
2593	 * in-place and out-of-place operations.
2594	 */
2595	if (req->src == req->dst) {
2596		src_len = req->assoclen + req->cryptlen + (op_type ?
2597							0 : authsize);
2598		dst_len = src_len;
2599	} else {
2600		src_len = req->assoclen + req->cryptlen;
2601		dst_len = req->assoclen + req->cryptlen + (op_type ?
2602							-authsize : authsize);
2603	}
2604
2605	if (!req->cryptlen || !src_len || !dst_len)
2606		return 0;
2607	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2608					DMA_BIDIRECTIONAL);
2609	if (dma_mapping_error(dev, reqctx->iv_dma))
2610		return -ENOMEM;
2611	if (reqctx->b0_len)
2612		reqctx->b0_dma = reqctx->iv_dma + IV;
2613	else
2614		reqctx->b0_dma = 0;
2615	if (req->src == req->dst) {
2616		error = dma_map_sg(dev, req->src,
2617				sg_nents_for_len(req->src, src_len),
2618					DMA_BIDIRECTIONAL);
2619		if (!error)
2620			goto err;
2621	} else {
2622		error = dma_map_sg(dev, req->src,
2623				   sg_nents_for_len(req->src, src_len),
2624				   DMA_TO_DEVICE);
2625		if (!error)
2626			goto err;
2627		error = dma_map_sg(dev, req->dst,
2628				   sg_nents_for_len(req->dst, dst_len),
2629				   DMA_FROM_DEVICE);
2630		if (!error) {
2631			dma_unmap_sg(dev, req->src,
2632				     sg_nents_for_len(req->src, src_len),
2633				     DMA_TO_DEVICE);
2634			goto err;
2635		}
2636	}
2637
2638	return 0;
2639err:
2640	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2641	return -ENOMEM;
2642}
2643
2644void chcr_aead_dma_unmap(struct device *dev,
2645			 struct aead_request *req,
2646			 unsigned short op_type)
2647{
2648	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2649	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2650	unsigned int authsize = crypto_aead_authsize(tfm);
2651	int src_len, dst_len;
2652
2653	/* Calculate and handle the src and dst sg lengths separately for
2654	 * in-place and out-of-place operations.
2655	 */
2656	if (req->src == req->dst) {
2657		src_len = req->assoclen + req->cryptlen + (op_type ?
2658							0 : authsize);
2659		dst_len = src_len;
2660	} else {
2661		src_len = req->assoclen + req->cryptlen;
2662		dst_len = req->assoclen + req->cryptlen + (op_type ?
2663						-authsize : authsize);
2664	}
2665
2666	if (!req->cryptlen || !src_len || !dst_len)
2667		return;
2668
2669	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2670					DMA_BIDIRECTIONAL);
2671	if (req->src == req->dst) {
2672		dma_unmap_sg(dev, req->src,
2673			     sg_nents_for_len(req->src, src_len),
2674			     DMA_BIDIRECTIONAL);
2675	} else {
2676		dma_unmap_sg(dev, req->src,
2677			     sg_nents_for_len(req->src, src_len),
2678			     DMA_TO_DEVICE);
2679		dma_unmap_sg(dev, req->dst,
2680			     sg_nents_for_len(req->dst, dst_len),
2681			     DMA_FROM_DEVICE);
2682	}
2683}
2684
2685void chcr_add_aead_src_ent(struct aead_request *req,
2686			   struct ulptx_sgl *ulptx)
2687{
2688	struct ulptx_walk ulp_walk;
2689	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2690
2691	if (reqctx->imm) {
2692		u8 *buf = (u8 *)ulptx;
2693
2694		if (reqctx->b0_len) {
2695			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2696			buf += reqctx->b0_len;
2697		}
2698		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2699				   buf, req->cryptlen + req->assoclen, 0);
2700	} else {
2701		ulptx_walk_init(&ulp_walk, ulptx);
2702		if (reqctx->b0_len)
2703			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2704					    reqctx->b0_dma);
2705		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2706				  req->assoclen,  0);
2707		ulptx_walk_end(&ulp_walk);
2708	}
2709}
2710
2711void chcr_add_aead_dst_ent(struct aead_request *req,
2712			   struct cpl_rx_phys_dsgl *phys_cpl,
2713			   unsigned short qid)
2714{
2715	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2716	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2717	struct dsgl_walk dsgl_walk;
2718	unsigned int authsize = crypto_aead_authsize(tfm);
2719	struct chcr_context *ctx = a_ctx(tfm);
2720	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2721	u32 temp;
2722	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2723
2724	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2725	dsgl_walk_init(&dsgl_walk, phys_cpl);
2726	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2727	temp = req->assoclen + req->cryptlen +
2728		(reqctx->op ? -authsize : authsize);
2729	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2730	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2731}
2732
2733void chcr_add_cipher_src_ent(struct skcipher_request *req,
2734			     void *ulptx,
2735			     struct  cipher_wr_param *wrparam)
2736{
2737	struct ulptx_walk ulp_walk;
2738	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2739	u8 *buf = ulptx;
2740
2741	memcpy(buf, reqctx->iv, IV);
2742	buf += IV;
2743	if (reqctx->imm) {
2744		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2745				   buf, wrparam->bytes, reqctx->processed);
2746	} else {
2747		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2748		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2749				  reqctx->src_ofst);
2750		reqctx->srcsg = ulp_walk.last_sg;
2751		reqctx->src_ofst = ulp_walk.last_sg_len;
2752		ulptx_walk_end(&ulp_walk);
2753	}
2754}
2755
2756void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2757			     struct cpl_rx_phys_dsgl *phys_cpl,
2758			     struct  cipher_wr_param *wrparam,
2759			     unsigned short qid)
2760{
2761	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2762	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2763	struct chcr_context *ctx = c_ctx(tfm);
2764	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2765	struct dsgl_walk dsgl_walk;
2766	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2767
2768	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2769	dsgl_walk_init(&dsgl_walk, phys_cpl);
2770	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2771			 reqctx->dst_ofst);
2772	reqctx->dstsg = dsgl_walk.last_sg;
2773	reqctx->dst_ofst = dsgl_walk.last_sg_len;
2774	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2775}
2776
2777void chcr_add_hash_src_ent(struct ahash_request *req,
2778			   struct ulptx_sgl *ulptx,
2779			   struct hash_wr_param *param)
2780{
2781	struct ulptx_walk ulp_walk;
2782	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2783
2784	if (reqctx->hctx_wr.imm) {
2785		u8 *buf = (u8 *)ulptx;
2786
2787		if (param->bfr_len) {
2788			memcpy(buf, reqctx->reqbfr, param->bfr_len);
2789			buf += param->bfr_len;
2790		}
2791
2792		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2793				   sg_nents(reqctx->hctx_wr.srcsg), buf,
2794				   param->sg_len, 0);
2795	} else {
2796		ulptx_walk_init(&ulp_walk, ulptx);
2797		if (param->bfr_len)
2798			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2799					    reqctx->hctx_wr.dma_addr);
2800		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2801				  param->sg_len, reqctx->hctx_wr.src_ofst);
2802		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2803		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2804		ulptx_walk_end(&ulp_walk);
2805	}
2806}
2807
2808int chcr_hash_dma_map(struct device *dev,
2809		      struct ahash_request *req)
2810{
2811	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2812	int error = 0;
2813
2814	if (!req->nbytes)
2815		return 0;
2816	error = dma_map_sg(dev, req->src, sg_nents(req->src),
2817			   DMA_TO_DEVICE);
2818	if (!error)
2819		return -ENOMEM;
2820	req_ctx->hctx_wr.is_sg_map = 1;
2821	return 0;
2822}
2823
2824void chcr_hash_dma_unmap(struct device *dev,
2825			 struct ahash_request *req)
2826{
2827	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2828
2829	if (!req->nbytes)
2830		return;
2831
2832	dma_unmap_sg(dev, req->src, sg_nents(req->src),
2833			   DMA_TO_DEVICE);
2834	req_ctx->hctx_wr.is_sg_map = 0;
2835
2836}
2837
2838int chcr_cipher_dma_map(struct device *dev,
2839			struct skcipher_request *req)
2840{
2841	int error;
2842
2843	if (req->src == req->dst) {
2844		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2845				   DMA_BIDIRECTIONAL);
2846		if (!error)
2847			goto err;
2848	} else {
2849		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2850				   DMA_TO_DEVICE);
2851		if (!error)
2852			goto err;
2853		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2854				   DMA_FROM_DEVICE);
2855		if (!error) {
2856			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2857				   DMA_TO_DEVICE);
2858			goto err;
2859		}
2860	}
2861
2862	return 0;
2863err:
2864	return -ENOMEM;
2865}
2866
2867void chcr_cipher_dma_unmap(struct device *dev,
2868			   struct skcipher_request *req)
2869{
2870	if (req->src == req->dst) {
2871		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2872				   DMA_BIDIRECTIONAL);
2873	} else {
2874		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2875				   DMA_TO_DEVICE);
2876		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2877				   DMA_FROM_DEVICE);
2878	}
2879}
2880
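/*
 * Encode the CCM message length big-endian into the trailing csize
 * bytes of the length field, rejecting lengths that do not fit.
 */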
2881static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2882{
2883	__be32 data;
2884
2885	memset(block, 0, csize);
2886	block += csize;
2887
2888	if (csize >= 4)
2889		csize = 4;
2890	else if (msglen > (unsigned int)(1 << (8 * csize)))
2891		return -EOVERFLOW;
2892
2893	data = cpu_to_be32(msglen);
2894	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2895
2896	return 0;
2897}
2898
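/*
 * Construct the CCM B0 block from the formatted IV: a flags byte
 * carrying the encoded tag length and the adata bit, followed by the
 * message length in the trailing L bytes.
 */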
2899static int generate_b0(struct aead_request *req, u8 *ivptr,
2900			unsigned short op_type)
2901{
2902	unsigned int l, lp, m;
2903	int rc;
2904	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2905	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2906	u8 *b0 = reqctx->scratch_pad;
2907
2908	m = crypto_aead_authsize(aead);
2909
2910	memcpy(b0, ivptr, 16);
2911
2912	lp = b0[0];
2913	l = lp + 1;
2914
2915	/* set m, bits 3-5 */
2916	*b0 |= (8 * ((m - 2) / 2));
2917
2918	/* set adata, bit 6, if associated data is used */
2919	if (req->assoclen)
2920		*b0 |= 64;
2921	rc = set_msg_len(b0 + 16 - l,
2922			 (op_type == CHCR_DECRYPT_OP) ?
2923			 req->cryptlen - m : req->cryptlen, l);
2924
2925	return rc;
2926}
2927
2928static inline int crypto_ccm_check_iv(const u8 *iv)
2929{
2930	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2931	if (iv[0] < 1 || iv[0] > 7)
2932		return -EINVAL;
2933
2934	return 0;
2935}
2936
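/*
 * Lay out the 16-byte CCM IV (for RFC4309: flags = 3, 3-byte salt,
 * 8-byte IV, zeroed counter), stash the AAD length just past B0 in the
 * scratch pad and generate B0 itself.
 */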
2937static int ccm_format_packet(struct aead_request *req,
2938			     u8 *ivptr,
2939			     unsigned int sub_type,
2940			     unsigned short op_type,
2941			     unsigned int assoclen)
2942{
2943	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2944	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2945	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2946	int rc = 0;
2947
2948	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2949		ivptr[0] = 3;
2950		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2951		memcpy(ivptr + 4, req->iv, 8);
2952		memset(ivptr + 12, 0, 4);
2953	} else {
2954		memcpy(ivptr, req->iv, 16);
2955	}
2956	if (assoclen)
2957		put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
2958
2959	rc = generate_b0(req, ivptr, op_type);
2960	/* zero the ctr value */
2961	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2962	return rc;
2963}
2964
2965static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2966				  unsigned int dst_size,
2967				  struct aead_request *req,
2968				  unsigned short op_type)
2969{
2970	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2971	struct chcr_context *ctx = a_ctx(tfm);
2972	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2973	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2974	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2975	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2976	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2977	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2978	unsigned int ccm_xtra;
2979	unsigned int tag_offset = 0, auth_offset = 0;
2980	unsigned int assoclen;
2981
2982	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2983
2984	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2985		assoclen = req->assoclen - 8;
2986	else
2987		assoclen = req->assoclen;
2988	ccm_xtra = CCM_B0_SIZE +
2989		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2990
2991	auth_offset = req->cryptlen ?
2992		(req->assoclen + IV + 1 + ccm_xtra) : 0;
2993	if (op_type == CHCR_DECRYPT_OP) {
2994		if (crypto_aead_authsize(tfm) != req->cryptlen)
2995			tag_offset = crypto_aead_authsize(tfm);
2996		else
2997			auth_offset = 0;
2998	}
2999
3000	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
3001	sec_cpl->pldlen =
3002		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
3003	/* For CCM, b0 is always present, so the AAD always starts at 1 */
3004	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3005				1 + IV,	IV + assoclen + ccm_xtra,
3006				req->assoclen + IV + 1 + ccm_xtra, 0);
3007
3008	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
3009					auth_offset, tag_offset,
3010					(op_type == CHCR_ENCRYPT_OP) ? 0 :
3011					crypto_aead_authsize(tfm));
3012	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
3013					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
3014					cipher_mode, mac_mode,
3015					aeadctx->hmac_ctrl, IV >> 1);
3016
3017	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
3018					0, dst_size);
3019}
3020
3021static int aead_ccm_validate_input(unsigned short op_type,
3022				   struct aead_request *req,
3023				   struct chcr_aead_ctx *aeadctx,
3024				   unsigned int sub_type)
3025{
3026	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
3027		if (crypto_ccm_check_iv(req->iv)) {
3028			pr_err("CCM: IV check fails\n");
3029			return -EINVAL;
3030		}
3031	} else {
3032		if (req->assoclen != 16 && req->assoclen != 20) {
3033			pr_err("RFC4309: Invalid AAD length %d\n",
3034			       req->assoclen);
3035			return -EINVAL;
3036		}
3037	}
3038	return 0;
3039}
3040
3041static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
3042					  unsigned short qid,
3043					  int size)
3044{
3045	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3046	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3047	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3048	struct sk_buff *skb = NULL;
3049	struct chcr_wr *chcr_req;
3050	struct cpl_rx_phys_dsgl *phys_cpl;
3051	struct ulptx_sgl *ulptx;
3052	unsigned int transhdr_len;
3053	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
3054	unsigned int sub_type, assoclen = req->assoclen;
3055	unsigned int authsize = crypto_aead_authsize(tfm);
3056	int error = -EINVAL;
3057	u8 *ivptr;
3058	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3059		GFP_ATOMIC;
3060	struct adapter *adap = padap(a_ctx(tfm)->dev);
3061
3062	sub_type = get_aead_subtype(tfm);
3063	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
3064		assoclen -= 8;
3065	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
3066	error = chcr_aead_common_init(req);
3067	if (error)
3068		return ERR_PTR(error);
3069
3070	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3071	if (error)
3072		goto err;
3073	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
3074			+ (reqctx->op ? -authsize : authsize),
3075			CHCR_DST_SG_SIZE, 0);
3076	dnents += MIN_CCM_SG; // For IV and B0
3077	dst_size = get_space_for_phys_dsgl(dnents);
3078	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3079			       CHCR_SRC_SG_SIZE, 0);
3080	snents += MIN_CCM_SG; //For B0
3081	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
3082	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3083	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
3084		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
3085	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
3086				     reqctx->b0_len, 16) :
3087		(sgl_len(snents) *  8);
3088	transhdr_len += temp;
3089	transhdr_len = roundup(transhdr_len, 16);
3090
3091	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
3092				reqctx->b0_len, transhdr_len, reqctx->op)) {
3093		atomic_inc(&adap->chcr_stats.fallback);
3094		chcr_aead_common_exit(req);
3095		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3096	}
3097	skb = alloc_skb(transhdr_len,  flags);
3098
3099	if (!skb) {
3100		error = -ENOMEM;
3101		goto err;
3102	}
3103
3104	chcr_req = __skb_put_zero(skb, transhdr_len);
3105
3106	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3107
3108	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3109	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3110	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3111			aeadctx->key, aeadctx->enckey_len);
3112
3113	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3114	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3115	ulptx = (struct ulptx_sgl *)(ivptr + IV);
3116	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3117	if (error)
3118		goto dstmap_fail;
3119	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3120	chcr_add_aead_src_ent(req, ulptx);
3121
3122	atomic_inc(&adap->chcr_stats.aead_rqst);
3123	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3124		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3125		reqctx->b0_len) : 0);
3126	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3127		    transhdr_len, temp, 0);
3128	reqctx->skb = skb;
3129
3130	return skb;
3131dstmap_fail:
3132	kfree_skb(skb);
3133err:
3134	chcr_aead_common_exit(req);
3135	return ERR_PTR(error);
3136}
3137
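/*
 * Build the GCM work request: the key context carries the AES key
 * followed by the GHASH subkey H, and the 16-byte IV is laid out as
 * SALT | IV | 0x00000001 for RFC4106 or IV | 0x00000001 for plain GCM.
 */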
3138static struct sk_buff *create_gcm_wr(struct aead_request *req,
3139				     unsigned short qid,
3140				     int size)
3141{
3142	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3143	struct chcr_context *ctx = a_ctx(tfm);
3144	struct uld_ctx *u_ctx = ULD_CTX(ctx);
3145	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3146	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3147	struct sk_buff *skb = NULL;
3148	struct chcr_wr *chcr_req;
3149	struct cpl_rx_phys_dsgl *phys_cpl;
3150	struct ulptx_sgl *ulptx;
3151	unsigned int transhdr_len, dnents = 0, snents;
3152	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3153	unsigned int authsize = crypto_aead_authsize(tfm);
3154	int error = -EINVAL;
3155	u8 *ivptr;
3156	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3157		GFP_ATOMIC;
3158	struct adapter *adap = padap(ctx->dev);
3159	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3160
3161	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
3162	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3163		assoclen = req->assoclen - 8;
3164
3165	reqctx->b0_len = 0;
3166	error = chcr_aead_common_init(req);
3167	if (error)
3168		return ERR_PTR(error);
3169	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3170				(reqctx->op ? -authsize : authsize),
3171				CHCR_DST_SG_SIZE, 0);
3172	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3173			       CHCR_SRC_SG_SIZE, 0);
3174	dnents += MIN_GCM_SG; // For IV
3175	dst_size = get_space_for_phys_dsgl(dnents);
3176	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3177	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3178	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3179			SGE_MAX_WR_LEN;
3180	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3181		(sgl_len(snents) * 8);
3182	transhdr_len += temp;
3183	transhdr_len = roundup(transhdr_len, 16);
3184	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3185			    transhdr_len, reqctx->op)) {
3186
3187		atomic_inc(&adap->chcr_stats.fallback);
3188		chcr_aead_common_exit(req);
3189		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3190	}
3191	skb = alloc_skb(transhdr_len, flags);
3192	if (!skb) {
3193		error = -ENOMEM;
3194		goto err;
3195	}
3196
3197	chcr_req = __skb_put_zero(skb, transhdr_len);
3198
3199	//Offset of tag from end
3200	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3201	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3202						rx_channel_id, 2, 1);
3203	chcr_req->sec_cpl.pldlen =
3204		htonl(req->assoclen + IV + req->cryptlen);
3205	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3206					assoclen ? 1 + IV : 0,
3207					assoclen ? IV + assoclen : 0,
3208					req->assoclen + IV + 1, 0);
3209	chcr_req->sec_cpl.cipherstop_lo_authinsert =
3210			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3211						temp, temp);
3212	chcr_req->sec_cpl.seqno_numivs =
3213			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3214					CHCR_ENCRYPT_OP) ? 1 : 0,
3215					CHCR_SCMD_CIPHER_MODE_AES_GCM,
3216					CHCR_SCMD_AUTH_MODE_GHASH,
3217					aeadctx->hmac_ctrl, IV >> 1);
3218	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3219					0, 0, dst_size);
3220	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3221	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3222	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3223	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3224
3225	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3226	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3227	/* Prepare a 16-byte IV: */
3228	/* SALT | IV | 0x00000001 */
3229	if (get_aead_subtype(tfm) ==
3230	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3231		memcpy(ivptr, aeadctx->salt, 4);
3232		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3233	} else {
3234		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3235	}
3236	put_unaligned_be32(0x01, &ivptr[12]);
3237	ulptx = (struct ulptx_sgl *)(ivptr + 16);
3238
3239	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3240	chcr_add_aead_src_ent(req, ulptx);
3241	atomic_inc(&adap->chcr_stats.aead_rqst);
3242	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3243		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3244	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3245		    transhdr_len, temp, reqctx->verify);
3246	reqctx->skb = skb;
3247	return skb;
3248
3249err:
3250	chcr_aead_common_exit(req);
3251	return ERR_PTR(error);
3252}
3253
3254
3255
3256static int chcr_aead_cra_init(struct crypto_aead *tfm)
3257{
3258	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3259	struct aead_alg *alg = crypto_aead_alg(tfm);
3260
3261	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3262					       CRYPTO_ALG_NEED_FALLBACK |
3263					       CRYPTO_ALG_ASYNC);
3264	if  (IS_ERR(aeadctx->sw_cipher))
3265		return PTR_ERR(aeadctx->sw_cipher);
3266	crypto_aead_set_reqsize_dma(
3267		tfm, max(sizeof(struct chcr_aead_reqctx),
3268			 sizeof(struct aead_request) +
3269			 crypto_aead_reqsize(aeadctx->sw_cipher)));
3270	return chcr_device_init(a_ctx(tfm));
3271}
3272
3273static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3274{
3275	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3276
3277	crypto_free_aead(aeadctx->sw_cipher);
3278}
3279
3280static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3281					unsigned int authsize)
3282{
3283	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3284
3285	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3286	aeadctx->mayverify = VERIFY_HW;
3287	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3288}
3289static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3290				    unsigned int authsize)
3291{
3292	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3293	u32 maxauth = crypto_aead_maxauthsize(tfm);
3294
3295	/* The SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3296	 * does not hold for SHA1, so the authsize == 12 check must come before
3297	 * the authsize == (maxauth >> 1) check.
3298	 */
3299	if (authsize == ICV_4) {
3300		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3301		aeadctx->mayverify = VERIFY_HW;
3302	} else if (authsize == ICV_6) {
3303		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3304		aeadctx->mayverify = VERIFY_HW;
3305	} else if (authsize == ICV_10) {
3306		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3307		aeadctx->mayverify = VERIFY_HW;
3308	} else if (authsize == ICV_12) {
3309		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3310		aeadctx->mayverify = VERIFY_HW;
3311	} else if (authsize == ICV_14) {
3312		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3313		aeadctx->mayverify = VERIFY_HW;
3314	} else if (authsize == (maxauth >> 1)) {
3315		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3316		aeadctx->mayverify = VERIFY_HW;
3317	} else if (authsize == maxauth) {
3318		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3319		aeadctx->mayverify = VERIFY_HW;
3320	} else {
3321		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3322		aeadctx->mayverify = VERIFY_SW;
3323	}
3324	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3325}
3326
3327
3328static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3329{
3330	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3331
3332	switch (authsize) {
3333	case ICV_4:
3334		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3335		aeadctx->mayverify = VERIFY_HW;
3336		break;
3337	case ICV_8:
3338		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3339		aeadctx->mayverify = VERIFY_HW;
3340		break;
3341	case ICV_12:
3342		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3343		aeadctx->mayverify = VERIFY_HW;
3344		break;
3345	case ICV_14:
3346		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3347		aeadctx->mayverify = VERIFY_HW;
3348		break;
3349	case ICV_16:
3350		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3351		aeadctx->mayverify = VERIFY_HW;
3352		break;
3353	case ICV_13:
3354	case ICV_15:
3355		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3356		aeadctx->mayverify = VERIFY_SW;
3357		break;
3358	default:
3359		return -EINVAL;
3360	}
3361	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3362}
3363
3364static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3365					  unsigned int authsize)
3366{
3367	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3368
3369	switch (authsize) {
3370	case ICV_8:
3371		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3372		aeadctx->mayverify = VERIFY_HW;
3373		break;
3374	case ICV_12:
3375		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3376		aeadctx->mayverify = VERIFY_HW;
3377		break;
3378	case ICV_16:
3379		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3380		aeadctx->mayverify = VERIFY_HW;
3381		break;
3382	default:
3383		return -EINVAL;
3384	}
3385	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3386}
3387
3388static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3389				unsigned int authsize)
3390{
3391	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3392
3393	switch (authsize) {
3394	case ICV_4:
3395		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3396		aeadctx->mayverify = VERIFY_HW;
3397		break;
3398	case ICV_6:
3399		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3400		aeadctx->mayverify = VERIFY_HW;
3401		break;
3402	case ICV_8:
3403		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3404		aeadctx->mayverify = VERIFY_HW;
3405		break;
3406	case ICV_10:
3407		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3408		aeadctx->mayverify = VERIFY_HW;
3409		break;
3410	case ICV_12:
3411		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3412		aeadctx->mayverify = VERIFY_HW;
3413		break;
3414	case ICV_14:
3415		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3416		aeadctx->mayverify = VERIFY_HW;
3417		break;
3418	case ICV_16:
3419		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3420		aeadctx->mayverify = VERIFY_HW;
3421		break;
3422	default:
3423		return -EINVAL;
3424	}
3425	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3426}
3427
3428static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3429				const u8 *key,
3430				unsigned int keylen)
3431{
3432	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3433	unsigned char ck_size, mk_size;
3434	int key_ctx_size = 0;
3435
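	/* The CCM key context holds the header plus room for two 16-byte
	 * aligned copies of the key (e.g. AES-128: sizeof(struct _key_ctx)
	 * + 32); the header encodes the size in 16-byte units, hence the
	 * key_ctx_size >> 4 below.
	 */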
3436	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3437	if (keylen == AES_KEYSIZE_128) {
3438		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3439		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3440	} else if (keylen == AES_KEYSIZE_192) {
3441		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3442		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3443	} else if (keylen == AES_KEYSIZE_256) {
3444		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3445		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3446	} else {
3447		aeadctx->enckey_len = 0;
3448		return	-EINVAL;
3449	}
3450	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3451						key_ctx_size >> 4);
3452	memcpy(aeadctx->key, key, keylen);
3453	aeadctx->enckey_len = keylen;
3454
3455	return 0;
3456}
3457
3458static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3459				const u8 *key,
3460				unsigned int keylen)
3461{
3462	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3463	int error;
3464
3465	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3466	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3467			      CRYPTO_TFM_REQ_MASK);
3468	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3469	if (error)
3470		return error;
3471	return chcr_ccm_common_setkey(aead, key, keylen);
3472}
3473
3474static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3475				    unsigned int keylen)
3476{
3477	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3478	int error;
3479
3480	if (keylen < 3) {
3481		aeadctx->enckey_len = 0;
3482		return	-EINVAL;
3483	}
3484	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3485	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3486			      CRYPTO_TFM_REQ_MASK);
3487	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3488	if (error)
3489		return error;
3490	keylen -= 3;
3491	memcpy(aeadctx->salt, key + keylen, 3);
3492	return chcr_ccm_common_setkey(aead, key, keylen);
3493}
3494
3495static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3496			   unsigned int keylen)
3497{
3498	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3499	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3500	unsigned int ck_size;
3501	int ret = 0, key_ctx_size = 0;
3502	struct crypto_aes_ctx aes;
3503
3504	aeadctx->enckey_len = 0;
3505	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3506	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3507			      & CRYPTO_TFM_REQ_MASK);
3508	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3509	if (ret)
3510		goto out;
3511
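	/* An RFC 4106 key carries a 4-byte nonce salt after the AES key
	 * (RFC 4106, section 8.1); strip it off and stash it for IV
	 * construction.
	 */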
3512	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3513	    keylen > 3) {
3514		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3515		memcpy(aeadctx->salt, key + keylen, 4);
3516	}
3517	if (keylen == AES_KEYSIZE_128) {
3518		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3519	} else if (keylen == AES_KEYSIZE_192) {
3520		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3521	} else if (keylen == AES_KEYSIZE_256) {
3522		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3523	} else {
3524		pr_err("GCM: Invalid key length %u\n", keylen);
3525		ret = -EINVAL;
3526		goto out;
3527	}
3528
3529	memcpy(aeadctx->key, key, keylen);
3530	aeadctx->enckey_len = keylen;
3531	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3532		AEAD_H_SIZE;
3533	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3534						CHCR_KEYCTX_MAC_KEY_SIZE_128,
3535						0, 0,
3536						key_ctx_size >> 4);
3537	/* Calculate the H = CIPH(K, 0 repeated 16 times).
3538	 * It will go in key context
3539	 */
3540	ret = aes_expandkey(&aes, key, keylen);
3541	if (ret) {
3542		aeadctx->enckey_len = 0;
3543		goto out;
3544	}
3545	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3546	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3547	memzero_explicit(&aes, sizeof(aes));
3548
3549out:
3550	return ret;
3551}
3552
3553static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3554				   unsigned int keylen)
3555{
3556	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3557	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3558	/* holds both the auth and the cipher key */
3559	struct crypto_authenc_keys keys;
3560	unsigned int bs, subtype;
3561	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3562	int err = 0, i, key_ctx_len = 0;
3563	unsigned char ck_size = 0;
3564	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3565	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3566	struct algo_param param;
3567	int align;
3568	u8 *o_ptr = NULL;
3569
3570	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3571	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3572			      & CRYPTO_TFM_REQ_MASK);
3573	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3574	if (err)
3575		goto out;
3576
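	/* The authenc key blob is an rtattr holding enckeylen, followed by
	 * the auth key and then the enc key; extractkeys() splits it into
	 * keys.authkey and keys.enckey.
	 */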
3577	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3578		goto out;
3579
3580	if (get_alg_config(&param, max_authsize)) {
3581		pr_err("Unsupported digest size\n");
3582		goto out;
3583	}
3584	subtype = get_aead_subtype(authenc);
3585	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3586	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3587		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3588			goto out;
3589		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3590		- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3591		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3592	}
3593	if (keys.enckeylen == AES_KEYSIZE_128) {
3594		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3595	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3596		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3597	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3598		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3599	} else {
3600		pr_err("Unsupported cipher key length %u\n", keys.enckeylen);
3601		goto out;
3602	}
3603
3604	/* Copy only the encryption key. The auth key is used only to
3605	 * generate h(ipad) and h(opad), so it is not needed again.
3606	 * authkeylen equals the hash digest size.
3607	 */
3608	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3609	aeadctx->enckey_len = keys.enckeylen;
3610	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3611	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3612
3613		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3614			    aeadctx->enckey_len << 3);
3615	}
3616	base_hash = chcr_alloc_shash(max_authsize);
3617	if (IS_ERR(base_hash)) {
3618		pr_err("Base driver cannot be loaded\n");
3619		goto out;
3620	}
3621	{
3622		SHASH_DESC_ON_STACK(shash, base_hash);
3623
3624		shash->tfm = base_hash;
3625		bs = crypto_shash_blocksize(base_hash);
3626		align = KEYCTX_ALIGN_PAD(max_authsize);
3627		o_ptr = actx->h_iopad + param.result_size + align;
3628
3629		if (keys.authkeylen > bs) {
3630			err = crypto_shash_digest(shash, keys.authkey,
3631						  keys.authkeylen,
3632						  o_ptr);
3633			if (err) {
3634				pr_err("Digest of the auth key failed\n");
3635				goto out;
3636			}
3637			keys.authkeylen = max_authsize;
3638		} else {
3639			memcpy(o_ptr, keys.authkey, keys.authkeylen);
		}
3640
3641		/* Compute the ipad-digest*/
3642		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3643		memcpy(pad, o_ptr, keys.authkeylen);
3644		for (i = 0; i < bs >> 2; i++)
3645			*((unsigned int *)pad + i) ^= IPAD_DATA;
3646
3647		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3648					      max_authsize))
3649			goto out;
3650		/* Compute the opad-digest */
3651		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3652		memcpy(pad, o_ptr, keys.authkeylen);
3653		for (i = 0; i < bs >> 2; i++)
3654			*((unsigned int *)pad + i) ^= OPAD_DATA;
3655
3656		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3657			goto out;
3658
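		/* actx->h_iopad and o_ptr now hold the exported inner
		 * (ipad) and outer (opad) partial hash states; the hardware
		 * resumes HMAC from these, so the auth key is never
		 * reprocessed per request.
		 */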
3659		/* convert the ipad and opad digest to network order */
3660		chcr_change_order(actx->h_iopad, param.result_size);
3661		chcr_change_order(o_ptr, param.result_size);
3662		key_ctx_len = sizeof(struct _key_ctx) +
3663			roundup(keys.enckeylen, 16) +
3664			(param.result_size + align) * 2;
3665		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3666						0, 1, key_ctx_len >> 4);
3667		actx->auth_mode = param.auth_mode;
3668		chcr_free_shash(base_hash);
3669
3670		memzero_explicit(&keys, sizeof(keys));
3671		return 0;
3672	}
3673out:
3674	aeadctx->enckey_len = 0;
3675	memzero_explicit(&keys, sizeof(keys));
3676	if (!IS_ERR(base_hash))
3677		chcr_free_shash(base_hash);
3678	return -EINVAL;
3679}
3680
3681static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3682					const u8 *key, unsigned int keylen)
3683{
3684	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3685	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3686	struct crypto_authenc_keys keys;
3687	int err;
3688	/* the key blob carries both the auth and the cipher key */
3689	unsigned int subtype;
3690	int key_ctx_len = 0;
3691	unsigned char ck_size = 0;
3692
3693	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3694	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3695			      & CRYPTO_TFM_REQ_MASK);
3696	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3697	if (err)
3698		goto out;
3699
3700	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3701		goto out;
3702
3703	subtype = get_aead_subtype(authenc);
3704	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3705	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3706		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3707			goto out;
3708		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3709			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3710		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3711	}
3712	if (keys.enckeylen == AES_KEYSIZE_128) {
3713		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3714	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3715		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3716	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3717		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3718	} else {
3719		pr_err("Unsupported cipher key length %u\n", keys.enckeylen);
3720		goto out;
3721	}
3722	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3723	aeadctx->enckey_len = keys.enckeylen;
3724	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3725	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3726		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3727				aeadctx->enckey_len << 3);
3728	}
3729	key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3730
3731	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3732						0, key_ctx_len >> 4);
3733	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3734	memzero_explicit(&keys, sizeof(keys));
3735	return 0;
3736out:
3737	aeadctx->enckey_len = 0;
3738	memzero_explicit(&keys, sizeof(keys));
3739	return -EINVAL;
3740}
3741
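/* Common AEAD submission path: take a WR reference (or bounce to the
 * software fallback if the device is detaching), fail with -ENOSPC when
 * the queue is full and the request may not be backlogged, then build
 * the work request via create_wr_fn and post it. Returns -EINPROGRESS
 * once the WR is in flight.
 */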
3742static int chcr_aead_op(struct aead_request *req,
3743			int size,
3744			create_wr_t create_wr_fn)
3745{
3746	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3747	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3748	struct chcr_context *ctx = a_ctx(tfm);
3749	struct uld_ctx *u_ctx = ULD_CTX(ctx);
3750	struct sk_buff *skb;
3751	struct chcr_dev *cdev;
3752
3753	cdev = a_ctx(tfm)->dev;
3754	if (!cdev) {
3755		pr_err("%s : No crypto device.\n", __func__);
3756		return -ENXIO;
3757	}
3758
3759	if (chcr_inc_wrcount(cdev)) {
3760	/* Detach state for CHCR means lldi or padap is freed;
3761	 * we cannot take a WR reference, so fall back to software.
3762	 */
3763		return chcr_aead_fallback(req, reqctx->op);
3764	}
3765
3766	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3767					reqctx->txqidx) &&
3768		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
3769			chcr_dec_wrcount(cdev);
3770			return -ENOSPC;
3771	}
3772
3773	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3774	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
3775		pr_err("RFC4106: Invalid value of assoclen %u\n",
3776		       req->assoclen);
		chcr_dec_wrcount(cdev); /* balance chcr_inc_wrcount() above */
3777		return -EINVAL;
3778	}
3779
3780	/* Form a WR from req */
3781	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3782
3783	if (IS_ERR_OR_NULL(skb)) {
3784		chcr_dec_wrcount(cdev);
3785		return PTR_ERR_OR_ZERO(skb);
3786	}
3787
3788	skb->dev = u_ctx->lldi.ports[0];
3789	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
3790	chcr_send_wr(skb);
3791	return -EINPROGRESS;
3792}
3793
3794static int chcr_aead_encrypt(struct aead_request *req)
3795{
3796	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3797	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3798	struct chcr_context *ctx = a_ctx(tfm);
3799	unsigned int cpu;
3800
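	/* Spread requests across the configured tx/rx queues keyed on the
	 * submitting CPU.
	 */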
3801	cpu = get_cpu();
3802	reqctx->txqidx = cpu % ctx->ntxq;
3803	reqctx->rxqidx = cpu % ctx->nrxq;
3804	put_cpu();
3805
3806	reqctx->verify = VERIFY_HW;
3807	reqctx->op = CHCR_ENCRYPT_OP;
3808
3809	switch (get_aead_subtype(tfm)) {
3810	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3811	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3812	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3813	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3814		return chcr_aead_op(req, 0, create_authenc_wr);
3815	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3816	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3817		return chcr_aead_op(req, 0, create_aead_ccm_wr);
3818	default:
3819		return chcr_aead_op(req, 0, create_gcm_wr);
3820	}
3821}
3822
3823static int chcr_aead_decrypt(struct aead_request *req)
3824{
3825	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3826	struct chcr_context *ctx = a_ctx(tfm);
3827	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3828	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3829	int size;
3830	unsigned int cpu;
3831
3832	cpu = get_cpu();
3833	reqctx->txqidx = cpu % ctx->ntxq;
3834	reqctx->rxqidx = cpu % ctx->nrxq;
3835	put_cpu();
3836
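	/* When the tag length has no hardware truncation mode, reserve
	 * room for the full digest and compare it in software.
	 */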
3837	if (aeadctx->mayverify == VERIFY_SW) {
3838		size = crypto_aead_maxauthsize(tfm);
3839		reqctx->verify = VERIFY_SW;
3840	} else {
3841		size = 0;
3842		reqctx->verify = VERIFY_HW;
3843	}
3844	reqctx->op = CHCR_DECRYPT_OP;
3845	switch (get_aead_subtype(tfm)) {
3846	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3847	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3848	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3849	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3850		return chcr_aead_op(req, size, create_authenc_wr);
3851	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3852	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3853		return chcr_aead_op(req, size, create_aead_ccm_wr);
3854	default:
3855		return chcr_aead_op(req, size, create_gcm_wr);
3856	}
3857}
3858
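/* Template table for every algorithm the driver exposes; the common
 * callbacks and flags are filled in by chcr_register_alg() and
 * is_registered tracks registration state. Once registered, the
 * algorithms are reachable through the normal crypto API, e.g.
 * (sketch):
 *
 *	struct crypto_skcipher *tfm =
 *		crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 * where the core selects "cbc-aes-chcr" whenever its priority wins.
 */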
3859static struct chcr_alg_template driver_algs[] = {
3860	/* AES-CBC */
3861	{
3862		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3863		.is_registered = 0,
3864		.alg.skcipher = {
3865			.base.cra_name		= "cbc(aes)",
3866			.base.cra_driver_name	= "cbc-aes-chcr",
3867			.base.cra_blocksize	= AES_BLOCK_SIZE,
3868
3869			.init			= chcr_init_tfm,
3870			.exit			= chcr_exit_tfm,
3871			.min_keysize		= AES_MIN_KEY_SIZE,
3872			.max_keysize		= AES_MAX_KEY_SIZE,
3873			.ivsize			= AES_BLOCK_SIZE,
3874			.setkey			= chcr_aes_cbc_setkey,
3875			.encrypt		= chcr_aes_encrypt,
3876			.decrypt		= chcr_aes_decrypt,
3877			}
3878	},
3879	{
3880		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3881		.is_registered = 0,
3882		.alg.skcipher = {
3883			.base.cra_name		= "xts(aes)",
3884			.base.cra_driver_name	= "xts-aes-chcr",
3885			.base.cra_blocksize	= AES_BLOCK_SIZE,
3886
3887			.init			= chcr_init_tfm,
3888			.exit			= chcr_exit_tfm,
3889			.min_keysize		= 2 * AES_MIN_KEY_SIZE,
3890			.max_keysize		= 2 * AES_MAX_KEY_SIZE,
3891			.ivsize			= AES_BLOCK_SIZE,
3892			.setkey			= chcr_aes_xts_setkey,
3893			.encrypt		= chcr_aes_encrypt,
3894			.decrypt		= chcr_aes_decrypt,
3895			}
3896	},
3897	{
3898		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3899		.is_registered = 0,
3900		.alg.skcipher = {
3901			.base.cra_name		= "ctr(aes)",
3902			.base.cra_driver_name	= "ctr-aes-chcr",
3903			.base.cra_blocksize	= 1,
3904
3905			.init			= chcr_init_tfm,
3906			.exit			= chcr_exit_tfm,
3907			.min_keysize		= AES_MIN_KEY_SIZE,
3908			.max_keysize		= AES_MAX_KEY_SIZE,
3909			.ivsize			= AES_BLOCK_SIZE,
3910			.setkey			= chcr_aes_ctr_setkey,
3911			.encrypt		= chcr_aes_encrypt,
3912			.decrypt		= chcr_aes_decrypt,
3913		}
3914	},
3915	{
3916		.type = CRYPTO_ALG_TYPE_SKCIPHER |
3917			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3918		.is_registered = 0,
3919		.alg.skcipher = {
3920			.base.cra_name		= "rfc3686(ctr(aes))",
3921			.base.cra_driver_name	= "rfc3686-ctr-aes-chcr",
3922			.base.cra_blocksize	= 1,
3923
3924			.init			= chcr_rfc3686_init,
3925			.exit			= chcr_exit_tfm,
3926			.min_keysize		= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3927			.max_keysize		= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3928			.ivsize			= CTR_RFC3686_IV_SIZE,
3929			.setkey			= chcr_aes_rfc3686_setkey,
3930			.encrypt		= chcr_aes_encrypt,
3931			.decrypt		= chcr_aes_decrypt,
3932		}
3933	},
3934	/* SHA */
3935	{
3936		.type = CRYPTO_ALG_TYPE_AHASH,
3937		.is_registered = 0,
3938		.alg.hash = {
3939			.halg.digestsize = SHA1_DIGEST_SIZE,
3940			.halg.base = {
3941				.cra_name = "sha1",
3942				.cra_driver_name = "sha1-chcr",
3943				.cra_blocksize = SHA1_BLOCK_SIZE,
3944			}
3945		}
3946	},
3947	{
3948		.type = CRYPTO_ALG_TYPE_AHASH,
3949		.is_registered = 0,
3950		.alg.hash = {
3951			.halg.digestsize = SHA256_DIGEST_SIZE,
3952			.halg.base = {
3953				.cra_name = "sha256",
3954				.cra_driver_name = "sha256-chcr",
3955				.cra_blocksize = SHA256_BLOCK_SIZE,
3956			}
3957		}
3958	},
3959	{
3960		.type = CRYPTO_ALG_TYPE_AHASH,
3961		.is_registered = 0,
3962		.alg.hash = {
3963			.halg.digestsize = SHA224_DIGEST_SIZE,
3964			.halg.base = {
3965				.cra_name = "sha224",
3966				.cra_driver_name = "sha224-chcr",
3967				.cra_blocksize = SHA224_BLOCK_SIZE,
3968			}
3969		}
3970	},
3971	{
3972		.type = CRYPTO_ALG_TYPE_AHASH,
3973		.is_registered = 0,
3974		.alg.hash = {
3975			.halg.digestsize = SHA384_DIGEST_SIZE,
3976			.halg.base = {
3977				.cra_name = "sha384",
3978				.cra_driver_name = "sha384-chcr",
3979				.cra_blocksize = SHA384_BLOCK_SIZE,
3980			}
3981		}
3982	},
3983	{
3984		.type = CRYPTO_ALG_TYPE_AHASH,
3985		.is_registered = 0,
3986		.alg.hash = {
3987			.halg.digestsize = SHA512_DIGEST_SIZE,
3988			.halg.base = {
3989				.cra_name = "sha512",
3990				.cra_driver_name = "sha512-chcr",
3991				.cra_blocksize = SHA512_BLOCK_SIZE,
3992			}
3993		}
3994	},
3995	/* HMAC */
3996	{
3997		.type = CRYPTO_ALG_TYPE_HMAC,
3998		.is_registered = 0,
3999		.alg.hash = {
4000			.halg.digestsize = SHA1_DIGEST_SIZE,
4001			.halg.base = {
4002				.cra_name = "hmac(sha1)",
4003				.cra_driver_name = "hmac-sha1-chcr",
4004				.cra_blocksize = SHA1_BLOCK_SIZE,
4005			}
4006		}
4007	},
4008	{
4009		.type = CRYPTO_ALG_TYPE_HMAC,
4010		.is_registered = 0,
4011		.alg.hash = {
4012			.halg.digestsize = SHA224_DIGEST_SIZE,
4013			.halg.base = {
4014				.cra_name = "hmac(sha224)",
4015				.cra_driver_name = "hmac-sha224-chcr",
4016				.cra_blocksize = SHA224_BLOCK_SIZE,
4017			}
4018		}
4019	},
4020	{
4021		.type = CRYPTO_ALG_TYPE_HMAC,
4022		.is_registered = 0,
4023		.alg.hash = {
4024			.halg.digestsize = SHA256_DIGEST_SIZE,
4025			.halg.base = {
4026				.cra_name = "hmac(sha256)",
4027				.cra_driver_name = "hmac-sha256-chcr",
4028				.cra_blocksize = SHA256_BLOCK_SIZE,
4029			}
4030		}
4031	},
4032	{
4033		.type = CRYPTO_ALG_TYPE_HMAC,
4034		.is_registered = 0,
4035		.alg.hash = {
4036			.halg.digestsize = SHA384_DIGEST_SIZE,
4037			.halg.base = {
4038				.cra_name = "hmac(sha384)",
4039				.cra_driver_name = "hmac-sha384-chcr",
4040				.cra_blocksize = SHA384_BLOCK_SIZE,
4041			}
4042		}
4043	},
4044	{
4045		.type = CRYPTO_ALG_TYPE_HMAC,
4046		.is_registered = 0,
4047		.alg.hash = {
4048			.halg.digestsize = SHA512_DIGEST_SIZE,
4049			.halg.base = {
4050				.cra_name = "hmac(sha512)",
4051				.cra_driver_name = "hmac-sha512-chcr",
4052				.cra_blocksize = SHA512_BLOCK_SIZE,
4053			}
4054		}
4055	},
4056	/* Add AEAD Algorithms */
4057	{
4058		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
4059		.is_registered = 0,
4060		.alg.aead = {
4061			.base = {
4062				.cra_name = "gcm(aes)",
4063				.cra_driver_name = "gcm-aes-chcr",
4064				.cra_blocksize	= 1,
4065				.cra_priority = CHCR_AEAD_PRIORITY,
4066				.cra_ctxsize =	sizeof(struct chcr_context) +
4067						sizeof(struct chcr_aead_ctx) +
4068						sizeof(struct chcr_gcm_ctx),
4069			},
4070			.ivsize = GCM_AES_IV_SIZE,
4071			.maxauthsize = GHASH_DIGEST_SIZE,
4072			.setkey = chcr_gcm_setkey,
4073			.setauthsize = chcr_gcm_setauthsize,
4074		}
4075	},
4076	{
4077		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
4078		.is_registered = 0,
4079		.alg.aead = {
4080			.base = {
4081				.cra_name = "rfc4106(gcm(aes))",
4082				.cra_driver_name = "rfc4106-gcm-aes-chcr",
4083				.cra_blocksize	 = 1,
4084				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4085				.cra_ctxsize =	sizeof(struct chcr_context) +
4086						sizeof(struct chcr_aead_ctx) +
4087						sizeof(struct chcr_gcm_ctx),
4088
4089			},
4090			.ivsize = GCM_RFC4106_IV_SIZE,
4091			.maxauthsize	= GHASH_DIGEST_SIZE,
4092			.setkey = chcr_gcm_setkey,
4093			.setauthsize	= chcr_4106_4309_setauthsize,
4094		}
4095	},
4096	{
4097		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
4098		.is_registered = 0,
4099		.alg.aead = {
4100			.base = {
4101				.cra_name = "ccm(aes)",
4102				.cra_driver_name = "ccm-aes-chcr",
4103				.cra_blocksize	 = 1,
4104				.cra_priority = CHCR_AEAD_PRIORITY,
4105				.cra_ctxsize =	sizeof(struct chcr_context) +
4106						sizeof(struct chcr_aead_ctx),
4107
4108			},
4109			.ivsize = AES_BLOCK_SIZE,
4110			.maxauthsize	= GHASH_DIGEST_SIZE,
4111			.setkey = chcr_aead_ccm_setkey,
4112			.setauthsize	= chcr_ccm_setauthsize,
4113		}
4114	},
4115	{
4116		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
4117		.is_registered = 0,
4118		.alg.aead = {
4119			.base = {
4120				.cra_name = "rfc4309(ccm(aes))",
4121				.cra_driver_name = "rfc4309-ccm-aes-chcr",
4122				.cra_blocksize	 = 1,
4123				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4124				.cra_ctxsize =	sizeof(struct chcr_context) +
4125						sizeof(struct chcr_aead_ctx),
4126
4127			},
4128			.ivsize = 8,
4129			.maxauthsize	= GHASH_DIGEST_SIZE,
4130			.setkey = chcr_aead_rfc4309_setkey,
4131			.setauthsize = chcr_4106_4309_setauthsize,
4132		}
4133	},
4134	{
4135		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4136		.is_registered = 0,
4137		.alg.aead = {
4138			.base = {
4139				.cra_name = "authenc(hmac(sha1),cbc(aes))",
4140				.cra_driver_name =
4141					"authenc-hmac-sha1-cbc-aes-chcr",
4142				.cra_blocksize	 = AES_BLOCK_SIZE,
4143				.cra_priority = CHCR_AEAD_PRIORITY,
4144				.cra_ctxsize =	sizeof(struct chcr_context) +
4145						sizeof(struct chcr_aead_ctx) +
4146						sizeof(struct chcr_authenc_ctx),
4147
4148			},
4149			.ivsize = AES_BLOCK_SIZE,
4150			.maxauthsize = SHA1_DIGEST_SIZE,
4151			.setkey = chcr_authenc_setkey,
4152			.setauthsize = chcr_authenc_setauthsize,
4153		}
4154	},
4155	{
4156		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4157		.is_registered = 0,
4158		.alg.aead = {
4159			.base = {
4160
4161				.cra_name = "authenc(hmac(sha256),cbc(aes))",
4162				.cra_driver_name =
4163					"authenc-hmac-sha256-cbc-aes-chcr",
4164				.cra_blocksize	 = AES_BLOCK_SIZE,
4165				.cra_priority = CHCR_AEAD_PRIORITY,
4166				.cra_ctxsize =	sizeof(struct chcr_context) +
4167						sizeof(struct chcr_aead_ctx) +
4168						sizeof(struct chcr_authenc_ctx),
4169
4170			},
4171			.ivsize = AES_BLOCK_SIZE,
4172			.maxauthsize	= SHA256_DIGEST_SIZE,
4173			.setkey = chcr_authenc_setkey,
4174			.setauthsize = chcr_authenc_setauthsize,
4175		}
4176	},
4177	{
4178		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4179		.is_registered = 0,
4180		.alg.aead = {
4181			.base = {
4182				.cra_name = "authenc(hmac(sha224),cbc(aes))",
4183				.cra_driver_name =
4184					"authenc-hmac-sha224-cbc-aes-chcr",
4185				.cra_blocksize	 = AES_BLOCK_SIZE,
4186				.cra_priority = CHCR_AEAD_PRIORITY,
4187				.cra_ctxsize =	sizeof(struct chcr_context) +
4188						sizeof(struct chcr_aead_ctx) +
4189						sizeof(struct chcr_authenc_ctx),
4190			},
4191			.ivsize = AES_BLOCK_SIZE,
4192			.maxauthsize = SHA224_DIGEST_SIZE,
4193			.setkey = chcr_authenc_setkey,
4194			.setauthsize = chcr_authenc_setauthsize,
4195		}
4196	},
4197	{
4198		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4199		.is_registered = 0,
4200		.alg.aead = {
4201			.base = {
4202				.cra_name = "authenc(hmac(sha384),cbc(aes))",
4203				.cra_driver_name =
4204					"authenc-hmac-sha384-cbc-aes-chcr",
4205				.cra_blocksize	 = AES_BLOCK_SIZE,
4206				.cra_priority = CHCR_AEAD_PRIORITY,
4207				.cra_ctxsize =	sizeof(struct chcr_context) +
4208						sizeof(struct chcr_aead_ctx) +
4209						sizeof(struct chcr_authenc_ctx),
4210
4211			},
4212			.ivsize = AES_BLOCK_SIZE,
4213			.maxauthsize = SHA384_DIGEST_SIZE,
4214			.setkey = chcr_authenc_setkey,
4215			.setauthsize = chcr_authenc_setauthsize,
4216		}
4217	},
4218	{
4219		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4220		.is_registered = 0,
4221		.alg.aead = {
4222			.base = {
4223				.cra_name = "authenc(hmac(sha512),cbc(aes))",
4224				.cra_driver_name =
4225					"authenc-hmac-sha512-cbc-aes-chcr",
4226				.cra_blocksize	 = AES_BLOCK_SIZE,
4227				.cra_priority = CHCR_AEAD_PRIORITY,
4228				.cra_ctxsize =	sizeof(struct chcr_context) +
4229						sizeof(struct chcr_aead_ctx) +
4230						sizeof(struct chcr_authenc_ctx),
4231
4232			},
4233			.ivsize = AES_BLOCK_SIZE,
4234			.maxauthsize = SHA512_DIGEST_SIZE,
4235			.setkey = chcr_authenc_setkey,
4236			.setauthsize = chcr_authenc_setauthsize,
4237		}
4238	},
4239	{
4240		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4241		.is_registered = 0,
4242		.alg.aead = {
4243			.base = {
4244				.cra_name = "authenc(digest_null,cbc(aes))",
4245				.cra_driver_name =
4246					"authenc-digest_null-cbc-aes-chcr",
4247				.cra_blocksize	 = AES_BLOCK_SIZE,
4248				.cra_priority = CHCR_AEAD_PRIORITY,
4249				.cra_ctxsize =	sizeof(struct chcr_context) +
4250						sizeof(struct chcr_aead_ctx) +
4251						sizeof(struct chcr_authenc_ctx),
4252
4253			},
4254			.ivsize  = AES_BLOCK_SIZE,
4255			.maxauthsize = 0,
4256			.setkey  = chcr_aead_digest_null_setkey,
4257			.setauthsize = chcr_authenc_null_setauthsize,
4258		}
4259	},
4260	{
4261		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4262		.is_registered = 0,
4263		.alg.aead = {
4264			.base = {
4265				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4266				.cra_driver_name =
4267				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4268				.cra_blocksize	 = 1,
4269				.cra_priority = CHCR_AEAD_PRIORITY,
4270				.cra_ctxsize =	sizeof(struct chcr_context) +
4271						sizeof(struct chcr_aead_ctx) +
4272						sizeof(struct chcr_authenc_ctx),
4273
4274			},
4275			.ivsize = CTR_RFC3686_IV_SIZE,
4276			.maxauthsize = SHA1_DIGEST_SIZE,
4277			.setkey = chcr_authenc_setkey,
4278			.setauthsize = chcr_authenc_setauthsize,
4279		}
4280	},
4281	{
4282		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4283		.is_registered = 0,
4284		.alg.aead = {
4285			.base = {
4286
4287				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4288				.cra_driver_name =
4289				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4290				.cra_blocksize	 = 1,
4291				.cra_priority = CHCR_AEAD_PRIORITY,
4292				.cra_ctxsize =	sizeof(struct chcr_context) +
4293						sizeof(struct chcr_aead_ctx) +
4294						sizeof(struct chcr_authenc_ctx),
4295
4296			},
4297			.ivsize = CTR_RFC3686_IV_SIZE,
4298			.maxauthsize	= SHA256_DIGEST_SIZE,
4299			.setkey = chcr_authenc_setkey,
4300			.setauthsize = chcr_authenc_setauthsize,
4301		}
4302	},
4303	{
4304		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4305		.is_registered = 0,
4306		.alg.aead = {
4307			.base = {
4308				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4309				.cra_driver_name =
4310				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4311				.cra_blocksize	 = 1,
4312				.cra_priority = CHCR_AEAD_PRIORITY,
4313				.cra_ctxsize =	sizeof(struct chcr_context) +
4314						sizeof(struct chcr_aead_ctx) +
4315						sizeof(struct chcr_authenc_ctx),
4316			},
4317			.ivsize = CTR_RFC3686_IV_SIZE,
4318			.maxauthsize = SHA224_DIGEST_SIZE,
4319			.setkey = chcr_authenc_setkey,
4320			.setauthsize = chcr_authenc_setauthsize,
4321		}
4322	},
4323	{
4324		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4325		.is_registered = 0,
4326		.alg.aead = {
4327			.base = {
4328				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4329				.cra_driver_name =
4330				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4331				.cra_blocksize	 = 1,
4332				.cra_priority = CHCR_AEAD_PRIORITY,
4333				.cra_ctxsize =	sizeof(struct chcr_context) +
4334						sizeof(struct chcr_aead_ctx) +
4335						sizeof(struct chcr_authenc_ctx),
4336
4337			},
4338			.ivsize = CTR_RFC3686_IV_SIZE,
4339			.maxauthsize = SHA384_DIGEST_SIZE,
4340			.setkey = chcr_authenc_setkey,
4341			.setauthsize = chcr_authenc_setauthsize,
4342		}
4343	},
4344	{
4345		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4346		.is_registered = 0,
4347		.alg.aead = {
4348			.base = {
4349				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4350				.cra_driver_name =
4351				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4352				.cra_blocksize	 = 1,
4353				.cra_priority = CHCR_AEAD_PRIORITY,
4354				.cra_ctxsize =	sizeof(struct chcr_context) +
4355						sizeof(struct chcr_aead_ctx) +
4356						sizeof(struct chcr_authenc_ctx),
4357
4358			},
4359			.ivsize = CTR_RFC3686_IV_SIZE,
4360			.maxauthsize = SHA512_DIGEST_SIZE,
4361			.setkey = chcr_authenc_setkey,
4362			.setauthsize = chcr_authenc_setauthsize,
4363		}
4364	},
4365	{
4366		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4367		.is_registered = 0,
4368		.alg.aead = {
4369			.base = {
4370				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4371				.cra_driver_name =
4372				"authenc-digest_null-rfc3686-ctr-aes-chcr",
4373				.cra_blocksize	 = 1,
4374				.cra_priority = CHCR_AEAD_PRIORITY,
4375				.cra_ctxsize =	sizeof(struct chcr_context) +
4376						sizeof(struct chcr_aead_ctx) +
4377						sizeof(struct chcr_authenc_ctx),
4378
4379			},
4380			.ivsize  = CTR_RFC3686_IV_SIZE,
4381			.maxauthsize = 0,
4382			.setkey  = chcr_aead_digest_null_setkey,
4383			.setauthsize = chcr_authenc_null_setauthsize,
4384		}
4385	},
4386};
4387
4388/*
4389 *	chcr_unregister_alg - Deregister the crypto algorithms from the
4390 *	kernel framework.
4391 */
4392static int chcr_unregister_alg(void)
4393{
4394	int i;
4395
4396	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4397		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4398		case CRYPTO_ALG_TYPE_SKCIPHER:
4399			if (driver_algs[i].is_registered && refcount_read(
4400			    &driver_algs[i].alg.skcipher.base.cra_refcnt)
4401			    == 1) {
4402				crypto_unregister_skcipher(
4403						&driver_algs[i].alg.skcipher);
4404				driver_algs[i].is_registered = 0;
4405			}
4406			break;
4407		case CRYPTO_ALG_TYPE_AEAD:
4408			if (driver_algs[i].is_registered && refcount_read(
4409			    &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
4410				crypto_unregister_aead(
4411						&driver_algs[i].alg.aead);
4412				driver_algs[i].is_registered = 0;
4413			}
4414			break;
4415		case CRYPTO_ALG_TYPE_AHASH:
4416			if (driver_algs[i].is_registered && refcount_read(
4417			    &driver_algs[i].alg.hash.halg.base.cra_refcnt)
4418			    == 1) {
4419				crypto_unregister_ahash(
4420						&driver_algs[i].alg.hash);
4421				driver_algs[i].is_registered = 0;
4422			}
4423			break;
4424		}
4425	}
4426	return 0;
4427}
4428
4429#define SZ_AHASH_CTX sizeof(struct chcr_context)
4430#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4431#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
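/* Per-tfm context sizes advertised to the crypto API; HMAC transforms
 * additionally carry a struct hmac_ctx for the precomputed ipad/opad
 * state.
 */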
4432
4433/*
4434 *	chcr_register_alg - Register crypto algorithms with kernel framework.
4435 */
4436static int chcr_register_alg(void)
4437{
4438	struct crypto_alg ai;
4439	struct ahash_alg *a_hash;
4440	int err = 0, i;
4441	char *name = NULL;
4442
4443	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4444		if (driver_algs[i].is_registered)
4445			continue;
4446		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4447		case CRYPTO_ALG_TYPE_SKCIPHER:
4448			driver_algs[i].alg.skcipher.base.cra_priority =
4449				CHCR_CRA_PRIORITY;
4450			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
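			/* Advertised as an async hardware implementation
			 * that may allocate memory and owns a software
			 * fallback for cases it cannot handle.
			 */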
4451			driver_algs[i].alg.skcipher.base.cra_flags =
4452				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
4453				CRYPTO_ALG_ALLOCATES_MEMORY |
4454				CRYPTO_ALG_NEED_FALLBACK;
4455			driver_algs[i].alg.skcipher.base.cra_ctxsize =
4456				sizeof(struct chcr_context) +
4457				sizeof(struct ablk_ctx);
4458			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
4459
4460			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
4461			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
4462			break;
4463		case CRYPTO_ALG_TYPE_AEAD:
4464			driver_algs[i].alg.aead.base.cra_flags =
4465				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
4466				CRYPTO_ALG_ALLOCATES_MEMORY;
4467			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4468			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4469			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4470			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4471			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4472			err = crypto_register_aead(&driver_algs[i].alg.aead);
4473			name = driver_algs[i].alg.aead.base.cra_driver_name;
4474			break;
4475		case CRYPTO_ALG_TYPE_AHASH:
4476			a_hash = &driver_algs[i].alg.hash;
4477			a_hash->update = chcr_ahash_update;
4478			a_hash->final = chcr_ahash_final;
4479			a_hash->finup = chcr_ahash_finup;
4480			a_hash->digest = chcr_ahash_digest;
4481			a_hash->export = chcr_ahash_export;
4482			a_hash->import = chcr_ahash_import;
4483			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4484			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4485			a_hash->halg.base.cra_module = THIS_MODULE;
4486			a_hash->halg.base.cra_flags =
4487				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4488			a_hash->halg.base.cra_alignmask = 0;
4489			a_hash->halg.base.cra_exit = NULL;
4490
4491			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4492				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4493				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4494				a_hash->init = chcr_hmac_init;
4495				a_hash->setkey = chcr_ahash_setkey;
4496				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4497			} else {
4498				a_hash->init = chcr_sha_init;
4499				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4500				a_hash->halg.base.cra_init = chcr_sha_cra_init;
4501			}
4502			err = crypto_register_ahash(&driver_algs[i].alg.hash);
4503			ai = driver_algs[i].alg.hash.halg.base;
4504			name = ai.cra_driver_name;
4505			break;
4506		}
4507		if (err) {
4508			pr_err("%s : Algorithm registration failed\n", name);
4509			goto register_err;
4510		} else {
4511			driver_algs[i].is_registered = 1;
4512		}
4513	}
4514	return 0;
4515
4516register_err:
4517	chcr_unregister_alg();
4518	return err;
4519}
4520
4521/*
4522 *	start_crypto - Register the crypto algorithms.
4523 *	This should be called once when the first device comes up; after
4524 *	that the kernel starts calling the driver APIs for crypto operations.
4525 */
4526int start_crypto(void)
4527{
4528	return chcr_register_alg();
4529}
4530
4531/*
4532 *	stop_crypto - Deregister all the crypto algorithms with the kernel.
4533 *	This should be called once when the last device goes down; after
4534 *	that the kernel will not call the driver APIs for crypto operations.
4535 */
4536int stop_crypto(void)
4537{
4538	chcr_unregister_alg();
4539	return 0;
4540}
v4.10.11
   1/*
   2 * This file is part of the Chelsio T6 Crypto driver for Linux.
   3 *
   4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 *
  34 * Written and Maintained by:
  35 *	Manoj Malviya (manojmalviya@chelsio.com)
  36 *	Atul Gupta (atul.gupta@chelsio.com)
  37 *	Jitendra Lulla (jlulla@chelsio.com)
  38 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
  39 *	Harsh Jain (harsh@chelsio.com)
  40 */
  41
  42#define pr_fmt(fmt) "chcr:" fmt
  43
  44#include <linux/kernel.h>
  45#include <linux/module.h>
  46#include <linux/crypto.h>
  47#include <linux/cryptohash.h>
  48#include <linux/skbuff.h>
  49#include <linux/rtnetlink.h>
  50#include <linux/highmem.h>
  51#include <linux/scatterlist.h>
  52
  53#include <crypto/aes.h>
  54#include <crypto/algapi.h>
  55#include <crypto/hash.h>
  56#include <crypto/sha.h>
 
 
  57#include <crypto/authenc.h>
 
 
  58#include <crypto/internal/aead.h>
  59#include <crypto/null.h>
  60#include <crypto/internal/skcipher.h>
  61#include <crypto/aead.h>
  62#include <crypto/scatterwalk.h>
  63#include <crypto/internal/hash.h>
  64
  65#include "t4fw_api.h"
  66#include "t4_msg.h"
  67#include "chcr_core.h"
  68#include "chcr_algo.h"
  69#include "chcr_crypto.h"
  70
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  71static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
  72{
  73	return ctx->crypto_ctx->aeadctx;
  74}
  75
  76static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
  77{
  78	return ctx->crypto_ctx->ablkctx;
  79}
  80
  81static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
  82{
  83	return ctx->crypto_ctx->hmacctx;
  84}
  85
  86static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
  87{
  88	return gctx->ctx->gcm;
  89}
  90
  91static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
  92{
  93	return gctx->ctx->authenc;
  94}
  95
  96static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
  97{
  98	return ctx->dev->u_ctx;
  99}
 100
 101static inline int is_ofld_imm(const struct sk_buff *skb)
 102{
 103	return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 104}
 105
 106/*
 107 *	sgl_len - calculates the size of an SGL of the given capacity
 108 *	@n: the number of SGL entries
 109 *	Calculates the number of flits needed for a scatter/gather list that
 110 *	can hold the given number of entries.
 111 */
 112static inline unsigned int sgl_len(unsigned int n)
 113{
 114	n--;
 115	return (3 * n) / 2 + (n & 1) + 2;
 
 
 116}
 117
 118static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
 119{
 120	u8 temp[SHA512_DIGEST_SIZE];
 121	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 122	int authsize = crypto_aead_authsize(tfm);
 123	struct cpl_fw6_pld *fw6_pld;
 124	int cmp = 0;
 125
 126	fw6_pld = (struct cpl_fw6_pld *)input;
 127	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
 128	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
 129		cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
 130	} else {
 131
 132		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
 133				authsize, req->assoclen +
 134				req->cryptlen - authsize);
 135		cmp = memcmp(temp, (fw6_pld + 1), authsize);
 136	}
 137	if (cmp)
 138		*err = -EBADMSG;
 139	else
 140		*err = 0;
 141}
 142
 143/*
 144 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
 145 *	@req: crypto request
 146 */
 147int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 148			 int err)
 149{
 150	struct crypto_tfm *tfm = req->tfm;
 151	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
 152	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 153	struct chcr_req_ctx ctx_req;
 154	struct cpl_fw6_pld *fw6_pld;
 155	unsigned int digestsize, updated_digestsize;
 156
 157	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
 158	case CRYPTO_ALG_TYPE_AEAD:
 159		ctx_req.req.aead_req = (struct aead_request *)req;
 160		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
 161		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
 162			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
 163		if (ctx_req.ctx.reqctx->skb) {
 164			kfree_skb(ctx_req.ctx.reqctx->skb);
 165			ctx_req.ctx.reqctx->skb = NULL;
 166		}
 167		if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
 168			chcr_verify_tag(ctx_req.req.aead_req, input,
 169					&err);
 170			ctx_req.ctx.reqctx->verify = VERIFY_HW;
 171		}
 172		break;
 173
 174	case CRYPTO_ALG_TYPE_BLKCIPHER:
 175		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
 176		ctx_req.ctx.ablk_ctx =
 177			ablkcipher_request_ctx(ctx_req.req.ablk_req);
 178		if (!err) {
 179			fw6_pld = (struct cpl_fw6_pld *)input;
 180			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
 181			       AES_BLOCK_SIZE);
 182		}
 183		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
 184			     ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
 185		if (ctx_req.ctx.ablk_ctx->skb) {
 186			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
 187			ctx_req.ctx.ablk_ctx->skb = NULL;
 188		}
 189		break;
 190
 191	case CRYPTO_ALG_TYPE_AHASH:
 192		ctx_req.req.ahash_req = (struct ahash_request *)req;
 193		ctx_req.ctx.ahash_ctx =
 194			ahash_request_ctx(ctx_req.req.ahash_req);
 195		digestsize =
 196			crypto_ahash_digestsize(crypto_ahash_reqtfm(
 197							ctx_req.req.ahash_req));
 198		updated_digestsize = digestsize;
 199		if (digestsize == SHA224_DIGEST_SIZE)
 200			updated_digestsize = SHA256_DIGEST_SIZE;
 201		else if (digestsize == SHA384_DIGEST_SIZE)
 202			updated_digestsize = SHA512_DIGEST_SIZE;
 203		if (ctx_req.ctx.ahash_ctx->skb) {
 204			kfree_skb(ctx_req.ctx.ahash_ctx->skb);
 205			ctx_req.ctx.ahash_ctx->skb = NULL;
 206		}
 207		if (ctx_req.ctx.ahash_ctx->result == 1) {
 208			ctx_req.ctx.ahash_ctx->result = 0;
 209			memcpy(ctx_req.req.ahash_req->result, input +
 210			       sizeof(struct cpl_fw6_pld),
 211			       digestsize);
 212		} else {
 213			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
 214			       sizeof(struct cpl_fw6_pld),
 215			       updated_digestsize);
 216		}
 217		break;
 218	}
 219	return err;
 220}
 221
 222/*
 223 *	calc_tx_flits_ofld - calculate # of flits for an offload packet
 224 *	@skb: the packet
 225 *	Returns the number of flits needed for the given offload packet.
 226 *	These packets are already fully constructed and no additional headers
 227 *	will be added.
 228 */
 229static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
 230{
 231	unsigned int flits, cnt;
 
 
 232
 233	if (is_ofld_imm(skb))
 234		return DIV_ROUND_UP(skb->len, 8);
 
 
 
 
 
 235
 236	flits = skb_transport_offset(skb) / 8;   /* headers */
 237	cnt = skb_shinfo(skb)->nr_frags;
 238	if (skb_tail_pointer(skb) != skb_transport_header(skb))
 239		cnt++;
 240	return flits + sgl_len(cnt);
 241}
 242
 243static inline void get_aes_decrypt_key(unsigned char *dec_key,
 244				       const unsigned char *key,
 245				       unsigned int keylength)
 246{
 247	u32 temp;
 248	u32 w_ring[MAX_NK];
 249	int i, j, k;
 250	u8  nr, nk;
 251
 252	switch (keylength) {
 253	case AES_KEYLENGTH_128BIT:
 254		nk = KEYLENGTH_4BYTES;
 255		nr = NUMBER_OF_ROUNDS_10;
 256		break;
 257	case AES_KEYLENGTH_192BIT:
 258		nk = KEYLENGTH_6BYTES;
 259		nr = NUMBER_OF_ROUNDS_12;
 260		break;
 261	case AES_KEYLENGTH_256BIT:
 262		nk = KEYLENGTH_8BYTES;
 263		nr = NUMBER_OF_ROUNDS_14;
 264		break;
 265	default:
 266		return;
 267	}
 268	for (i = 0; i < nk; i++)
 269		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
 270
 271	i = 0;
 272	temp = w_ring[nk - 1];
 273	while (i + nk < (nr + 1) * 4) {
 274		if (!(i % nk)) {
 275			/* RotWord(temp) */
 276			temp = (temp << 8) | (temp >> 24);
 277			temp = aes_ks_subword(temp);
 278			temp ^= round_constant[i / nk];
 279		} else if (nk == 8 && (i % 4 == 0)) {
 280			temp = aes_ks_subword(temp);
 281		}
 282		w_ring[i % nk] ^= temp;
 283		temp = w_ring[i % nk];
 284		i++;
 285	}
 286	i--;
 287	for (k = 0, j = i % nk; k < nk; k++) {
 288		*((u32 *)dec_key + k) = htonl(w_ring[j]);
 289		j--;
 290		if (j < 0)
 291			j += nk;
 292	}
 293}
 294
 295static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
 296{
 297	struct crypto_shash *base_hash = NULL;
 298
 299	switch (ds) {
 300	case SHA1_DIGEST_SIZE:
 301		base_hash = crypto_alloc_shash("sha1", 0, 0);
 302		break;
 303	case SHA224_DIGEST_SIZE:
 304		base_hash = crypto_alloc_shash("sha224", 0, 0);
 305		break;
 306	case SHA256_DIGEST_SIZE:
 307		base_hash = crypto_alloc_shash("sha256", 0, 0);
 308		break;
 309	case SHA384_DIGEST_SIZE:
 310		base_hash = crypto_alloc_shash("sha384", 0, 0);
 311		break;
 312	case SHA512_DIGEST_SIZE:
 313		base_hash = crypto_alloc_shash("sha512", 0, 0);
 314		break;
 315	}
 316
 317	return base_hash;
 318}
 319
 320static int chcr_compute_partial_hash(struct shash_desc *desc,
 321				     char *iopad, char *result_hash,
 322				     int digest_size)
 323{
 324	struct sha1_state sha1_st;
 325	struct sha256_state sha256_st;
 326	struct sha512_state sha512_st;
 327	int error;
 328
 329	if (digest_size == SHA1_DIGEST_SIZE) {
 330		error = crypto_shash_init(desc) ?:
 331			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
 332			crypto_shash_export(desc, (void *)&sha1_st);
 333		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
 334	} else if (digest_size == SHA224_DIGEST_SIZE) {
 335		error = crypto_shash_init(desc) ?:
 336			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
 337			crypto_shash_export(desc, (void *)&sha256_st);
 338		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
 339
 340	} else if (digest_size == SHA256_DIGEST_SIZE) {
 341		error = crypto_shash_init(desc) ?:
 342			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
 343			crypto_shash_export(desc, (void *)&sha256_st);
 344		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
 345
 346	} else if (digest_size == SHA384_DIGEST_SIZE) {
 347		error = crypto_shash_init(desc) ?:
 348			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
 349			crypto_shash_export(desc, (void *)&sha512_st);
 350		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
 351
 352	} else if (digest_size == SHA512_DIGEST_SIZE) {
 353		error = crypto_shash_init(desc) ?:
 354			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
 355			crypto_shash_export(desc, (void *)&sha512_st);
 356		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
 357	} else {
 358		error = -EINVAL;
 359		pr_err("Unknown digest size %d\n", digest_size);
 360	}
 361	return error;
 362}
 363
 364static void chcr_change_order(char *buf, int ds)
 365{
 366	int i;
 367
 368	if (ds == SHA512_DIGEST_SIZE) {
 369		for (i = 0; i < (ds / sizeof(u64)); i++)
 370			*((__be64 *)buf + i) =
 371				cpu_to_be64(*((u64 *)buf + i));
 372	} else {
 373		for (i = 0; i < (ds / sizeof(u32)); i++)
 374			*((__be32 *)buf + i) =
 375				cpu_to_be32(*((u32 *)buf + i));
 376	}
 377}
 378
 379static inline int is_hmac(struct crypto_tfm *tfm)
 380{
 381	struct crypto_alg *alg = tfm->__crt_alg;
 382	struct chcr_alg_template *chcr_crypto_alg =
 383		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
 384			     alg.hash);
 385	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
 386		return 1;
 387	return 0;
 388}
 389
 390static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
 391			   struct scatterlist *sg,
 392			   struct phys_sge_parm *sg_param)
 
 
 
 
 
 
 
 393{
 394	struct phys_sge_pairs *to;
 395	int out_buf_size = sg_param->obsize;
 396	unsigned int nents = sg_param->nents, i, j = 0;
 397
 398	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
 399				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
 400	phys_cpl->pcirlxorder_to_noofsgentr =
 401		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
 402		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
 403		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
 404		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
 405		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
 406		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
 407	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
 408	phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
 409	phys_cpl->rss_hdr_int.hash_val = 0;
 410	to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
 411				       sizeof(struct cpl_rx_phys_dsgl));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 412
 413	for (i = 0; nents; to++) {
 414		for (j = 0; j < 8 && nents; j++, nents--) {
 415			out_buf_size -= sg_dma_len(sg);
 416			to->len[j] = htons(sg_dma_len(sg));
 417			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
 
 
 
 
 
 
 
 
 
 
 
 418			sg = sg_next(sg);
 
 
 
 419		}
 420	}
 421	if (out_buf_size) {
 422		j--;
 423		to--;
 424		to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 425	}
 
 
 
 
 
 
 
 
 
 
 
 
 426}
 427
 428static inline int map_writesg_phys_cpl(struct device *dev,
 429					struct cpl_rx_phys_dsgl *phys_cpl,
 430					struct scatterlist *sg,
 431					struct phys_sge_parm *sg_param)
 
 
 
 
 
 
 432{
 433	if (!sg || !sg_param->nents)
 434		return 0;
 435
 436	sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
 437	if (sg_param->nents == 0) {
 438		pr_err("CHCR : DMA mapping failed\n");
 439		return -EINVAL;
 
 
 
 
 
 440	}
 441	write_phys_cpl(phys_cpl, sg, sg_param);
 442	return 0;
 443}
 444
 445static inline int get_aead_subtype(struct crypto_aead *aead)
 
 
 
 446{
 447	struct aead_alg *alg = crypto_aead_alg(aead);
 448	struct chcr_alg_template *chcr_crypto_alg =
 449		container_of(alg, struct chcr_alg_template, alg.aead);
 450	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 451}
 452
 453static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
 454{
 455	struct crypto_alg *alg = tfm->__crt_alg;
 456	struct chcr_alg_template *chcr_crypto_alg =
 457		container_of(alg, struct chcr_alg_template, alg.crypto);
 458
 459	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 460}
 461
 462static inline void write_buffer_to_skb(struct sk_buff *skb,
 463					unsigned int *frags,
 464					char *bfr,
 465					u8 bfr_len)
 466{
 467	skb->len += bfr_len;
 468	skb->data_len += bfr_len;
 469	skb->truesize += bfr_len;
 470	get_page(virt_to_page(bfr));
 471	skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
 472			   offset_in_page(bfr), bfr_len);
 473	(*frags)++;
 474}
 475
 476
 477static inline void
 478write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
 479			struct scatterlist *sg, unsigned int count)
 480{
 481	struct page *spage;
 482	unsigned int page_len;
 483
 484	skb->len += count;
 485	skb->data_len += count;
 486	skb->truesize += count;
 487
 488	while (count > 0) {
 489		if (!sg || (!(sg->length)))
 490			break;
 491		spage = sg_page(sg);
 492		get_page(spage);
 493		page_len = min(sg->length, count);
 494		skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
 495		(*frags)++;
 496		count -= page_len;
 497		sg = sg_next(sg);
 498	}
 499}
 500
 501static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
 502			       struct _key_ctx *key_ctx)
 503{
 504	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
 505		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
 506	} else {
 507		memcpy(key_ctx->key,
 508		       ablkctx->key + (ablkctx->enckey_len >> 1),
 509		       ablkctx->enckey_len >> 1);
 510		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
 511		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
 512	}
 513	return 0;
 514}
 515
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 516static inline void create_wreq(struct chcr_context *ctx,
 517			       struct chcr_wr *chcr_req,
 518			       void *req, struct sk_buff *skb,
 519			       int kctx_len, int hash_sz,
 520			       int is_iv,
 521			       unsigned int sc_len)
 
 
 522{
 523	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 524	int iv_loc = IV_DSGL;
 525	int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
 526	unsigned int immdatalen = 0, nr_frags = 0;
 
 
 
 
 
 
 
 527
 528	if (is_ofld_imm(skb)) {
 529		immdatalen = skb->data_len;
 530		iv_loc = IV_IMMEDIATE;
 531	} else {
 532		nr_frags = skb_shinfo(skb)->nr_frags;
 533	}
 534
 535	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
 536				((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
 537	chcr_req->wreq.pld_size_hash_size =
 538		htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
 539		      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
 540	chcr_req->wreq.len16_pkd =
 541		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
 542				    (calc_tx_flits_ofld(skb) * 8), 16)));
 543	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
 544	chcr_req->wreq.rx_chid_to_rx_q_id =
 545		FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
 546				is_iv ? iv_loc : IV_NOP);
 547
 548	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
 549	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
 550					16) - ((sizeof(chcr_req->wreq)) >> 4)));
 551
 552	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
 
 
 
 553	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
 554				   sizeof(chcr_req->key_ctx) +
 555				   kctx_len + sc_len + immdatalen);
 556}

/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@req: cipher request
 *	@qid: ingress qid where the response to this WR should be received
 *	@op_type: encryption or decryption
 */
static struct sk_buff
*create_cipher_wr(struct ablkcipher_request *req,
		  unsigned short qid,
		  unsigned short op_type)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct phys_sge_parm sg_param;
	unsigned int frags = 0, transhdr_len, phys_dsgl;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
			GFP_ATOMIC;

	if (!req->info)
		return ERR_PTR(-EINVAL);
	reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
	if (reqctx->dst_nents <= 0) {
		pr_err("AES: Invalid destination sg list\n");
		return ERR_PTR(-EINVAL);
	}
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
		pr_err("AES: Invalid value of key len %d nbytes %d IV len %d\n",
		       ablkctx->enckey_len, req->nbytes, ivsize);
		return ERR_PTR(-EINVAL);
	}

	phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);

	kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
	memset(chcr_req, 0, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
							 ablkctx->ciph_mode,
							 0, 0, ivsize >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
							  0, 1, phys_dsgl);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if (op_type == CHCR_DECRYPT_OP) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			/* non-CBC (XTS): write the two key halves in
			 * swapped order
			 */
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	sg_param.nents = reqctx->dst_nents;
	sg_param.obsize = req->nbytes;
	sg_param.qid = qid;
	sg_param.align = 1;
	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
				 &sg_param))
		goto map_fail1;

	skb_set_transport_header(skb, transhdr_len);
	memcpy(reqctx->iv, req->info, ivsize);
	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
	write_sg_to_skb(skb, &frags, req->src, req->nbytes);
	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
	reqctx->skb = skb;
	skb_get(skb);
	return skb;
map_fail1:
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}

static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	unsigned int ck_size, context_size;
	u16 alignment = 0;

	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		alignment = 8;
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		goto badkey_err;
	}
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;
	return -EINVAL;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}

static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
			       CHCR_ENCRYPT_OP);
	if (IS_ERR(skb)) {
		pr_err("%s: failed to form WR, no memory\n", __func__);
		return PTR_ERR(skb);
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	/* Use the same per-context ingress queue as the encrypt path so the
	 * DSGL qid matches the rx queue programmed into the WR header.
	 */
	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
			       CHCR_DECRYPT_OP);
	if (IS_ERR(skb)) {
		pr_err("%s: failed to form WR, no memory\n", __func__);
		return PTR_ERR(skb);
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx;
	unsigned int id;
	int err = 0, rxq_perchan, rxq_idx;

	id = smp_processor_id();
	if (!ctx->dev) {
		err = assign_chcr_device(&ctx->dev);
		if (err) {
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		u_ctx = ULD_CTX(ctx);
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
		rxq_idx += id % rxq_perchan;
		spin_lock(&ctx->dev->lock_chcr_dev);
		ctx->tx_channel_id = rxq_idx;
		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
		spin_unlock(&ctx->dev->lock_chcr_dev);
	}
out:
	return err;
}

static int chcr_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("chcr : ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}
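
/*
 * Illustrative use of get_alg_config() (a sketch, not driver code): for a
 * SHA-224 based transform the parameters resolve as shown below. Note
 * that result_size is the size of the running SHA-256 state the hardware
 * carries between passes, not the truncated 28-byte digest that is
 * eventually returned to the caller; SHA-224 (and likewise SHA-384) runs
 * on its parent algorithm's engine.
 *
 *	struct algo_param p;
 *
 *	get_alg_config(&p, SHA224_DIGEST_SIZE);
 *	// p.mk_size     == CHCR_KEYCTX_MAC_KEY_SIZE_256
 *	// p.auth_mode   == CHCR_SCMD_AUTH_MODE_SHA224
 *	// p.result_size == SHA256_DIGEST_SIZE (32 bytes)
 */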

static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}

/**
 *	create_hash_wr - Create hash work request
 *	@req: hash request
 *	@param: parameters describing this hash pass
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int kctx_len = 0;
	u8 hash_size_in_response = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;

	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
	kctx_len = param->alg_prm.result_size + iopad_alignment;
	if (param->opad_needed)
		kctx_len += param->alg_prm.result_size + iopad_alignment;

	if (req_ctx->result)
		hash_size_in_response = digestsize;
	else
		hash_size_in_response = param->alg_prm.result_size;
	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb)
		return skb;

	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
	memset(chcr_req, 0, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0);
	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					    param->alg_prm.mk_size, 0,
					    param->opad_needed,
					    ((kctx_len +
					     sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);

	skb_set_transport_header(skb, transhdr_len);
	if (param->bfr_len != 0)
		write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
				    param->bfr_len);
	if (param->sg_len != 0)
		write_sg_to_skb(skb, &frags, req->src, param->sg_len);

	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
			DUMMY_BYTES);
	req_ctx->skb = skb;
	skb_get(skb);
	return skb;
}

static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}

	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.sg_len = nbytes - req_ctx->reqlen;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 0;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (!skb)
		return -ENOMEM;

	if (remainder) {
		u8 *temp;
		/* Swap buffers */
		temp = req_ctx->reqbfr;
		req_ctx->reqbfr = req_ctx->skbfr;
		req_ctx->skbfr = temp;
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);

	return -EINPROGRESS;
}

static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}
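
/*
 * Worked example for create_last_hash_block() (illustrative only): for a
 * SHA-256 block size of 64, after hashing 3 bytes of data (scmd1 == 3)
 * the buffer becomes
 *
 *	byte  0      : 0x80               (padding terminator)
 *	bytes 1..55  : 0x00               (zero fill)
 *	bytes 56..63 : be64(3 << 3) == 24 (message length in bits)
 *
 * For the 128-byte SHA-384/512 block the length lands at offset 120;
 * only the low 64 bits of their 128-bit length field are written, which
 * suffices for messages shorter than 2^64 bits.
 */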

static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = NULL;
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	skb = create_hash_wr(req, &params);
	if (!skb)
		return -ENOMEM;

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(ctx);

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;

	params.sg_len = req->nbytes;
	params.bfr_len = req_ctx->reqlen;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->result = 1;
	if ((req_ctx->reqlen + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}

	skb = create_hash_wr(req, &params);
	if (!skb)
		return -ENOMEM;

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);

	return -EINPROGRESS;
}

static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;

	rtfm->init(req);
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;

	params.last = 0;
	params.more = 0;
	params.sg_len = req->nbytes;
	params.bfr_len = 0;
	params.scmd1 = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 1;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req->nbytes == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, 0);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_hash_wr(req, &params);
	if (!skb)
		return -ENOMEM;

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->reqlen = req_ctx->reqlen;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	return 0;
}

static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

	req_ctx->reqlen = state->reqlen;
	req_ctx->data_len = state->data_len;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(req_ctx->partial_hash, state->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	return 0;
}
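
/*
 * Sketch of how the export/import pair above is driven through the
 * generic ahash API (illustrative only, error handling elided; assumes
 * the algorithm's statesize covers struct chcr_ahash_req_ctx, since the
 * exported blob is simply a snapshot of that structure):
 *
 *	char state[sizeof(struct chcr_ahash_req_ctx)];
 *
 *	crypto_ahash_update(req1);         // hash some data
 *	crypto_ahash_export(req1, state);  // snapshot the partial state
 *	crypto_ahash_import(req2, state);  // resume in another request
 *	crypto_ahash_final(req2);          // digest matches a one-shot hash
 */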

static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

	/* Use the key to calculate the ipad and opad. ipad will be sent with
	 * the first request's data; opad will be sent with the final hash
	 * result. They are kept in hmacctx->ipad and hmacctx->opad.
	 */
	shash->tfm = hmacctx->base_hash;
	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);

	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(shash, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}
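
/*
 * Worked example for the pad derivation above (illustrative only,
 * assuming IPAD_DATA/OPAD_DATA are the usual repeated HMAC pad words
 * 0x36363636 and 0x5c5c5c5c):
 *
 *	ipad = (K || 0x00..00) ^ 0x363636...
 *	opad = (K || 0x00..00) ^ 0x5c5c5c...
 *
 * where K is the key, first hashed down if longer than one block. Only
 * the partial hashes H(ipad) and H(opad) are kept, so the hardware can
 * finish HMAC(K, msg) = H(opad || H(ipad || msg)) per RFC 2104 without
 * ever seeing the raw key again.
 */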

static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	unsigned short context_size = 0;

	if ((key_len != (AES_KEYSIZE_128 << 1)) &&
	    (key_len != (AES_KEYSIZE_256 << 1))) {
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		ablkctx->enckey_len = 0;
		return -EINVAL;
	}

	memcpy(ablkctx->key, key, key_len);
	ablkctx->enckey_len = key_len;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
	/* key_len covers both XTS keys, so a 32-byte key (which equals
	 * AES_KEYSIZE_256) really means two AES-128 keys.
	 */
	ablkctx->key_ctx_hdr =
		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
				 CHCR_KEYCTX_NO_KEY, 1,
				 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
	return 0;
}
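
/*
 * Note on the XTS key layout above (illustrative): the supplied key is
 * key1 || key2, two AES keys of equal size. get_aes_decrypt_key() is
 * passed key_len << 2 bits, i.e. the bit length of half the buffer,
 * because only key1 (the data key) needs reverse round keys for
 * decryption; key2 encrypts the tweak and is always used in the
 * encrypt direction.
 */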

static int chcr_sha_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	int digestsize = crypto_ahash_digestsize(tfm);

	req_ctx->data_len = 0;
	req_ctx->reqlen = 0;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	req_ctx->skb = NULL;
	req_ctx->result = 0;
	copy_hash_init_values(req_ctx->partial_hash, digestsize);
	return 0;
}

static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_hmac_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_sha_init(areq);
	req_ctx->data_len = bs;
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		if (digestsize == SHA224_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA256_DIGEST_SIZE);
		else if (digestsize == SHA384_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA512_DIGEST_SIZE);
		else
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       digestsize);
	}
	return 0;
}

static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize =
		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	hmacctx->base_hash = chcr_alloc_shash(digestsize);
	if (IS_ERR(hmacctx->base_hash))
		return PTR_ERR(hmacctx->base_hash);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);

	if (hmacctx->base_hash) {
		chcr_free_shash(hmacctx->base_hash);
		hmacctx->base_hash = NULL;
	}
}

static int chcr_copy_assoc(struct aead_request *req,
				struct chcr_aead_ctx *ctx)
{
	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);

	skcipher_request_set_tfm(skreq, ctx->null);
	skcipher_request_set_callback(skreq, aead_request_flags(req),
			NULL, NULL);
	skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
			NULL);

	return crypto_skcipher_encrypt(skreq);
}

static unsigned char get_hmac(unsigned int authsize)
{
	switch (authsize) {
	case ICV_8:
		return CHCR_SCMD_HMAC_CTRL_PL1;
	case ICV_10:
		return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
	case ICV_12:
		return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
	}
	return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
}

static struct sk_buff *create_authenc_wr(struct aead_request *req,
					 unsigned short qid,
					 int size,
					 unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = crypto_aead_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct phys_sge_parm sg_param;
	struct scatterlist *src;
	unsigned int frags = 0, transhdr_len;
	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
	unsigned int kctx_len = 0;
	unsigned short stop_offset = 0;
	unsigned int assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int err = 0;
	int null = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;

	if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
		goto err;

	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
		goto err;

	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
		goto err;
	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
	reqctx->dst = src;

	if (req->src != req->dst) {
		err = chcr_copy_assoc(req, aeadctx);
		if (err)
			return ERR_PTR(err);
		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
					       req->assoclen);
	}
	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
		null = 1;
		assoclen = 0;
	}
	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
					     (op_type ? -authsize : authsize));
	if (reqctx->dst_nents <= 0) {
		pr_err("AUTHENC: Invalid destination sg entries\n");
		goto err;
	}
	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
		- sizeof(chcr_req->key_ctx);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb)
		goto err;

	/* LLD is going to write the sge hdr. */
	skb_reserve(skb, sizeof(struct sge_opaque_hdr));

	/* Write WR */
	chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
	memset(chcr_req, 0, transhdr_len);

	stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;

	/*
	 * Input order is AAD, IV and payload; the IV is counted as part of
	 * the auth data. All other fields are filled according to the
	 * hardware spec.
	 */
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2,
				       (ivsize ? (assoclen + 1) : 0));
	chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 : 0, assoclen,
					assoclen + ivsize + 1,
					(stop_offset & 0x1F0) >> 4);
	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
					stop_offset & 0xF,
					null ? 0 : assoclen + ivsize + 1,
					stop_offset, stop_offset);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
					CHCR_SCMD_CIPHER_MODE_AES_CBC,
					actx->auth_mode, aeadctx->hmac_ctrl,
					ivsize >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					 0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	if (op_type == CHCR_ENCRYPT_OP)
		memcpy(chcr_req->key_ctx.key, aeadctx->key,
		       aeadctx->enckey_len);
	else
		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
		       aeadctx->enckey_len);

	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
					4), actx->h_iopad, kctx_len -
				(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	sg_param.nents = reqctx->dst_nents;
	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
	sg_param.qid = qid;
	sg_param.align = 0;
	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
				  &sg_param))
		goto dstmap_fail;

	skb_set_transport_header(skb, transhdr_len);

	if (assoclen) {
		/* AAD buffer in */
		write_sg_to_skb(skb, &frags, req->src, assoclen);
	}
	write_buffer_to_skb(skb, &frags, req->iv, ivsize);
	write_sg_to_skb(skb, &frags, src, req->cryptlen);
	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
		   sizeof(struct cpl_rx_phys_dsgl) + dst_size);
	reqctx->skb = skb;
	skb_get(skb);

	return skb;
dstmap_fail:
	/* ivmap_fail: */
	kfree_skb(skb);
err:
	return ERR_PTR(-EINVAL);
}

static void aes_gcm_empty_pld_pad(struct scatterlist *sg,
				  unsigned short offset)
{
	struct page *spage;
	unsigned char *addr;

	spage = sg_page(sg);
	get_page(spage); /* so that it is not freed by NIC */
#ifdef KMAP_ATOMIC_ARGS
	addr = kmap_atomic(spage, KM_SOFTIRQ0);
#else
	addr = kmap_atomic(spage);
#endif
	memset(addr + sg->offset, 0, offset + 1);

	kunmap_atomic(addr);
}

static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}
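
/*
 * Worked example for set_msg_len() (illustrative only): with csize == 3
 * and msglen == 0x1234 the three-byte CCM length field is written big
 * endian as 00 12 34. A csize larger than 4 is clamped to the four
 * low-order bytes of the 32-bit length, and a msglen too large for a
 * short field returns -EOVERFLOW.
 */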

static void generate_b0(struct aead_request *req,
			struct chcr_aead_ctx *aeadctx,
			unsigned short op_type)
{
	unsigned int l, lp, m;
	int rc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	u8 *b0 = reqctx->scratch_pad;

	m = crypto_aead_authsize(aead);

	memcpy(b0, reqctx->iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (req->assoclen)
		*b0 |= 64;
	rc = set_msg_len(b0 + 16 - l,
			 (op_type == CHCR_DECRYPT_OP) ?
			 req->cryptlen - m : req->cryptlen, l);
}
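
/*
 * Worked example for the B0 flags byte above (illustrative only): with a
 * 12-byte tag (m == 12), iv[0] == 1 (so L' == 1, L == 2 length octets)
 * and AAD present, the byte becomes
 *
 *	0x01 (L') | 8 * ((12 - 2) / 2) == 0x28 (m) | 0x40 (adata) == 0x69
 *
 * matching the flags layout of RFC 3610, section 2.2.
 */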

static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (iv[0] < 1 || iv[0] > 7)
		return -EINVAL;

	return 0;
}
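
/*
 * Example of an IV accepted by the check above (illustrative only):
 * iv[0] == 3 means L' == 3, hence L == 4 counter octets and an 11-byte
 * nonce filling out the 16-byte block:
 *
 *	iv = { 0x03, n0 .. n10, 0x00, 0x00, 0x00, 0x00 }
 *
 * RFC 3610 allows 2 <= L <= 8, so iv[0] == 0 or iv[0] > 7 is rejected.
 */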

static int ccm_format_packet(struct aead_request *req,
			     struct chcr_aead_ctx *aeadctx,
			     unsigned int sub_type,
			     unsigned short op_type)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int rc = 0;

	if (req->assoclen > T5_MAX_AAD_SIZE) {
		pr_err("CCM: Unsupported AAD length; it must not exceed %d\n",
		       T5_MAX_AAD_SIZE);
		return -EINVAL;
	}
	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		reqctx->iv[0] = 3;
		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
		memcpy(reqctx->iv + 4, req->iv, 8);
		memset(reqctx->iv + 12, 0, 4);
		*((unsigned short *)(reqctx->scratch_pad + 16)) =
			htons(req->assoclen - 8);
	} else {
		memcpy(reqctx->iv, req->iv, 16);
		*((unsigned short *)(reqctx->scratch_pad + 16)) =
			htons(req->assoclen);
	}
	generate_b0(req, aeadctx, op_type);
	/* zero the ctr value */
	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
	return rc;
}

static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
				  unsigned int dst_size,
				  struct aead_request *req,
				  unsigned short op_type,
				  struct chcr_context *chcrctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int ivsize = AES_BLOCK_SIZE;
	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
	unsigned int c_id = chcrctx->dev->tx_channel_id;
	unsigned int ccm_xtra;
	unsigned char tag_offset = 0, auth_offset = 0;
	unsigned char hmac_ctrl = get_hmac(crypto_aead_authsize(tfm));
	unsigned int assoclen;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	ccm_xtra = CCM_B0_SIZE +
		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);

	auth_offset = req->cryptlen ?
		(assoclen + ivsize + 1 + ccm_xtra) : 0;
	if (op_type == CHCR_DECRYPT_OP) {
		if (crypto_aead_authsize(tfm) != req->cryptlen)
			tag_offset = crypto_aead_authsize(tfm);
		else
			auth_offset = 0;
	}

	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
					 2, (ivsize ? (assoclen + 1) : 0) +
					 ccm_xtra);
	sec_cpl->pldlen =
		htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
	/* For CCM there will always be a B0 block, so AAD starts at 1 */
	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					1, assoclen + ccm_xtra, assoclen
					+ ivsize + 1 + ccm_xtra, 0);

	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
					auth_offset, tag_offset,
					(op_type == CHCR_ENCRYPT_OP) ? 0 :
					crypto_aead_authsize(tfm));
	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
					cipher_mode, mac_mode, hmac_ctrl,
					ivsize >> 1);

	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
					1, dst_size);
}

int aead_ccm_validate_input(unsigned short op_type,
			    struct aead_request *req,
			    struct chcr_aead_ctx *aeadctx,
			    unsigned int sub_type)
{
	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		if (crypto_ccm_check_iv(req->iv)) {
			pr_err("CCM: IV check fails\n");
			return -EINVAL;
		}
	} else {
		if (req->assoclen != 16 && req->assoclen != 20) {
			pr_err("RFC4309: Invalid AAD length %d\n",
			       req->assoclen);
			return -EINVAL;
		}
	}
	if (aeadctx->enckey_len == 0) {
		pr_err("CCM: Encryption key not set\n");
		return -EINVAL;
	}
	return 0;
}

unsigned int fill_aead_req_fields(struct sk_buff *skb,
				  struct aead_request *req,
				  struct scatterlist *src,
				  unsigned int ivsize,
				  struct chcr_aead_ctx *aeadctx)
{
	unsigned int frags = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	/* b0 and aad length (if available) */
	write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
				(req->assoclen ? CCM_AAD_FIELD_SIZE : 0));
	if (req->assoclen) {
		if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
			write_sg_to_skb(skb, &frags, req->src,
					req->assoclen - 8);
		else
			write_sg_to_skb(skb, &frags, req->src, req->assoclen);
	}
	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
	if (req->cryptlen)
		write_sg_to_skb(skb, &frags, src, req->cryptlen);

	return frags;
}

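/*
 * Resulting CCM input layout (illustrative): the payload handed to the
 * hardware by fill_aead_req_fields() is
 *
 *	B0 (16 bytes) | AAD length field | AAD | IV (16 bytes) | payload
 *
 * which is why fill_sec_cpl_for_aead() shifts every offset by ccm_xtra,
 * the size of B0 plus the AAD length field when AAD is present.
 */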
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
					  unsigned short qid,
					  int size,
					  unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = crypto_aead_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct phys_sge_parm sg_param;
	struct scatterlist *src;
	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
	unsigned int dst_size = 0, kctx_len;
	unsigned int sub_type;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int err = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;

	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
		goto err;

	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
		goto err;
	sub_type = get_aead_subtype(tfm);
	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
	reqctx->dst = src;

	if (req->src != req->dst) {
		err = chcr_copy_assoc(req, aeadctx);
		if (err) {
			pr_err("AAD copy to destination buffer failed\n");
			return ERR_PTR(err);
		}
		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
					       req->assoclen);
	}
	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
					     (op_type ? -authsize : authsize));
	if (reqctx->dst_nents <= 0) {
		pr_err("CCM: Invalid destination sg entries\n");
		goto err;
	}

	if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type))
		goto err;

	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb)
		goto err;

	skb_reserve(skb, sizeof(struct sge_opaque_hdr));

	chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
	memset(chcr_req, 0, transhdr_len);

	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
					16), aeadctx->key, aeadctx->enckey_len);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	if (ccm_format_packet(req, aeadctx, sub_type, op_type))
		goto dstmap_fail;

	sg_param.nents = reqctx->dst_nents;
	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
	sg_param.qid = qid;
	sg_param.align = 0;
	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
				  &sg_param))
		goto dstmap_fail;

	skb_set_transport_header(skb, transhdr_len);
	frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
		    sizeof(struct cpl_rx_phys_dsgl) + dst_size);
	reqctx->skb = skb;
	skb_get(skb);
	return skb;
dstmap_fail:
	kfree_skb(skb);
	skb = NULL;
err:
	return ERR_PTR(-EINVAL);
}

static struct sk_buff *create_gcm_wr(struct aead_request *req,
				     unsigned short qid,
				     int size,
				     unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = crypto_aead_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct phys_sge_parm sg_param;
	struct scatterlist *src;
	unsigned int frags = 0, transhdr_len;
	unsigned int ivsize = AES_BLOCK_SIZE;
	unsigned int dst_size = 0, kctx_len;
	unsigned char tag_offset = 0;
	unsigned int crypt_len = 0;
	unsigned int authsize = crypto_aead_authsize(tfm);
	unsigned char hmac_ctrl = get_hmac(authsize);
	int err = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;

	/* validate key size */
	if (aeadctx->enckey_len == 0)
		goto err;

	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
		goto err;

	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
		goto err;

	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
	reqctx->dst = src;
	if (req->src != req->dst) {
		err = chcr_copy_assoc(req, aeadctx);
		if (err)
			return ERR_PTR(err);
		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
					       req->assoclen);
	}

	if (!req->cryptlen)
		/* The hardware does not handle a zero-length payload;
		 * software sends one padded block instead.
		 */
		crypt_len = AES_BLOCK_SIZE;
	else
		crypt_len = req->cryptlen;
	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
					     (op_type ? -authsize : authsize));
	if (reqctx->dst_nents <= 0) {
		pr_err("GCM: Invalid destination sg entries\n");
		goto err;
	}

	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
		AEAD_H_SIZE;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb)
		goto err;

	/* NIC driver is going to write the sge hdr. */
	skb_reserve(skb, sizeof(struct sge_opaque_hdr));

	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
	memset(chcr_req, 0, transhdr_len);

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		req->assoclen -= 8;

	tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
					ctx->dev->tx_channel_id, 2, (ivsize ?
					(req->assoclen + 1) : 0));
	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + ivsize + crypt_len);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					req->assoclen ? 1 : 0, req->assoclen,
					req->assoclen + ivsize + 1, 0);
	if (req->cryptlen) {
		chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
						tag_offset, tag_offset);
		chcr_req->sec_cpl.seqno_numivs =
			FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
					CHCR_ENCRYPT_OP) ? 1 : 0,
					CHCR_SCMD_CIPHER_MODE_AES_GCM,
					CHCR_SCMD_AUTH_MODE_GHASH, hmac_ctrl,
					ivsize >> 1);
	} else {
		chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
		chcr_req->sec_cpl.seqno_numivs =
			FILL_SEC_CPL_SCMD0_SEQNO(op_type,
					(op_type == CHCR_ENCRYPT_OP) ?
					1 : 0, CHCR_SCMD_CIPHER_MODE_AES_CBC,
					0, 0, ivsize >> 1);
	}
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					0, 1, dst_size);
	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
				16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);

	/* Prepare a 16-byte IV: SALT | IV | 0x00000001 */
	if (get_aead_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
		memcpy(reqctx->iv, aeadctx->salt, 4);
		memcpy(reqctx->iv + 4, req->iv, 8);
	} else {
		memcpy(reqctx->iv, req->iv, 12);
	}
	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	sg_param.nents = reqctx->dst_nents;
	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
	sg_param.qid = qid;
	sg_param.align = 0;
	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
				  &sg_param))
		goto dstmap_fail;

	skb_set_transport_header(skb, transhdr_len);

	write_sg_to_skb(skb, &frags, req->src, req->assoclen);

	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);

	if (req->cryptlen) {
		write_sg_to_skb(skb, &frags, src, req->cryptlen);
	} else {
		aes_gcm_empty_pld_pad(req->dst, authsize - 1);
		write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
	}

	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
			sizeof(struct cpl_rx_phys_dsgl) + dst_size);
	reqctx->skb = skb;
	skb_get(skb);
	return skb;

dstmap_fail:
	/* ivmap_fail: */
	kfree_skb(skb);
	skb = NULL;
err:
	return skb;
}

static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
	struct chcr_context *ctx = crypto_aead_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);

	crypto_aead_set_reqsize(tfm, sizeof(struct chcr_aead_reqctx));
	aeadctx->null = crypto_get_default_null_skcipher();
	if (IS_ERR(aeadctx->null))
		return PTR_ERR(aeadctx->null);
	return chcr_device_init(ctx);
}

static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
	crypto_put_default_null_skcipher();
}

static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
					unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));

	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
	aeadctx->mayverify = VERIFY_HW;
	return 0;
}

static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
				    unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
	u32 maxauth = crypto_aead_maxauthsize(tfm);

	/* The SHA1 authsize in ipsec is 12 instead of 10, i.e. maxauthsize/2
	 * does not hold for sha1, so the authsize == 12 check must come
	 * before the authsize == (maxauth >> 1) check.
	 */
	if (authsize == ICV_4) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_6) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_10) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_12) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_14) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == (maxauth >> 1)) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == maxauth) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
	} else {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
	}
	return 0;
}

static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_13:
	case ICV_15:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return 0;
}

static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
					  unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));

	switch (authsize) {
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return 0;
}

static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_6:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_10:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return 0;
}

static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
				const u8 *key,
				unsigned int keylen)
{
	struct chcr_context *ctx = crypto_aead_ctx(aead);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	unsigned char ck_size, mk_size;
	int key_ctx_size = 0;

	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) +
		((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
	if (keylen == AES_KEYSIZE_128) {
		mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
						key_ctx_size >> 4);
	return 0;
}

static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
				    unsigned int keylen)
{
	struct chcr_context *ctx = crypto_aead_ctx(aead);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);

	if (keylen < 3) {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	keylen -= 3;
	memcpy(aeadctx->salt, key + keylen, 3);
	return chcr_aead_ccm_setkey(aead, key, keylen);
}
2183
2184static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
2185			   unsigned int keylen)
2186{
2187	struct chcr_context *ctx = crypto_aead_ctx(aead);
2188	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2189	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
2190	struct blkcipher_desc h_desc;
2191	struct scatterlist src[1];
2192	unsigned int ck_size;
2193	int ret = 0, key_ctx_size = 0;
 
 
 
 
 
 
 
 
 
2194
2195	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
2196	    keylen > 3) {
2197		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
2198		memcpy(aeadctx->salt, key + keylen, 4);
2199	}
2200	if (keylen == AES_KEYSIZE_128) {
2201		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2202	} else if (keylen == AES_KEYSIZE_192) {
2203		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2204	} else if (keylen == AES_KEYSIZE_256) {
2205		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2206	} else {
2207		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2208				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2209		aeadctx->enckey_len = 0;
2210		pr_err("GCM: Invalid key length %d\n", keylen);
2211		ret = -EINVAL;
2212		goto out;
2213	}
2214
2215	memcpy(aeadctx->key, key, keylen);
2216	aeadctx->enckey_len = keylen;
2217	key_ctx_size = sizeof(struct _key_ctx) +
2218		((DIV_ROUND_UP(keylen, 16)) << 4) +
2219		AEAD_H_SIZE;
2220	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
2221						CHCR_KEYCTX_MAC_KEY_SIZE_128,
2222						0, 0,
2223						key_ctx_size >> 4);
2224	/* Calculate H = CIPH(K, 0 repeated 16 times) using a synchronous
2225	 * AES blkcipher; the result goes into the key context.
2226	 */
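	/*
	 * Encrypting a single all-zero block in CBC mode with the
	 * transform's default IV (zeroed at allocation) reduces to one
	 * raw AES encryption of the zero block, which is exactly H.
	 */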
2227	h_desc.tfm = crypto_alloc_blkcipher("cbc(aes-generic)", 0, 0);
2228	if (IS_ERR(h_desc.tfm)) {
2229		aeadctx->enckey_len = 0;
2230		ret = -ENOMEM;
2231		goto out;
2232	}
2233	h_desc.flags = 0;
2234	ret = crypto_blkcipher_setkey(h_desc.tfm, key, keylen);
2235	if (ret) {
2236		aeadctx->enckey_len = 0;
2237		goto out1;
2238	}
2239	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
2240	sg_init_one(&src[0], gctx->ghash_h, AEAD_H_SIZE);
2241	ret = crypto_blkcipher_encrypt(&h_desc, &src[0], &src[0], AEAD_H_SIZE);
2242
2243out1:
2244	crypto_free_blkcipher(h_desc.tfm);
2245out:
2246	return ret;
2247}
2248
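/*
 * authenc setkey: split the combined authenc() blob into its HMAC and
 * AES halves, then precompute the HMAC inner and outer partial digests,
 * h(key ^ ipad) and h(key ^ opad), so the hardware can resume the HMAC
 * instead of re-hashing the key on every request. Both digests end up
 * in the key context in network byte order.
 */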
2249static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
2250				   unsigned int keylen)
2251{
2252	struct chcr_context *ctx = crypto_aead_ctx(authenc);
2253	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2254	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2255	/* holds both the authentication and the cipher key */
2256	struct crypto_authenc_keys keys;
2257	unsigned int bs;
2258	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
2259	int err = 0, i, key_ctx_len = 0;
2260	unsigned char ck_size = 0;
2261	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
2262	struct crypto_shash *base_hash = NULL;
2263	struct algo_param param;
2264	int align;
2265	u8 *o_ptr = NULL;
2266
2267	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
2268		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
2269		goto out;
2270	}
2271
2272	if (get_alg_config(&param, max_authsize)) {
2273		pr_err("chcr : Unsupported digest size\n");
2274		goto out;
2275	}
2276	if (keys.enckeylen == AES_KEYSIZE_128) {
2277		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2278	} else if (keys.enckeylen == AES_KEYSIZE_192) {
2279		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2280	} else if (keys.enckeylen == AES_KEYSIZE_256) {
2281		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2282	} else {
2283		pr_err("chcr : Unsupported cipher key\n");
2284		goto out;
2285	}
2286
2287	/* Copy only the encryption key. The authentication key is consumed
2288	 * right here to derive h(ipad) and h(opad), so it does not need to
2289	 * be stored separately.
2290	 */
2291	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
2292	aeadctx->enckey_len = keys.enckeylen;
2293	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
2294			    aeadctx->enckey_len << 3);
2295
2296	base_hash = chcr_alloc_shash(max_authsize);
2297	if (IS_ERR(base_hash)) {
2298		pr_err("chcr : Base driver cannot be loaded\n");
2299		goto out;
2300	}
2301	{
2302		SHASH_DESC_ON_STACK(shash, base_hash);
2303		shash->tfm = base_hash;
2304		shash->flags = crypto_shash_get_flags(base_hash);
2305		bs = crypto_shash_blocksize(base_hash);
2306		align = KEYCTX_ALIGN_PAD(max_authsize);
2307		o_ptr = actx->h_iopad + param.result_size + align;
2308
2309		if (keys.authkeylen > bs) {
2310			err = crypto_shash_digest(shash, keys.authkey,
2311						  keys.authkeylen,
2312						  o_ptr);
2313			if (err) {
2314				pr_err("chcr : Hashing of the auth key failed\n");
2315				goto out;
2316			}
2317			keys.authkeylen = max_authsize;
2318		} else
2319			memcpy(o_ptr, keys.authkey, keys.authkeylen);
2320
2321		/* Compute the ipad digest */
2322		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
2323		memcpy(pad, o_ptr, keys.authkeylen);
2324		for (i = 0; i < bs >> 2; i++)
2325			*((unsigned int *)pad + i) ^= IPAD_DATA;
2326
2327		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
2328					      max_authsize))
2329			goto out;
2330		/* Compute the opad-digest */
2331		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
2332		memcpy(pad, o_ptr, keys.authkeylen);
2333		for (i = 0; i < bs >> 2; i++)
2334			*((unsigned int *)pad + i) ^= OPAD_DATA;
2335
2336		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
2337			goto out;
2338
2339		/* convert the ipad and opad digest to network order */
2340		chcr_change_order(actx->h_iopad, param.result_size);
2341		chcr_change_order(o_ptr, param.result_size);
2342		key_ctx_len = sizeof(struct _key_ctx) +
2343			((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
2344			(param.result_size + align) * 2;
2345		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
2346						0, 1, key_ctx_len >> 4);
2347		actx->auth_mode = param.auth_mode;
2348		chcr_free_shash(base_hash);
2349
2350		return 0;
2351	}
2352out:
2353	aeadctx->enckey_len = 0;
2354	if (!IS_ERR_OR_NULL(base_hash))
2355		chcr_free_shash(base_hash);
2356	return -EINVAL;
2357}
2358
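/*
 * authenc(digest_null,cbc(aes)) carries no authentication key: only the
 * cipher half of the authenc() blob is loaded, and the auth mode is set
 * to NOP. This backs ESP policies that want encryption without
 * integrity protection.
 */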
2359static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
2360					const u8 *key, unsigned int keylen)
2361{
2362	struct chcr_context *ctx = crypto_aead_ctx(authenc);
2363	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2364	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2365	struct crypto_authenc_keys keys;
2366
2367	/* keys holds both the authentication and the cipher key */
2368	int key_ctx_len = 0;
2369	unsigned char ck_size = 0;
2370
2371	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
2372		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
2373		goto out;
2374	}
2375	if (keys.enckeylen == AES_KEYSIZE_128) {
2376		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2377	} else if (keys.enckeylen == AES_KEYSIZE_192) {
2378		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2379	} else if (keys.enckeylen == AES_KEYSIZE_256) {
2380		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2381	} else {
2382		pr_err("chcr : Unsupported cipher key\n");
2383		goto out;
2384	}
2385	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
2386	aeadctx->enckey_len = keys.enckeylen;
2387	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
2388				    aeadctx->enckey_len << 3);
2389	key_ctx_len = sizeof(struct _key_ctx) +
2390		((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
2391
2392	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
2393						0, key_ctx_len >> 4);
2394	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
2395	return 0;
2396out:
2397	aeadctx->enckey_len = 0;
2398	return -EINVAL;
2399}
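/*
 * Top-level AEAD entry points. Both directions only tag the request
 * with the verification mode and dispatch to the work-request builder
 * matching the algorithm subtype: authenc/null, CCM/RFC4309, or GCM as
 * the default.
 */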
2400static int chcr_aead_encrypt(struct aead_request *req)
2401{
2402	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2403	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2404
2405	reqctx->verify = VERIFY_HW;
2406
2407	switch (get_aead_subtype(tfm)) {
2408	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
2409	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
2410		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
2411				    create_authenc_wr);
2412	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
2413	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
2414		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
2415				    create_aead_ccm_wr);
2416	default:
2417		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
2418				    create_gcm_wr);
2419	}
2420}
2421
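/*
 * On decrypt, when software verification is in effect the hardware is
 * asked to emit the full-size digest (crypto_aead_maxauthsize()) so the
 * driver can compare the tag itself on completion; with hardware
 * verification no extra digest bytes are needed and size stays zero.
 */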
2422static int chcr_aead_decrypt(struct aead_request *req)
2423{
2424	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2425	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2426	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2427	int size;
2428
2429	if (aeadctx->mayverify == VERIFY_SW) {
2430		size = crypto_aead_maxauthsize(tfm);
2431		reqctx->verify = VERIFY_SW;
2432	} else {
2433		size = 0;
2434		reqctx->verify = VERIFY_HW;
2435	}
2436
2437	switch (get_aead_subtype(tfm)) {
2438	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
2439	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
2440		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
2441				    create_authenc_wr);
2442	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
2443	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
2444		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
2445				    create_aead_ccm_wr);
2446	default:
2447		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
2448				    create_gcm_wr);
2449	}
2450}
2451
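/*
 * chcr_aead_op - common submission path. It builds an skb-backed work
 * request with the supplied constructor, binds it to the context's tx
 * channel and returns -EINPROGRESS; the result is delivered
 * asynchronously once the hardware responds.
 */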
2452static int chcr_aead_op(struct aead_request *req,
2453			  unsigned short op_type,
2454			  int size,
2455			  create_wr_t create_wr_fn)
2456{
2457	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2458	struct chcr_context *ctx = crypto_aead_ctx(tfm);
2459	struct uld_ctx *u_ctx;
2460	struct sk_buff *skb;
2461
2462	if (!ctx->dev) {
2463		pr_err("chcr : %s : No crypto device.\n", __func__);
2464		return -ENXIO;
2465	}
2466	u_ctx = ULD_CTX(ctx);
2467	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
2468				   ctx->tx_channel_id)) {
2469		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
2470			return -EBUSY;
2471	}

2472	/* Form a WR from req */
2473	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], size,
2474			   op_type);
2475
2476	if (IS_ERR(skb) || skb == NULL) {
2477		pr_err("chcr : %s : failed to form WR. No memory\n", __func__);
2478		return skb ? PTR_ERR(skb) : -ENOMEM;
2479	}
2480
2481	skb->dev = u_ctx->lldi.ports[0];
2482	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
2483	chcr_send_wr(skb);
2484	return -EINPROGRESS;
2485}
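
/*
 * Template table of every algorithm the driver exposes. For the AEAD
 * and hash entries the common fields (priority, flags, callbacks,
 * module) are filled in by chcr_register_alg() at registration time, so
 * only the per-algorithm differences are spelled out here.
 */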
2486static struct chcr_alg_template driver_algs[] = {
2487	/* AES-CBC */
2488	{
2489		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2490		.is_registered = 0,
2491		.alg.crypto = {
2492			.cra_name		= "cbc(aes)",
2493			.cra_driver_name	= "cbc-aes-chcr",
2494			.cra_priority		= CHCR_CRA_PRIORITY,
2495			.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
2496				CRYPTO_ALG_ASYNC,
2497			.cra_blocksize		= AES_BLOCK_SIZE,
2498			.cra_ctxsize		= sizeof(struct chcr_context)
2499				+ sizeof(struct ablk_ctx),
2500			.cra_alignmask		= 0,
2501			.cra_type		= &crypto_ablkcipher_type,
2502			.cra_module		= THIS_MODULE,
2503			.cra_init		= chcr_cra_init,
2504			.cra_exit		= NULL,
2505			.cra_u.ablkcipher	= {
2506				.min_keysize	= AES_MIN_KEY_SIZE,
2507				.max_keysize	= AES_MAX_KEY_SIZE,
2508				.ivsize		= AES_BLOCK_SIZE,
2509				.setkey			= chcr_aes_cbc_setkey,
2510				.encrypt		= chcr_aes_encrypt,
2511				.decrypt		= chcr_aes_decrypt,
2512			}
2513		}
2514	},
2515	{
2516		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2517		.is_registered = 0,
2518		.alg.crypto =   {
2519			.cra_name		= "xts(aes)",
2520			.cra_driver_name	= "xts-aes-chcr",
2521			.cra_priority		= CHCR_CRA_PRIORITY,
2522			.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
2523				CRYPTO_ALG_ASYNC,
2524			.cra_blocksize		= AES_BLOCK_SIZE,
2525			.cra_ctxsize		= sizeof(struct chcr_context) +
2526				sizeof(struct ablk_ctx),
2527			.cra_alignmask		= 0,
2528			.cra_type		= &crypto_ablkcipher_type,
2529			.cra_module		= THIS_MODULE,
2530			.cra_init		= chcr_cra_init,
2531			.cra_exit		= NULL,
2532			.cra_u = {
2533				.ablkcipher = {
2534					.min_keysize	= 2 * AES_MIN_KEY_SIZE,
2535					.max_keysize	= 2 * AES_MAX_KEY_SIZE,
2536					.ivsize		= AES_BLOCK_SIZE,
2537					.setkey		= chcr_aes_xts_setkey,
2538					.encrypt	= chcr_aes_encrypt,
2539					.decrypt	= chcr_aes_decrypt,
2540				}
2541			}
2542		}
2543	},
2544	/* SHA */
2545	{
2546		.type = CRYPTO_ALG_TYPE_AHASH,
2547		.is_registered = 0,
2548		.alg.hash = {
2549			.halg.digestsize = SHA1_DIGEST_SIZE,
2550			.halg.base = {
2551				.cra_name = "sha1",
2552				.cra_driver_name = "sha1-chcr",
2553				.cra_blocksize = SHA1_BLOCK_SIZE,
2554			}
2555		}
2556	},
2557	{
2558		.type = CRYPTO_ALG_TYPE_AHASH,
2559		.is_registered = 0,
2560		.alg.hash = {
2561			.halg.digestsize = SHA256_DIGEST_SIZE,
2562			.halg.base = {
2563				.cra_name = "sha256",
2564				.cra_driver_name = "sha256-chcr",
2565				.cra_blocksize = SHA256_BLOCK_SIZE,
2566			}
2567		}
2568	},
2569	{
2570		.type = CRYPTO_ALG_TYPE_AHASH,
2571		.is_registered = 0,
2572		.alg.hash = {
2573			.halg.digestsize = SHA224_DIGEST_SIZE,
2574			.halg.base = {
2575				.cra_name = "sha224",
2576				.cra_driver_name = "sha224-chcr",
2577				.cra_blocksize = SHA224_BLOCK_SIZE,
2578			}
2579		}
2580	},
2581	{
2582		.type = CRYPTO_ALG_TYPE_AHASH,
2583		.is_registered = 0,
2584		.alg.hash = {
2585			.halg.digestsize = SHA384_DIGEST_SIZE,
2586			.halg.base = {
2587				.cra_name = "sha384",
2588				.cra_driver_name = "sha384-chcr",
2589				.cra_blocksize = SHA384_BLOCK_SIZE,
2590			}
2591		}
2592	},
2593	{
2594		.type = CRYPTO_ALG_TYPE_AHASH,
2595		.is_registered = 0,
2596		.alg.hash = {
2597			.halg.digestsize = SHA512_DIGEST_SIZE,
2598			.halg.base = {
2599				.cra_name = "sha512",
2600				.cra_driver_name = "sha512-chcr",
2601				.cra_blocksize = SHA512_BLOCK_SIZE,
2602			}
2603		}
2604	},
2605	/* HMAC */
2606	{
2607		.type = CRYPTO_ALG_TYPE_HMAC,
2608		.is_registered = 0,
2609		.alg.hash = {
2610			.halg.digestsize = SHA1_DIGEST_SIZE,
2611			.halg.base = {
2612				.cra_name = "hmac(sha1)",
2613				.cra_driver_name = "hmac-sha1-chcr",
2614				.cra_blocksize = SHA1_BLOCK_SIZE,
2615			}
2616		}
2617	},
2618	{
2619		.type = CRYPTO_ALG_TYPE_HMAC,
2620		.is_registered = 0,
2621		.alg.hash = {
2622			.halg.digestsize = SHA224_DIGEST_SIZE,
2623			.halg.base = {
2624				.cra_name = "hmac(sha224)",
2625				.cra_driver_name = "hmac-sha224-chcr",
2626				.cra_blocksize = SHA224_BLOCK_SIZE,
2627			}
2628		}
2629	},
2630	{
2631		.type = CRYPTO_ALG_TYPE_HMAC,
2632		.is_registered = 0,
2633		.alg.hash = {
2634			.halg.digestsize = SHA256_DIGEST_SIZE,
2635			.halg.base = {
2636				.cra_name = "hmac(sha256)",
2637				.cra_driver_name = "hmac-sha256-chcr",
2638				.cra_blocksize = SHA256_BLOCK_SIZE,
2639			}
2640		}
2641	},
2642	{
2643		.type = CRYPTO_ALG_TYPE_HMAC,
2644		.is_registered = 0,
2645		.alg.hash = {
2646			.halg.digestsize = SHA384_DIGEST_SIZE,
2647			.halg.base = {
2648				.cra_name = "hmac(sha384)",
2649				.cra_driver_name = "hmac-sha384-chcr",
2650				.cra_blocksize = SHA384_BLOCK_SIZE,
2651			}
2652		}
2653	},
2654	{
2655		.type = CRYPTO_ALG_TYPE_HMAC,
2656		.is_registered = 0,
2657		.alg.hash = {
2658			.halg.digestsize = SHA512_DIGEST_SIZE,
2659			.halg.base = {
2660				.cra_name = "hmac(sha512)",
2661				.cra_driver_name = "hmac-sha512-chcr",
2662				.cra_blocksize = SHA512_BLOCK_SIZE,
2663			}
2664		}
2665	},
2666	/* Add AEAD Algorithms */
2667	{
2668		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
2669		.is_registered = 0,
2670		.alg.aead = {
2671			.base = {
2672				.cra_name = "gcm(aes)",
2673				.cra_driver_name = "gcm-aes-chcr",
2674				.cra_blocksize	= 1,
2675				.cra_ctxsize =	sizeof(struct chcr_context) +
2676						sizeof(struct chcr_aead_ctx) +
2677						sizeof(struct chcr_gcm_ctx),
2678			},
2679			.ivsize = 12,
2680			.maxauthsize = GHASH_DIGEST_SIZE,
2681			.setkey = chcr_gcm_setkey,
2682			.setauthsize = chcr_gcm_setauthsize,
2683		}
2684	},
2685	{
2686		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
2687		.is_registered = 0,
2688		.alg.aead = {
2689			.base = {
2690				.cra_name = "rfc4106(gcm(aes))",
2691				.cra_driver_name = "rfc4106-gcm-aes-chcr",
2692				.cra_blocksize	 = 1,
2693				.cra_ctxsize =	sizeof(struct chcr_context) +
2694						sizeof(struct chcr_aead_ctx) +
2695						sizeof(struct chcr_gcm_ctx),
2696
2697			},
2698			.ivsize = 8,
2699			.maxauthsize	= GHASH_DIGEST_SIZE,
2700			.setkey = chcr_gcm_setkey,
2701			.setauthsize	= chcr_4106_4309_setauthsize,
2702		}
2703	},
2704	{
2705		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
2706		.is_registered = 0,
2707		.alg.aead = {
2708			.base = {
2709				.cra_name = "ccm(aes)",
2710				.cra_driver_name = "ccm-aes-chcr",
2711				.cra_blocksize	 = 1,
2712				.cra_ctxsize =	sizeof(struct chcr_context) +
2713						sizeof(struct chcr_aead_ctx),
2714
2715			},
2716			.ivsize = AES_BLOCK_SIZE,
2717			.maxauthsize	= GHASH_DIGEST_SIZE,
2718			.setkey = chcr_aead_ccm_setkey,
2719			.setauthsize	= chcr_ccm_setauthsize,
2720		}
2721	},
2722	{
2723		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
2724		.is_registered = 0,
2725		.alg.aead = {
2726			.base = {
2727				.cra_name = "rfc4309(ccm(aes))",
2728				.cra_driver_name = "rfc4309-ccm-aes-chcr",
2729				.cra_blocksize	 = 1,
2730				.cra_ctxsize =	sizeof(struct chcr_context) +
2731						sizeof(struct chcr_aead_ctx),
2732
2733			},
2734			.ivsize = 8,
2735			.maxauthsize	= GHASH_DIGEST_SIZE,
2736			.setkey = chcr_aead_rfc4309_setkey,
2737			.setauthsize = chcr_4106_4309_setauthsize,
2738		}
2739	},
2740	{
2741		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2742		.is_registered = 0,
2743		.alg.aead = {
2744			.base = {
2745				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2746				.cra_driver_name =
2747					"authenc-hmac-sha1-cbc-aes-chcr",
2748				.cra_blocksize	 = AES_BLOCK_SIZE,
2749				.cra_ctxsize =	sizeof(struct chcr_context) +
2750						sizeof(struct chcr_aead_ctx) +
2751						sizeof(struct chcr_authenc_ctx),
2752
2753			},
2754			.ivsize = AES_BLOCK_SIZE,
2755			.maxauthsize = SHA1_DIGEST_SIZE,
2756			.setkey = chcr_authenc_setkey,
2757			.setauthsize = chcr_authenc_setauthsize,
2758		}
2759	},
2760	{
2761		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2762		.is_registered = 0,
2763		.alg.aead = {
2764			.base = {
2765
2766				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2767				.cra_driver_name =
2768					"authenc-hmac-sha256-cbc-aes-chcr",
2769				.cra_blocksize	 = AES_BLOCK_SIZE,
2770				.cra_ctxsize =	sizeof(struct chcr_context) +
2771						sizeof(struct chcr_aead_ctx) +
2772						sizeof(struct chcr_authenc_ctx),
2773
2774			},
2775			.ivsize = AES_BLOCK_SIZE,
2776			.maxauthsize	= SHA256_DIGEST_SIZE,
2777			.setkey = chcr_authenc_setkey,
2778			.setauthsize = chcr_authenc_setauthsize,
2779		}
2780	},
2781	{
2782		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2783		.is_registered = 0,
2784		.alg.aead = {
2785			.base = {
2786				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2787				.cra_driver_name =
2788					"authenc-hmac-sha224-cbc-aes-chcr",
2789				.cra_blocksize	 = AES_BLOCK_SIZE,
2790				.cra_ctxsize =	sizeof(struct chcr_context) +
2791						sizeof(struct chcr_aead_ctx) +
2792						sizeof(struct chcr_authenc_ctx),
2793			},
2794			.ivsize = AES_BLOCK_SIZE,
2795			.maxauthsize = SHA224_DIGEST_SIZE,
2796			.setkey = chcr_authenc_setkey,
2797			.setauthsize = chcr_authenc_setauthsize,
2798		}
2799	},
2800	{
2801		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2802		.is_registered = 0,
2803		.alg.aead = {
2804			.base = {
2805				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2806				.cra_driver_name =
2807					"authenc-hmac-sha384-cbc-aes-chcr",
2808				.cra_blocksize	 = AES_BLOCK_SIZE,
2809				.cra_ctxsize =	sizeof(struct chcr_context) +
2810						sizeof(struct chcr_aead_ctx) +
2811						sizeof(struct chcr_authenc_ctx),
2812
2813			},
2814			.ivsize = AES_BLOCK_SIZE,
2815			.maxauthsize = SHA384_DIGEST_SIZE,
2816			.setkey = chcr_authenc_setkey,
2817			.setauthsize = chcr_authenc_setauthsize,
2818		}
2819	},
2820	{
2821		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2822		.is_registered = 0,
2823		.alg.aead = {
2824			.base = {
2825				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2826				.cra_driver_name =
2827					"authenc-hmac-sha512-cbc-aes-chcr",
2828				.cra_blocksize	 = AES_BLOCK_SIZE,
2829				.cra_ctxsize =	sizeof(struct chcr_context) +
2830						sizeof(struct chcr_aead_ctx) +
2831						sizeof(struct chcr_authenc_ctx),
2832
2833			},
2834			.ivsize = AES_BLOCK_SIZE,
2835			.maxauthsize = SHA512_DIGEST_SIZE,
2836			.setkey = chcr_authenc_setkey,
2837			.setauthsize = chcr_authenc_setauthsize,
2838		}
2839	},
2840	{
2841		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
2842		.is_registered = 0,
2843		.alg.aead = {
2844			.base = {
2845				.cra_name = "authenc(digest_null,cbc(aes))",
2846				.cra_driver_name =
2847					"authenc-digest_null-cbc-aes-chcr",
2848				.cra_blocksize	 = AES_BLOCK_SIZE,
2849				.cra_ctxsize =	sizeof(struct chcr_context) +
2850						sizeof(struct chcr_aead_ctx) +
2851						sizeof(struct chcr_authenc_ctx),
2852
2853			},
2854			.ivsize  = AES_BLOCK_SIZE,
2855			.maxauthsize = 0,
2856			.setkey  = chcr_aead_digest_null_setkey,
2857			.setauthsize = chcr_authenc_null_setauthsize,
2858		}
2859	},
2860};
2861
2862/*
2863 *	chcr_unregister_alg - Deregister the crypto algorithms from the
2864 *	kernel crypto framework.
2865 */
2866static int chcr_unregister_alg(void)
2867{
2868	int i;
2869
2870	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2871		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
2872		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2873			if (driver_algs[i].is_registered)
2874				crypto_unregister_alg(
2875						&driver_algs[i].alg.crypto);
2876			break;
2877		case CRYPTO_ALG_TYPE_AEAD:
2878			if (driver_algs[i].is_registered)
2879				crypto_unregister_aead(
2880						&driver_algs[i].alg.aead);
2881			break;
2882		case CRYPTO_ALG_TYPE_AHASH:
2883			if (driver_algs[i].is_registered)
2884				crypto_unregister_ahash(
2885						&driver_algs[i].alg.hash);
2886			break;
2887		}
2888		driver_algs[i].is_registered = 0;
2889	}
2890	return 0;
2891}
2892
2893#define SZ_AHASH_CTX sizeof(struct chcr_context)
2894#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
2895#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
2896#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
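
/*
 * statesize is set to the full request-context size, so ahash
 * export/import snapshot the whole in-flight request state. HMAC
 * transforms use the larger SZ_AHASH_H_CTX, whose struct hmac_ctx
 * presumably holds the precomputed ipad/opad digests.
 */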
2897
2898/*
2899 *	chcr_register_alg - Register the crypto algorithms with the kernel framework.
2900 */
2901static int chcr_register_alg(void)
2902{
2903	struct crypto_alg ai;
2904	struct ahash_alg *a_hash;
2905	int err = 0, i;
2906	char *name = NULL;
2907
2908	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2909		if (driver_algs[i].is_registered)
2910			continue;
2911		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
2912		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2913			err = crypto_register_alg(&driver_algs[i].alg.crypto);
2914			name = driver_algs[i].alg.crypto.cra_driver_name;
2915			break;
2916		case CRYPTO_ALG_TYPE_AEAD:
2917			driver_algs[i].alg.aead.base.cra_priority =
2918				CHCR_CRA_PRIORITY;
2919			driver_algs[i].alg.aead.base.cra_flags =
2920				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
2921			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
2922			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
2923			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
2924			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
2925			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
2926			err = crypto_register_aead(&driver_algs[i].alg.aead);
2927			name = driver_algs[i].alg.aead.base.cra_driver_name;
2928			break;
2929		case CRYPTO_ALG_TYPE_AHASH:
2930			a_hash = &driver_algs[i].alg.hash;
2931			a_hash->update = chcr_ahash_update;
2932			a_hash->final = chcr_ahash_final;
2933			a_hash->finup = chcr_ahash_finup;
2934			a_hash->digest = chcr_ahash_digest;
2935			a_hash->export = chcr_ahash_export;
2936			a_hash->import = chcr_ahash_import;
2937			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
2938			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
2939			a_hash->halg.base.cra_module = THIS_MODULE;
2940			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
2941			a_hash->halg.base.cra_alignmask = 0;
2942			a_hash->halg.base.cra_exit = NULL;
2943			a_hash->halg.base.cra_type = &crypto_ahash_type;
2944
2945			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
2946				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
2947				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
2948				a_hash->init = chcr_hmac_init;
2949				a_hash->setkey = chcr_ahash_setkey;
2950				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
2951			} else {
2952				a_hash->init = chcr_sha_init;
2953				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
2954				a_hash->halg.base.cra_init = chcr_sha_cra_init;
2955			}
2956			err = crypto_register_ahash(&driver_algs[i].alg.hash);
2957			ai = driver_algs[i].alg.hash.halg.base;
2958			name = ai.cra_driver_name;
2959			break;
2960		}
2961		if (err) {
2962			pr_err("chcr : %s : Algorithm registration failed\n",
2963			       name);
2964			goto register_err;
2965		} else {
2966			driver_algs[i].is_registered = 1;
2967		}
2968	}
2969	return 0;
2970
2971register_err:
2972	chcr_unregister_alg();
2973	return err;
2974}
2975
2976/*
2977 *	start_crypto - Register the crypto algorithms.
2978 *	This should be called once, when the first device comes up. After
2979 *	this the kernel will start calling the driver APIs for crypto operations.
2980 */
2981int start_crypto(void)
2982{
2983	return chcr_register_alg();
2984}
2985
2986/*
2987 *	stop_crypto - Deregister all the crypto algorithms from the kernel.
2988 *	This should be called once, when the last device goes down. After
2989 *	this the kernel will no longer call the driver APIs for crypto operations.
2990 */
2991int stop_crypto(void)
2992{
2993	chcr_unregister_alg();
2994	return 0;
2995}