   1/*
   2 * This file is part of the Chelsio T6 Crypto driver for Linux.
   3 *
   4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 *
  34 * Written and Maintained by:
  35 *	Manoj Malviya (manojmalviya@chelsio.com)
  36 *	Atul Gupta (atul.gupta@chelsio.com)
  37 *	Jitendra Lulla (jlulla@chelsio.com)
  38 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
  39 *	Harsh Jain (harsh@chelsio.com)
  40 */
  41
  42#define pr_fmt(fmt) "chcr:" fmt
  43
  44#include <linux/kernel.h>
  45#include <linux/module.h>
   46#include <linux/crypto.h>
  47#include <linux/skbuff.h>
  48#include <linux/rtnetlink.h>
  49#include <linux/highmem.h>
  50#include <linux/scatterlist.h>
  51
  52#include <crypto/aes.h>
  53#include <crypto/algapi.h>
  54#include <crypto/hash.h>
  55#include <crypto/gcm.h>
  56#include <crypto/sha.h>
  57#include <crypto/authenc.h>
  58#include <crypto/ctr.h>
  59#include <crypto/gf128mul.h>
  60#include <crypto/internal/aead.h>
  61#include <crypto/null.h>
  62#include <crypto/internal/skcipher.h>
  63#include <crypto/aead.h>
  64#include <crypto/scatterwalk.h>
  65#include <crypto/internal/hash.h>
  66
  67#include "t4fw_api.h"
  68#include "t4_msg.h"
  69#include "chcr_core.h"
  70#include "chcr_algo.h"
  71#include "chcr_crypto.h"
  72
  73#define IV AES_BLOCK_SIZE
  74
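/* Bytes consumed inside a work request by a source (ULPTX) SGL and by a
 * destination (PHYS_DSGL) list with N entries; used below when budgeting
 * how much payload fits into one WR.
 */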
  75static unsigned int sgl_ent_len[] = {
  76	0, 0, 16, 24, 40, 48, 64, 72, 88,
  77	96, 112, 120, 136, 144, 160, 168, 184,
  78	192, 208, 216, 232, 240, 256, 264, 280,
  79	288, 304, 312, 328, 336, 352, 360, 376
  80};
  81
  82static unsigned int dsgl_ent_len[] = {
  83	0, 32, 32, 48, 48, 64, 64, 80, 80,
  84	112, 112, 128, 128, 144, 144, 160, 160,
  85	192, 192, 208, 208, 224, 224, 240, 240,
  86	272, 272, 288, 288, 304, 304, 320, 320
  87};
  88
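/* AES key-schedule round constants (Rcon), left-aligned in 32-bit words;
 * used by get_aes_decrypt_key() below.
 */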
  89static u32 round_constant[11] = {
  90	0x01000000, 0x02000000, 0x04000000, 0x08000000,
  91	0x10000000, 0x20000000, 0x40000000, 0x80000000,
  92	0x1B000000, 0x36000000, 0x6C000000
  93};
  94
  95static int chcr_handle_cipher_resp(struct skcipher_request *req,
  96				   unsigned char *input, int err);
  97
  98static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
  99{
 100	return ctx->crypto_ctx->aeadctx;
 101}
 102
 103static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
 104{
 105	return ctx->crypto_ctx->ablkctx;
 106}
 107
 108static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
 109{
 110	return ctx->crypto_ctx->hmacctx;
 111}
 112
 113static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
 114{
 115	return gctx->ctx->gcm;
 116}
 117
 118static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
 119{
 120	return gctx->ctx->authenc;
 121}
 122
 123static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
 124{
 125	return container_of(ctx->dev, struct uld_ctx, dev);
 126}
 127
 128static inline int is_ofld_imm(const struct sk_buff *skb)
 129{
 130	return (skb->len <= SGE_MAX_WR_LEN);
 131}
 132
 133static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
 134{
 135	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
 136}
 137
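/* Count the SG entries needed to describe @reqlen bytes of @sg when each
 * hardware entry covers at most @entlen bytes, after skipping the first
 * @skip bytes of the list.
 */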
 138static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
 139			 unsigned int entlen,
 140			 unsigned int skip)
 141{
 142	int nents = 0;
 143	unsigned int less;
 144	unsigned int skip_len = 0;
 145
 146	while (sg && skip) {
 147		if (sg_dma_len(sg) <= skip) {
 148			skip -= sg_dma_len(sg);
 149			skip_len = 0;
 150			sg = sg_next(sg);
 151		} else {
 152			skip_len = skip;
 153			skip = 0;
 154		}
 155	}
 156
 157	while (sg && reqlen) {
 158		less = min(reqlen, sg_dma_len(sg) - skip_len);
 159		nents += DIV_ROUND_UP(less, entlen);
 160		reqlen -= less;
 161		skip_len = 0;
 162		sg = sg_next(sg);
 163	}
 164	return nents;
 165}
 166
 167static inline int get_aead_subtype(struct crypto_aead *aead)
 168{
 169	struct aead_alg *alg = crypto_aead_alg(aead);
 170	struct chcr_alg_template *chcr_crypto_alg =
 171		container_of(alg, struct chcr_alg_template, alg.aead);
 172	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 173}
 174
 175void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
 176{
 177	u8 temp[SHA512_DIGEST_SIZE];
 178	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 179	int authsize = crypto_aead_authsize(tfm);
 180	struct cpl_fw6_pld *fw6_pld;
 181	int cmp = 0;
 182
 183	fw6_pld = (struct cpl_fw6_pld *)input;
 184	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
 185	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
 186		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
 187	} else {
 188
 189		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
 190				authsize, req->assoclen +
 191				req->cryptlen - authsize);
 192		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
 193	}
 194	if (cmp)
 195		*err = -EBADMSG;
 196	else
 197		*err = 0;
 198}
 199
 200static int chcr_inc_wrcount(struct chcr_dev *dev)
 201{
 202	if (dev->state == CHCR_DETACH)
 203		return 1;
 204	atomic_inc(&dev->inflight);
 205	return 0;
 206}
 207
 208static inline void chcr_dec_wrcount(struct chcr_dev *dev)
 209{
 210	atomic_dec(&dev->inflight);
 211}
 212
 213static inline int chcr_handle_aead_resp(struct aead_request *req,
 214					 unsigned char *input,
 215					 int err)
 216{
 217	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 218	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 219	struct chcr_dev *dev = a_ctx(tfm)->dev;
 220
  221	chcr_aead_common_exit(req);
 222	if (reqctx->verify == VERIFY_SW) {
 223		chcr_verify_tag(req, input, &err);
 224		reqctx->verify = VERIFY_HW;
 225	}
 226	chcr_dec_wrcount(dev);
 227	req->base.complete(&req->base, err);
 228
 229	return err;
 230}
 231
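/* Run the FIPS-197 key expansion in a ring of Nk words and copy the last
 * Nk expanded words out in reverse order: the "reversed round key"
 * (rrkey) material used by the key context for decryption.
 */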
 232static void get_aes_decrypt_key(unsigned char *dec_key,
 233				       const unsigned char *key,
 234				       unsigned int keylength)
 235{
 236	u32 temp;
 237	u32 w_ring[MAX_NK];
 238	int i, j, k;
 239	u8  nr, nk;
 240
 241	switch (keylength) {
 242	case AES_KEYLENGTH_128BIT:
 243		nk = KEYLENGTH_4BYTES;
 244		nr = NUMBER_OF_ROUNDS_10;
 245		break;
 246	case AES_KEYLENGTH_192BIT:
 247		nk = KEYLENGTH_6BYTES;
 248		nr = NUMBER_OF_ROUNDS_12;
 249		break;
 250	case AES_KEYLENGTH_256BIT:
 251		nk = KEYLENGTH_8BYTES;
 252		nr = NUMBER_OF_ROUNDS_14;
 253		break;
 254	default:
 255		return;
 256	}
 257	for (i = 0; i < nk; i++)
 258		w_ring[i] = get_unaligned_be32(&key[i * 4]);
 259
 260	i = 0;
 261	temp = w_ring[nk - 1];
 262	while (i + nk < (nr + 1) * 4) {
 263		if (!(i % nk)) {
 264			/* RotWord(temp) */
 265			temp = (temp << 8) | (temp >> 24);
 266			temp = aes_ks_subword(temp);
 267			temp ^= round_constant[i / nk];
 268		} else if (nk == 8 && (i % 4 == 0)) {
 269			temp = aes_ks_subword(temp);
 270		}
 271		w_ring[i % nk] ^= temp;
 272		temp = w_ring[i % nk];
 273		i++;
 274	}
 275	i--;
 276	for (k = 0, j = i % nk; k < nk; k++) {
 277		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
 278		j--;
 279		if (j < 0)
 280			j += nk;
 281	}
 282}
 283
 284static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
 285{
 286	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
 287
 288	switch (ds) {
 289	case SHA1_DIGEST_SIZE:
 290		base_hash = crypto_alloc_shash("sha1", 0, 0);
 291		break;
 292	case SHA224_DIGEST_SIZE:
 293		base_hash = crypto_alloc_shash("sha224", 0, 0);
 294		break;
 295	case SHA256_DIGEST_SIZE:
 296		base_hash = crypto_alloc_shash("sha256", 0, 0);
 297		break;
 298	case SHA384_DIGEST_SIZE:
 299		base_hash = crypto_alloc_shash("sha384", 0, 0);
 300		break;
 301	case SHA512_DIGEST_SIZE:
 302		base_hash = crypto_alloc_shash("sha512", 0, 0);
 303		break;
 304	}
 305
 306	return base_hash;
 307}
 308
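/* Hash a single block (the HMAC ipad/opad block) with the software shash
 * and export the raw intermediate state words into @result_hash.
 */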
 309static int chcr_compute_partial_hash(struct shash_desc *desc,
 310				     char *iopad, char *result_hash,
 311				     int digest_size)
 312{
 313	struct sha1_state sha1_st;
 314	struct sha256_state sha256_st;
 315	struct sha512_state sha512_st;
 316	int error;
 317
 318	if (digest_size == SHA1_DIGEST_SIZE) {
 319		error = crypto_shash_init(desc) ?:
 320			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
 321			crypto_shash_export(desc, (void *)&sha1_st);
 322		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
 323	} else if (digest_size == SHA224_DIGEST_SIZE) {
 324		error = crypto_shash_init(desc) ?:
 325			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
 326			crypto_shash_export(desc, (void *)&sha256_st);
 327		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
 328
 329	} else if (digest_size == SHA256_DIGEST_SIZE) {
 330		error = crypto_shash_init(desc) ?:
 331			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
 332			crypto_shash_export(desc, (void *)&sha256_st);
 333		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
 334
 335	} else if (digest_size == SHA384_DIGEST_SIZE) {
 336		error = crypto_shash_init(desc) ?:
 337			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
 338			crypto_shash_export(desc, (void *)&sha512_st);
 339		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
 340
 341	} else if (digest_size == SHA512_DIGEST_SIZE) {
 342		error = crypto_shash_init(desc) ?:
 343			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
 344			crypto_shash_export(desc, (void *)&sha512_st);
 345		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
 346	} else {
 347		error = -EINVAL;
 348		pr_err("Unknown digest size %d\n", digest_size);
 349	}
 350	return error;
 351}
 352
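/* Byte-swap the exported partial-hash state to big-endian: 64-bit words
 * when @ds is SHA512_DIGEST_SIZE, 32-bit words otherwise.
 */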
 353static void chcr_change_order(char *buf, int ds)
 354{
 355	int i;
 356
 357	if (ds == SHA512_DIGEST_SIZE) {
 358		for (i = 0; i < (ds / sizeof(u64)); i++)
 359			*((__be64 *)buf + i) =
 360				cpu_to_be64(*((u64 *)buf + i));
 361	} else {
 362		for (i = 0; i < (ds / sizeof(u32)); i++)
 363			*((__be32 *)buf + i) =
 364				cpu_to_be32(*((u32 *)buf + i));
 365	}
 366}
 367
 368static inline int is_hmac(struct crypto_tfm *tfm)
 369{
 370	struct crypto_alg *alg = tfm->__crt_alg;
 371	struct chcr_alg_template *chcr_crypto_alg =
 372		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
 373			     alg.hash);
 374	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
 375		return 1;
 376	return 0;
 377}
 378
 379static inline void dsgl_walk_init(struct dsgl_walk *walk,
 380				   struct cpl_rx_phys_dsgl *dsgl)
 381{
 382	walk->dsgl = dsgl;
 383	walk->nents = 0;
 384	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 385}
 386
 387static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
 388				 int pci_chan_id)
 389{
 390	struct cpl_rx_phys_dsgl *phys_cpl;
 391
 392	phys_cpl = walk->dsgl;
 393
 394	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
 395				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
 396	phys_cpl->pcirlxorder_to_noofsgentr =
 397		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
 398		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
 399		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
 400		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
 401		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
 402		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
 403	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
 404	phys_cpl->rss_hdr_int.qid = htons(qid);
 405	phys_cpl->rss_hdr_int.hash_val = 0;
 406	phys_cpl->rss_hdr_int.channel = pci_chan_id;
 407}
 408
 409static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
 410					size_t size,
 411					dma_addr_t addr)
 412{
 413	int j;
 414
 415	if (!size)
 416		return;
 417	j = walk->nents;
 418	walk->to->len[j % 8] = htons(size);
 419	walk->to->addr[j % 8] = cpu_to_be64(addr);
 420	j++;
 421	if ((j % 8) == 0)
 422		walk->to++;
 423	walk->nents = j;
 424}
 425
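/* Append a scatterlist to the destination PHYS_DSGL, skipping the first
 * @skip bytes and splitting entries larger than CHCR_DST_SG_SIZE.
 */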
 426static void  dsgl_walk_add_sg(struct dsgl_walk *walk,
 427			   struct scatterlist *sg,
 428			      unsigned int slen,
 429			      unsigned int skip)
 430{
 431	int skip_len = 0;
 432	unsigned int left_size = slen, len = 0;
 433	unsigned int j = walk->nents;
 434	int offset, ent_len;
 435
 436	if (!slen)
 437		return;
 438	while (sg && skip) {
 439		if (sg_dma_len(sg) <= skip) {
 440			skip -= sg_dma_len(sg);
 441			skip_len = 0;
 442			sg = sg_next(sg);
 443		} else {
 444			skip_len = skip;
 445			skip = 0;
 446		}
 447	}
 448
 449	while (left_size && sg) {
 450		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
 451		offset = 0;
 452		while (len) {
 453			ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
 454			walk->to->len[j % 8] = htons(ent_len);
 455			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
 456						      offset + skip_len);
 457			offset += ent_len;
 458			len -= ent_len;
 459			j++;
 460			if ((j % 8) == 0)
 461				walk->to++;
 462		}
 463		walk->last_sg = sg;
 464		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
 465					  skip_len) + skip_len;
 466		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
 467		skip_len = 0;
 468		sg = sg_next(sg);
 469	}
 470	walk->nents = j;
 471}
 472
 473static inline void ulptx_walk_init(struct ulptx_walk *walk,
 474				   struct ulptx_sgl *ulp)
 475{
 476	walk->sgl = ulp;
 477	walk->nents = 0;
 478	walk->pair_idx = 0;
 479	walk->pair = ulp->sge;
 480	walk->last_sg = NULL;
 481	walk->last_sg_len = 0;
 482}
 483
 484static inline void ulptx_walk_end(struct ulptx_walk *walk)
 485{
 486	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
 487			      ULPTX_NSGE_V(walk->nents));
 488}
 489
 490
 491static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
 492					size_t size,
 493					dma_addr_t addr)
 494{
 495	if (!size)
 496		return;
 497
 498	if (walk->nents == 0) {
 499		walk->sgl->len0 = cpu_to_be32(size);
 500		walk->sgl->addr0 = cpu_to_be64(addr);
 501	} else {
 502		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
 503		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
 504		walk->pair_idx = !walk->pair_idx;
 505		if (!walk->pair_idx)
 506			walk->pair++;
 507	}
 508	walk->nents++;
 509}
 510
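/* Append a scatterlist to the ULPTX source SGL: the first entry goes into
 * len0/addr0, later entries are packed two per address/length pair, and
 * entries larger than CHCR_SRC_SG_SIZE are split.
 */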
 511static void  ulptx_walk_add_sg(struct ulptx_walk *walk,
 512					struct scatterlist *sg,
 513			       unsigned int len,
 514			       unsigned int skip)
 515{
 516	int small;
 517	int skip_len = 0;
 518	unsigned int sgmin;
 519
 520	if (!len)
 521		return;
 522	while (sg && skip) {
 523		if (sg_dma_len(sg) <= skip) {
 524			skip -= sg_dma_len(sg);
 525			skip_len = 0;
 526			sg = sg_next(sg);
 527		} else {
 528			skip_len = skip;
 529			skip = 0;
 530		}
 531	}
 532	WARN(!sg, "SG should not be null here\n");
 533	if (sg && (walk->nents == 0)) {
 534		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
 535		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
 536		walk->sgl->len0 = cpu_to_be32(sgmin);
 537		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
 538		walk->nents++;
 539		len -= sgmin;
 540		walk->last_sg = sg;
 541		walk->last_sg_len = sgmin + skip_len;
 542		skip_len += sgmin;
 543		if (sg_dma_len(sg) == skip_len) {
 544			sg = sg_next(sg);
 545			skip_len = 0;
 546		}
 547	}
 548
 549	while (sg && len) {
 550		small = min(sg_dma_len(sg) - skip_len, len);
 551		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
 552		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
 553		walk->pair->addr[walk->pair_idx] =
 554			cpu_to_be64(sg_dma_address(sg) + skip_len);
 555		walk->pair_idx = !walk->pair_idx;
 556		walk->nents++;
 557		if (!walk->pair_idx)
 558			walk->pair++;
 559		len -= sgmin;
 560		skip_len += sgmin;
 561		walk->last_sg = sg;
 562		walk->last_sg_len = skip_len;
 563		if (sg_dma_len(sg) == skip_len) {
 564			sg = sg_next(sg);
 565			skip_len = 0;
 566		}
 567	}
 568}
 569
 570static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
 571{
 572	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 573	struct chcr_alg_template *chcr_crypto_alg =
 574		container_of(alg, struct chcr_alg_template, alg.skcipher);
 575
 576	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 577}
 578
 579static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
 580{
 581	struct adapter *adap = netdev2adap(dev);
 582	struct sge_uld_txq_info *txq_info =
 583		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
 584	struct sge_uld_txq *txq;
 585	int ret = 0;
 586
 587	local_bh_disable();
 588	txq = &txq_info->uldtxq[idx];
 589	spin_lock(&txq->sendq.lock);
 590	if (txq->full)
 591		ret = -1;
 592	spin_unlock(&txq->sendq.lock);
 593	local_bh_enable();
 594	return ret;
 595}
 596
 597static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
 598			       struct _key_ctx *key_ctx)
 599{
 600	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
 601		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
 602	} else {
 603		memcpy(key_ctx->key,
 604		       ablkctx->key + (ablkctx->enckey_len >> 1),
 605		       ablkctx->enckey_len >> 1);
 606		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
 607		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
 608	}
 609	return 0;
 610}
 611
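/* Return how many source bytes can go into a hash WR while its SGL still
 * fits in @space, with @minsg entries already accounted for and the first
 * @srcskip bytes of @src skipped.
 */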
 612static int chcr_hash_ent_in_wr(struct scatterlist *src,
 613			     unsigned int minsg,
 614			     unsigned int space,
 615			     unsigned int srcskip)
 616{
 617	int srclen = 0;
 618	int srcsg = minsg;
 619	int soffset = 0, sless;
 620
 621	if (sg_dma_len(src) == srcskip) {
 622		src = sg_next(src);
 623		srcskip = 0;
 624	}
 625	while (src && space > (sgl_ent_len[srcsg + 1])) {
 626		sless = min_t(unsigned int, sg_dma_len(src) - soffset -	srcskip,
 627							CHCR_SRC_SG_SIZE);
 628		srclen += sless;
 629		soffset += sless;
 630		srcsg++;
 631		if (sg_dma_len(src) == (soffset + srcskip)) {
 632			src = sg_next(src);
 633			soffset = 0;
 634			srcskip = 0;
 635		}
 636	}
 637	return srclen;
 638}
 639
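/* Compute how many payload bytes of @src/@dst fit into the remaining WR
 * @space, walking both lists in CHCR_SRC_SG_SIZE/CHCR_DST_SG_SIZE chunks
 * and keeping the two sides balanced; returns min(srclen, dstlen).
 */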
 640static int chcr_sg_ent_in_wr(struct scatterlist *src,
 641			     struct scatterlist *dst,
 642			     unsigned int minsg,
 643			     unsigned int space,
 644			     unsigned int srcskip,
 645			     unsigned int dstskip)
 646{
 647	int srclen = 0, dstlen = 0;
 648	int srcsg = minsg, dstsg = minsg;
 649	int offset = 0, soffset = 0, less, sless = 0;
 650
 651	if (sg_dma_len(src) == srcskip) {
 652		src = sg_next(src);
 653		srcskip = 0;
  654	}
 655	if (sg_dma_len(dst) == dstskip) {
 656		dst = sg_next(dst);
 657		dstskip = 0;
 658	}
 659
 660	while (src && dst &&
 661	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
 662		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
 663				CHCR_SRC_SG_SIZE);
 664		srclen += sless;
 665		srcsg++;
 666		offset = 0;
 667		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
 668		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
 669			if (srclen <= dstlen)
 670				break;
 671			less = min_t(unsigned int, sg_dma_len(dst) - offset -
 672				     dstskip, CHCR_DST_SG_SIZE);
 673			dstlen += less;
 674			offset += less;
 675			if ((offset + dstskip) == sg_dma_len(dst)) {
 676				dst = sg_next(dst);
 677				offset = 0;
 678			}
 679			dstsg++;
 680			dstskip = 0;
 681		}
 682		soffset += sless;
 683		if ((soffset + srcskip) == sg_dma_len(src)) {
 684			src = sg_next(src);
 685			srcskip = 0;
 686			soffset = 0;
 687		}
 688
 689	}
 690	return min(srclen, dstlen);
 691}
 692
 693static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
  694				struct skcipher_request *req,
 695				u8 *iv,
 696				unsigned short op_type)
 697{
 698	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 699	int err;
 700
 701	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
 702	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
 703				      req->base.complete, req->base.data);
 704	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
 705				   req->cryptlen, iv);
 706
 707	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
  708			crypto_skcipher_encrypt(&reqctx->fallback_req);
 709
 710	return err;
 711
 712}
 713
 714static inline int get_qidxs(struct crypto_async_request *req,
 715			    unsigned int *txqidx, unsigned int *rxqidx)
 716{
 717	struct crypto_tfm *tfm = req->tfm;
 718	int ret = 0;
 719
 720	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
 721	case CRYPTO_ALG_TYPE_AEAD:
 722	{
 723		struct aead_request *aead_req =
 724			container_of(req, struct aead_request, base);
 725		struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
 726		*txqidx = reqctx->txqidx;
 727		*rxqidx = reqctx->rxqidx;
 728		break;
 729	}
 730	case CRYPTO_ALG_TYPE_SKCIPHER:
 731	{
 732		struct skcipher_request *sk_req =
 733			container_of(req, struct skcipher_request, base);
 734		struct chcr_skcipher_req_ctx *reqctx =
 735			skcipher_request_ctx(sk_req);
 736		*txqidx = reqctx->txqidx;
 737		*rxqidx = reqctx->rxqidx;
 738		break;
 739	}
 740	case CRYPTO_ALG_TYPE_AHASH:
 741	{
 742		struct ahash_request *ahash_req =
 743			container_of(req, struct ahash_request, base);
 744		struct chcr_ahash_req_ctx *reqctx =
 745			ahash_request_ctx(ahash_req);
 746		*txqidx = reqctx->txqidx;
 747		*rxqidx = reqctx->rxqidx;
 748		break;
 749	}
 750	default:
 751		ret = -EINVAL;
 752		/* should never get here */
 753		BUG();
 754		break;
 755	}
 756	return ret;
 757}
 758
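/* Fill the common FW_CRYPTO_LOOKASIDE_WR and ULPTX header fields shared
 * by the cipher and hash work requests built in this file: lengths in
 * 16-byte units, the response queue id and the completion cookie (the
 * request pointer).
 */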
 759static inline void create_wreq(struct chcr_context *ctx,
 760			       struct chcr_wr *chcr_req,
 761			       struct crypto_async_request *req,
 762			       unsigned int imm,
 763			       int hash_sz,
 764			       unsigned int len16,
 765			       unsigned int sc_len,
 766			       unsigned int lcb)
 767{
 768	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 769	unsigned int tx_channel_id, rx_channel_id;
 770	unsigned int txqidx = 0, rxqidx = 0;
 771	unsigned int qid, fid;
 772
 773	get_qidxs(req, &txqidx, &rxqidx);
 774	qid = u_ctx->lldi.rxq_ids[rxqidx];
 775	fid = u_ctx->lldi.rxq_ids[0];
 776	tx_channel_id = txqidx / ctx->txq_perchan;
 777	rx_channel_id = rxqidx / ctx->rxq_perchan;
 778
 779
 780	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
 781	chcr_req->wreq.pld_size_hash_size =
 782		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
 783	chcr_req->wreq.len16_pkd =
 784		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
 785	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
 786	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
  787							    !!lcb, txqidx);
 788
  789	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
 790	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
  791				((sizeof(chcr_req->wreq)) >> 4)));
 792	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
 793	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
 794					   sizeof(chcr_req->key_ctx) + sc_len);
 795}
 796
  797/**
  798 *	create_cipher_wr - form the WR for cipher operations
  799 *	@wrparam: cipher work request parameters, carrying the skcipher
  800 *		  request, the ingress qid where the response of this WR
  801 *		  should be received, and the number of bytes to process
  802 *		  in this WR.
  803 */
 804static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 805{
 806	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
 807	struct chcr_context *ctx = c_ctx(tfm);
 808	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 809	struct sk_buff *skb = NULL;
 810	struct chcr_wr *chcr_req;
 811	struct cpl_rx_phys_dsgl *phys_cpl;
 812	struct ulptx_sgl *ulptx;
 813	struct chcr_skcipher_req_ctx *reqctx =
 814		skcipher_request_ctx(wrparam->req);
 815	unsigned int temp = 0, transhdr_len, dst_size;
 816	int error;
 817	int nents;
 818	unsigned int kctx_len;
 819	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 820			GFP_KERNEL : GFP_ATOMIC;
 821	struct adapter *adap = padap(ctx->dev);
 822	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 823
 824	nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
 825			      reqctx->dst_ofst);
 826	dst_size = get_space_for_phys_dsgl(nents);
 827	kctx_len = roundup(ablkctx->enckey_len, 16);
 828	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
 829	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
 830				  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
 831	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
 832				     (sgl_len(nents) * 8);
 833	transhdr_len += temp;
 834	transhdr_len = roundup(transhdr_len, 16);
 835	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
 836	if (!skb) {
 837		error = -ENOMEM;
 838		goto err;
 839	}
 840	chcr_req = __skb_put_zero(skb, transhdr_len);
 841	chcr_req->sec_cpl.op_ivinsrtofst =
 842			FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
 843
 844	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
 845	chcr_req->sec_cpl.aadstart_cipherstop_hi =
 846			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
 847
 848	chcr_req->sec_cpl.cipherstop_lo_authinsert =
 849			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
 850	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
 851							 ablkctx->ciph_mode,
 852							 0, 0, IV >> 1);
 853	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
 854							  0, 1, dst_size);
 855
 856	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
 857	if ((reqctx->op == CHCR_DECRYPT_OP) &&
 858	    (!(get_cryptoalg_subtype(tfm) ==
 859	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
 860	    (!(get_cryptoalg_subtype(tfm) ==
 861	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
 862		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
 863	} else {
 864		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
 865		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
 866			memcpy(chcr_req->key_ctx.key, ablkctx->key,
 867			       ablkctx->enckey_len);
 868		} else {
 869			memcpy(chcr_req->key_ctx.key, ablkctx->key +
 870			       (ablkctx->enckey_len >> 1),
 871			       ablkctx->enckey_len >> 1);
 872			memcpy(chcr_req->key_ctx.key +
 873			       (ablkctx->enckey_len >> 1),
 874			       ablkctx->key,
 875			       ablkctx->enckey_len >> 1);
 876		}
 877	}
 878	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
 879	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
 880	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
 881	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
 882
 883	atomic_inc(&adap->chcr_stats.cipher_rqst);
 884	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
 885		+ (reqctx->imm ? (wrparam->bytes) : 0);
 886	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
 887		    transhdr_len, temp,
 888			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
 889	reqctx->skb = skb;
 890
 891	if (reqctx->op && (ablkctx->ciph_mode ==
 892			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
 893		sg_pcopy_to_buffer(wrparam->req->src,
 894			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
 895			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
 896
 897	return skb;
 898err:
 899	return ERR_PTR(error);
 900}
 901
 902static inline int chcr_keyctx_ck_size(unsigned int keylen)
 903{
 904	int ck_size = 0;
 905
 906	if (keylen == AES_KEYSIZE_128)
 907		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
 908	else if (keylen == AES_KEYSIZE_192)
 909		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
 910	else if (keylen == AES_KEYSIZE_256)
 911		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
 912	else
 913		ck_size = 0;
 914
 915	return ck_size;
 916}
 917static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
 918				       const u8 *key,
 919				       unsigned int keylen)
  920{
  921	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 922
 923	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
 924				CRYPTO_TFM_REQ_MASK);
 925	crypto_skcipher_set_flags(ablkctx->sw_cipher,
 926				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
  927	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
 928}
 929
 930static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
 931			       const u8 *key,
 932			       unsigned int keylen)
 933{
 934	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 935	unsigned int ck_size, context_size;
 936	u16 alignment = 0;
 937	int err;
 938
 939	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 940	if (err)
 941		goto badkey_err;
 942
 943	ck_size = chcr_keyctx_ck_size(keylen);
 944	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
 945	memcpy(ablkctx->key, key, keylen);
 946	ablkctx->enckey_len = keylen;
 947	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
 948	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 949			keylen + alignment) >> 4;
 950
 951	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 952						0, 0, context_size);
 953	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
 954	return 0;
  955badkey_err:
 956	ablkctx->enckey_len = 0;
 957
 958	return err;
 959}
 960
 961static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
 962				   const u8 *key,
 963				   unsigned int keylen)
 964{
 965	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 966	unsigned int ck_size, context_size;
 967	u16 alignment = 0;
 968	int err;
 969
 970	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 971	if (err)
 972		goto badkey_err;
 973	ck_size = chcr_keyctx_ck_size(keylen);
 974	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
 975	memcpy(ablkctx->key, key, keylen);
 976	ablkctx->enckey_len = keylen;
 977	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 978			keylen + alignment) >> 4;
 979
 980	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 981						0, 0, context_size);
 982	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
 983
 984	return 0;
  985badkey_err:
 986	ablkctx->enckey_len = 0;
 987
 988	return err;
 989}
 990
 991static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
 992				   const u8 *key,
 993				   unsigned int keylen)
 994{
 995	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 996	unsigned int ck_size, context_size;
 997	u16 alignment = 0;
 998	int err;
 999
1000	if (keylen < CTR_RFC3686_NONCE_SIZE)
1001		return -EINVAL;
1002	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
1003	       CTR_RFC3686_NONCE_SIZE);
1004
1005	keylen -= CTR_RFC3686_NONCE_SIZE;
1006	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
1007	if (err)
1008		goto badkey_err;
1009
1010	ck_size = chcr_keyctx_ck_size(keylen);
1011	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
1012	memcpy(ablkctx->key, key, keylen);
1013	ablkctx->enckey_len = keylen;
1014	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
1015			keylen + alignment) >> 4;
1016
1017	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
1018						0, 0, context_size);
1019	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
1020
1021	return 0;
 1022badkey_err:
1023	ablkctx->enckey_len = 0;
1024
1025	return err;
1026}
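
/* dstiv = srciv + add, treating the 16-byte IV as a big-endian counter
 * and propagating the carry across its 32-bit words.
 */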
1027static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
1028{
1029	unsigned int size = AES_BLOCK_SIZE;
1030	__be32 *b = (__be32 *)(dstiv + size);
1031	u32 c, prev;
1032
1033	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
1034	for (; size >= 4; size -= 4) {
1035		prev = be32_to_cpu(*--b);
1036		c = prev + add;
1037		*b = cpu_to_be32(c);
1038		if (prev < c)
1039			break;
1040		add = 1;
1041	}
1042
1043}
1044
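/* Clamp @bytes so that the low 32-bit word of the big-endian CTR counter
 * in @iv does not wrap within a single work request.
 */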
1045static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
1046{
1047	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
1048	u64 c;
1049	u32 temp = be32_to_cpu(*--b);
1050
1051	temp = ~temp;
 1052	c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
1053	if ((bytes / AES_BLOCK_SIZE) >= c)
1054		bytes = c * AES_BLOCK_SIZE;
1055	return bytes;
1056}
1057
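/* Advance the XTS tweak past the blocks already processed: encrypt the
 * saved IV with the second half of the stored key, multiply by x^round in
 * GF(2^128) (eight doublings at a time via gf128mul_x8_ble), then decrypt
 * it back unless this is the final tweak so it stays in pre-encryption
 * form.
 */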
1058static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
1059			     u32 isfinal)
1060{
1061	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1062	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1063	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1064	struct crypto_aes_ctx aes;
1065	int ret, i;
1066	u8 *key;
1067	unsigned int keylen;
1068	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1069	int round8 = round / 8;
 1070	int round8 = round / 8;
1071	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1072
1073	keylen = ablkctx->enckey_len / 2;
1074	key = ablkctx->key + keylen;
 1075	/* For a 192-bit key, remove the padded zeroes which were
 1076	 * added in chcr_xts_setkey
 1077	 */
1078	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
1079			== CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
1080		ret = aes_expandkey(&aes, key, keylen - 8);
1081	else
1082		ret = aes_expandkey(&aes, key, keylen);
1083	if (ret)
1084		return ret;
1085	aes_encrypt(&aes, iv, iv);
1086	for (i = 0; i < round8; i++)
1087		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1088
1089	for (i = 0; i < (round % 8); i++)
1090		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1091
1092	if (!isfinal)
1093		aes_decrypt(&aes, iv, iv);
1094
1095	memzero_explicit(&aes, sizeof(aes));
1096	return 0;
1097}
1098
1099static int chcr_update_cipher_iv(struct skcipher_request *req,
1100				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1101{
1102	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1103	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1104	int subtype = get_cryptoalg_subtype(tfm);
1105	int ret = 0;
1106
1107	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1108		ctr_add_iv(iv, req->iv, (reqctx->processed /
1109			   AES_BLOCK_SIZE));
1110	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1111		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1112			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1113						AES_BLOCK_SIZE) + 1);
1114	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1115		ret = chcr_update_tweak(req, iv, 0);
1116	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1117		if (reqctx->op)
1118			/*Updated before sending last WR*/
1119			memcpy(iv, req->iv, AES_BLOCK_SIZE);
1120		else
1121			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1122	}
1123
1124	return ret;
1125
1126}
1127
 1128/* We need a separate function for the final IV because in RFC3686 the
 1129 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 1130 * remains constant across subsequent update requests.
 1131 */
1132
1133static int chcr_final_cipher_iv(struct skcipher_request *req,
1134				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1135{
1136	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1137	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1138	int subtype = get_cryptoalg_subtype(tfm);
1139	int ret = 0;
1140
1141	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1142		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
1143						       AES_BLOCK_SIZE));
1144	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
1145		if (!reqctx->partial_req)
1146			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1147		else
1148			ret = chcr_update_tweak(req, iv, 1);
1149	}
1150	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1151		/*Already updated for Decrypt*/
1152		if (!reqctx->op)
1153			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1154
1155	}
1156	return ret;
1157
1158}
1159
1160static int chcr_handle_cipher_resp(struct skcipher_request *req,
1161				   unsigned char *input, int err)
1162{
1163	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1164	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1165	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1166	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1167	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1168	struct chcr_dev *dev = c_ctx(tfm)->dev;
1169	struct chcr_context *ctx = c_ctx(tfm);
1170	struct adapter *adap = padap(ctx->dev);
1171	struct cipher_wr_param wrparam;
 1172	struct sk_buff *skb;
1173	int bytes;
1174
1175	if (err)
1176		goto unmap;
1177	if (req->cryptlen == reqctx->processed) {
1178		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1179				      req);
1180		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
1181		goto complete;
1182	}
 1183
1184	if (!reqctx->imm) {
1185		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1186					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1187					  reqctx->src_ofst, reqctx->dst_ofst);
1188		if ((bytes + reqctx->processed) >= req->cryptlen)
1189			bytes  = req->cryptlen - reqctx->processed;
1190		else
1191			bytes = rounddown(bytes, 16);
1192	} else {
 1193		/* CTR mode counter overflow */
1194		bytes  = req->cryptlen - reqctx->processed;
 1195	}
 1196	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1197	if (err)
1198		goto unmap;
1199
1200	if (unlikely(bytes == 0)) {
1201		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1202				      req);
1203		memcpy(req->iv, reqctx->init_iv, IV);
1204		atomic_inc(&adap->chcr_stats.fallback);
1205		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
 1206					   reqctx->op);
1207		goto complete;
1208	}
1209
1210	if (get_cryptoalg_subtype(tfm) ==
1211	    CRYPTO_ALG_SUB_TYPE_CTR)
1212		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1213	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
1214	wrparam.req = req;
1215	wrparam.bytes = bytes;
1216	skb = create_cipher_wr(&wrparam);
1217	if (IS_ERR(skb)) {
1218		pr_err("%s : Failed to form WR. No memory\n", __func__);
1219		err = PTR_ERR(skb);
1220		goto unmap;
1221	}
1222	skb->dev = u_ctx->lldi.ports[0];
1223	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1224	chcr_send_wr(skb);
1225	reqctx->last_req_len = bytes;
1226	reqctx->processed += bytes;
1227	if (get_cryptoalg_subtype(tfm) ==
1228		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1229			CRYPTO_TFM_REQ_MAY_SLEEP ) {
1230		complete(&ctx->cbc_aes_aio_done);
1231	}
1232	return 0;
1233unmap:
1234	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1235complete:
1236	if (get_cryptoalg_subtype(tfm) ==
1237		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1238			CRYPTO_TFM_REQ_MAY_SLEEP ) {
1239		complete(&ctx->cbc_aes_aio_done);
1240	}
1241	chcr_dec_wrcount(dev);
1242	req->base.complete(&req->base, err);
1243	return err;
1244}
1245
1246static int process_cipher(struct skcipher_request *req,
1247				  unsigned short qid,
1248				  struct sk_buff **skb,
1249				  unsigned short op_type)
1250{
1251	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1252	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1253	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1254	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1255	struct adapter *adap = padap(c_ctx(tfm)->dev);
1256	struct	cipher_wr_param wrparam;
1257	int bytes, err = -EINVAL;
1258	int subtype;
1259
1260	reqctx->processed = 0;
1261	reqctx->partial_req = 0;
1262	if (!req->iv)
1263		goto error;
1264	subtype = get_cryptoalg_subtype(tfm);
1265	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1266	    (req->cryptlen == 0) ||
1267	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
1268		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
1269			goto fallback;
1270		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
1271			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1272			goto fallback;
1273		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1274		       ablkctx->enckey_len, req->cryptlen, ivsize);
1275		goto error;
1276	}
1277
1278	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1279	if (err)
1280		goto error;
1281	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1282					    AES_MIN_KEY_SIZE +
1283					    sizeof(struct cpl_rx_phys_dsgl) +
1284					/*Min dsgl size*/
1285					    32))) {
1286		/* Can be sent as Imm*/
1287		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1288
1289		dnents = sg_nents_xlen(req->dst, req->cryptlen,
 1290				       CHCR_DST_SG_SIZE, 0);
1291		phys_dsgl = get_space_for_phys_dsgl(dnents);
1292		kctx_len = roundup(ablkctx->enckey_len, 16);
1293		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1294		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
1295			SGE_MAX_WR_LEN;
1296		bytes = IV + req->cryptlen;
1297
1298	} else {
1299		reqctx->imm = 0;
1300	}
1301
1302	if (!reqctx->imm) {
 1303		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1304					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1305					  0, 0);
1306		if ((bytes + reqctx->processed) >= req->cryptlen)
1307			bytes  = req->cryptlen - reqctx->processed;
1308		else
1309			bytes = rounddown(bytes, 16);
1310	} else {
1311		bytes = req->cryptlen;
1312	}
1313	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
 1314		bytes = adjust_ctr_overflow(req->iv, bytes);
1315	}
 1316	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1317		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1318		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1319				CTR_RFC3686_IV_SIZE);
1320
1321		/* initialize counter portion of counter block */
1322		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1323			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1324		memcpy(reqctx->init_iv, reqctx->iv, IV);
1325
1326	} else {
1327
1328		memcpy(reqctx->iv, req->iv, IV);
1329		memcpy(reqctx->init_iv, req->iv, IV);
1330	}
1331	if (unlikely(bytes == 0)) {
1332		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1333				      req);
1334fallback:       atomic_inc(&adap->chcr_stats.fallback);
1335		err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
1336					   subtype ==
1337					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
 1338					   reqctx->iv : req->iv,
1339					   op_type);
1340		goto error;
1341	}
1342	reqctx->op = op_type;
1343	reqctx->srcsg = req->src;
1344	reqctx->dstsg = req->dst;
1345	reqctx->src_ofst = 0;
1346	reqctx->dst_ofst = 0;
1347	wrparam.qid = qid;
1348	wrparam.req = req;
1349	wrparam.bytes = bytes;
1350	*skb = create_cipher_wr(&wrparam);
1351	if (IS_ERR(*skb)) {
1352		err = PTR_ERR(*skb);
1353		goto unmap;
1354	}
1355	reqctx->processed = bytes;
1356	reqctx->last_req_len = bytes;
1357	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
1358
1359	return 0;
1360unmap:
1361	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1362error:
1363	return err;
1364}
1365
1366static int chcr_aes_encrypt(struct skcipher_request *req)
1367{
1368	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1369	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1370	struct chcr_dev *dev = c_ctx(tfm)->dev;
1371	struct sk_buff *skb = NULL;
1372	int err;
1373	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1374	struct chcr_context *ctx = c_ctx(tfm);
1375	unsigned int cpu;
1376
1377	cpu = get_cpu();
1378	reqctx->txqidx = cpu % ctx->ntxq;
1379	reqctx->rxqidx = cpu % ctx->nrxq;
1380	put_cpu();
1381
1382	err = chcr_inc_wrcount(dev);
1383	if (err)
1384		return -ENXIO;
1385	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1386						reqctx->txqidx) &&
1387		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1388			err = -ENOSPC;
1389			goto error;
1390	}
1391
1392	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1393			     &skb, CHCR_ENCRYPT_OP);
1394	if (err || !skb)
1395		return  err;
1396	skb->dev = u_ctx->lldi.ports[0];
1397	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1398	chcr_send_wr(skb);
1399	if (get_cryptoalg_subtype(tfm) ==
1400		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1401			CRYPTO_TFM_REQ_MAY_SLEEP ) {
1402			reqctx->partial_req = 1;
1403			wait_for_completion(&ctx->cbc_aes_aio_done);
1404        }
1405	return -EINPROGRESS;
1406error:
1407	chcr_dec_wrcount(dev);
1408	return err;
1409}
1410
1411static int chcr_aes_decrypt(struct skcipher_request *req)
1412{
1413	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1414	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1415	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1416	struct chcr_dev *dev = c_ctx(tfm)->dev;
1417	struct sk_buff *skb = NULL;
1418	int err;
1419	struct chcr_context *ctx = c_ctx(tfm);
1420	unsigned int cpu;
1421
1422	cpu = get_cpu();
1423	reqctx->txqidx = cpu % ctx->ntxq;
1424	reqctx->rxqidx = cpu % ctx->nrxq;
1425	put_cpu();
1426
1427	err = chcr_inc_wrcount(dev);
1428	if (err)
1429		return -ENXIO;
1430
1431	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1432						reqctx->txqidx) &&
1433		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
1434			return -ENOSPC;
1435	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
 1436			     &skb, CHCR_DECRYPT_OP);
1437	if (err || !skb)
1438		return err;
1439	skb->dev = u_ctx->lldi.ports[0];
1440	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1441	chcr_send_wr(skb);
1442	return -EINPROGRESS;
 1443}
1444static int chcr_device_init(struct chcr_context *ctx)
1445{
1446	struct uld_ctx *u_ctx = NULL;
1447	int txq_perchan, ntxq;
 1448	int err = 0, rxq_perchan;
 1449
1450	if (!ctx->dev) {
1451		u_ctx = assign_chcr_device();
1452		if (!u_ctx) {
1453			err = -ENXIO;
1454			pr_err("chcr device assignment fails\n");
1455			goto out;
1456		}
1457		ctx->dev = &u_ctx->dev;
 1458		ntxq = u_ctx->lldi.ntxq;
1459		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1460		txq_perchan = ntxq / u_ctx->lldi.nchan;
1461		ctx->ntxq = ntxq;
1462		ctx->nrxq = u_ctx->lldi.nrxq;
1463		ctx->rxq_perchan = rxq_perchan;
 1464		ctx->txq_perchan = txq_perchan;
1465	}
1466out:
1467	return err;
1468}
1469
1470static int chcr_init_tfm(struct crypto_skcipher *tfm)
1471{
1472	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1473	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1474	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1475
1476	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
1477				CRYPTO_ALG_NEED_FALLBACK);
1478	if (IS_ERR(ablkctx->sw_cipher)) {
1479		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1480		return PTR_ERR(ablkctx->sw_cipher);
1481	}
1482	init_completion(&ctx->cbc_aes_aio_done);
1483	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1484					 crypto_skcipher_reqsize(ablkctx->sw_cipher));
1485
 1486	return chcr_device_init(ctx);
1487}
1488
1489static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
1490{
1491	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1492	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1493	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1494
 1495	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
 1496	 * cannot be used as the fallback in chcr_handle_cipher_resp
 1497	 */
1498	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1499				CRYPTO_ALG_NEED_FALLBACK);
1500	if (IS_ERR(ablkctx->sw_cipher)) {
1501		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1502		return PTR_ERR(ablkctx->sw_cipher);
1503	}
1504	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1505				    crypto_skcipher_reqsize(ablkctx->sw_cipher));
1506	return chcr_device_init(ctx);
1507}
1508
1509
1510static void chcr_exit_tfm(struct crypto_skcipher *tfm)
1511{
1512	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1513	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1514
 1515	crypto_free_skcipher(ablkctx->sw_cipher);
1516}
1517
1518static int get_alg_config(struct algo_param *params,
1519			  unsigned int auth_size)
1520{
1521	switch (auth_size) {
1522	case SHA1_DIGEST_SIZE:
1523		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1524		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1525		params->result_size = SHA1_DIGEST_SIZE;
1526		break;
1527	case SHA224_DIGEST_SIZE:
1528		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1529		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1530		params->result_size = SHA256_DIGEST_SIZE;
1531		break;
1532	case SHA256_DIGEST_SIZE:
1533		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1534		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1535		params->result_size = SHA256_DIGEST_SIZE;
1536		break;
1537	case SHA384_DIGEST_SIZE:
1538		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1539		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1540		params->result_size = SHA512_DIGEST_SIZE;
1541		break;
1542	case SHA512_DIGEST_SIZE:
1543		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1544		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1545		params->result_size = SHA512_DIGEST_SIZE;
1546		break;
1547	default:
1548		pr_err("ERROR, unsupported digest size\n");
1549		return -EINVAL;
1550	}
1551	return 0;
1552}
1553
1554static inline void chcr_free_shash(struct crypto_shash *base_hash)
1555{
1556		crypto_free_shash(base_hash);
1557}
1558
 1559/**
 1560 *	create_hash_wr - Create hash work request
 1561 *	@req: ahash request; @param: hash work request parameters
 1562 */
1563static struct sk_buff *create_hash_wr(struct ahash_request *req,
1564				      struct hash_wr_param *param)
1565{
1566	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1567	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1568	struct chcr_context *ctx = h_ctx(tfm);
1569	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1570	struct sk_buff *skb = NULL;
1571	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1572	struct chcr_wr *chcr_req;
1573	struct ulptx_sgl *ulptx;
1574	unsigned int nents = 0, transhdr_len;
1575	unsigned int temp = 0;
1576	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1577		GFP_ATOMIC;
1578	struct adapter *adap = padap(h_ctx(tfm)->dev);
1579	int error = 0;
1580	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
1581
1582	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1583	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1584				param->sg_len) <= SGE_MAX_WR_LEN;
1585	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1586		      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1587	nents += param->bfr_len ? 1 : 0;
1588	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1589				param->sg_len, 16) : (sgl_len(nents) * 8);
1590	transhdr_len = roundup(transhdr_len, 16);
1591
1592	skb = alloc_skb(transhdr_len, flags);
1593	if (!skb)
1594		return ERR_PTR(-ENOMEM);
1595	chcr_req = __skb_put_zero(skb, transhdr_len);
1596
1597	chcr_req->sec_cpl.op_ivinsrtofst =
1598		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
1599
1600	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1601
1602	chcr_req->sec_cpl.aadstart_cipherstop_hi =
1603		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1604	chcr_req->sec_cpl.cipherstop_lo_authinsert =
1605		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1606	chcr_req->sec_cpl.seqno_numivs =
1607		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1608					 param->opad_needed, 0);
1609
1610	chcr_req->sec_cpl.ivgen_hdrlen =
1611		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1612
1613	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1614	       param->alg_prm.result_size);
1615
1616	if (param->opad_needed)
1617		memcpy(chcr_req->key_ctx.key +
1618		       ((param->alg_prm.result_size <= 32) ? 32 :
1619			CHCR_HASH_MAX_DIGEST_SIZE),
1620		       hmacctx->opad, param->alg_prm.result_size);
1621
1622	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1623					    param->alg_prm.mk_size, 0,
1624					    param->opad_needed,
1625					    ((param->kctx_len +
1626					     sizeof(chcr_req->key_ctx)) >> 4));
1627	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1628	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1629				     DUMMY_BYTES);
1630	if (param->bfr_len != 0) {
1631		req_ctx->hctx_wr.dma_addr =
1632			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1633				       param->bfr_len, DMA_TO_DEVICE);
 1634		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
 1635				       req_ctx->hctx_wr.dma_addr)) {
1636			error = -ENOMEM;
1637			goto err;
1638		}
1639		req_ctx->hctx_wr.dma_len = param->bfr_len;
1640	} else {
1641		req_ctx->hctx_wr.dma_addr = 0;
1642	}
1643	chcr_add_hash_src_ent(req, ulptx, param);
 1644	/* Request up to the max WR size */
1645	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1646				(param->sg_len + param->bfr_len) : 0);
1647	atomic_inc(&adap->chcr_stats.digest_rqst);
1648	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1649		    param->hash_size, transhdr_len,
1650		    temp,  0);
1651	req_ctx->hctx_wr.skb = skb;
1652	return skb;
1653err:
1654	kfree_skb(skb);
1655	return  ERR_PTR(error);
1656}
1657
1658static int chcr_ahash_update(struct ahash_request *req)
1659{
1660	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1661	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1662	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1663	struct chcr_context *ctx = h_ctx(rtfm);
1664	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1665	struct sk_buff *skb;
1666	u8 remainder = 0, bs;
1667	unsigned int nbytes = req->nbytes;
1668	struct hash_wr_param params;
1669	int error;
1670	unsigned int cpu;
1671
1672	cpu = get_cpu();
1673	req_ctx->txqidx = cpu % ctx->ntxq;
1674	req_ctx->rxqidx = cpu % ctx->nrxq;
1675	put_cpu();
1676
 1677	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1678
1679	if (nbytes + req_ctx->reqlen >= bs) {
1680		remainder = (nbytes + req_ctx->reqlen) % bs;
1681		nbytes = nbytes + req_ctx->reqlen - remainder;
1682	} else {
1683		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1684				   + req_ctx->reqlen, nbytes, 0);
1685		req_ctx->reqlen += nbytes;
1686		return 0;
1687	}
1688	error = chcr_inc_wrcount(dev);
1689	if (error)
1690		return -ENXIO;
 1691	/* A detached CHCR state means lldi or padap has been freed; increasing
 1692	 * the inflight count for dev guarantees that lldi and padap stay valid.
 1693	 */
1694	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1695						req_ctx->txqidx) &&
1696		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1697			error = -ENOSPC;
1698			goto err;
1699	}
1700
1701	chcr_init_hctx_per_wr(req_ctx);
1702	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1703	if (error) {
1704		error = -ENOMEM;
1705		goto err;
1706	}
1707	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1708	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1709	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1710				     HASH_SPACE_LEFT(params.kctx_len), 0);
1711	if (params.sg_len > req->nbytes)
1712		params.sg_len = req->nbytes;
1713	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1714			req_ctx->reqlen;
1715	params.opad_needed = 0;
1716	params.more = 1;
1717	params.last = 0;
1718	params.bfr_len = req_ctx->reqlen;
1719	params.scmd1 = 0;
1720	req_ctx->hctx_wr.srcsg = req->src;
1721
1722	params.hash_size = params.alg_prm.result_size;
1723	req_ctx->data_len += params.sg_len + params.bfr_len;
1724	skb = create_hash_wr(req, &params);
1725	if (IS_ERR(skb)) {
1726		error = PTR_ERR(skb);
1727		goto unmap;
1728	}
1729
1730	req_ctx->hctx_wr.processed += params.sg_len;
1731	if (remainder) {
1732		/* Swap buffers */
1733		swap(req_ctx->reqbfr, req_ctx->skbfr);
1734		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1735				   req_ctx->reqbfr, remainder, req->nbytes -
1736				   remainder);
1737	}
1738	req_ctx->reqlen = remainder;
1739	skb->dev = u_ctx->lldi.ports[0];
1740	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
 1741	chcr_send_wr(skb);
1742	return -EINPROGRESS;
1743unmap:
1744	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1745err:
1746	chcr_dec_wrcount(dev);
1747	return error;
1748}
1749
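/* Build the final padding block when no data is buffered: a 0x80 byte
 * followed by the total message length in bits (scmd1 << 3), stored
 * big-endian in the last 8 bytes of the 64- or 128-byte block.
 */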
1750static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1751{
1752	memset(bfr_ptr, 0, bs);
1753	*bfr_ptr = 0x80;
1754	if (bs == 64)
1755		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
1756	else
1757		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
1758}
1759
1760static int chcr_ahash_final(struct ahash_request *req)
1761{
1762	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1763	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1764	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1765	struct hash_wr_param params;
1766	struct sk_buff *skb;
1767	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1768	struct chcr_context *ctx = h_ctx(rtfm);
1769	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1770	int error;
1771	unsigned int cpu;
1772
1773	cpu = get_cpu();
1774	req_ctx->txqidx = cpu % ctx->ntxq;
1775	req_ctx->rxqidx = cpu % ctx->nrxq;
1776	put_cpu();
1777
1778	error = chcr_inc_wrcount(dev);
1779	if (error)
1780		return -ENXIO;
1781
1782	chcr_init_hctx_per_wr(req_ctx);
1783	if (is_hmac(crypto_ahash_tfm(rtfm)))
1784		params.opad_needed = 1;
1785	else
1786		params.opad_needed = 0;
1787	params.sg_len = 0;
1788	req_ctx->hctx_wr.isfinal = 1;
1789	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1790	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1791	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1792		params.opad_needed = 1;
1793		params.kctx_len *= 2;
1794	} else {
1795		params.opad_needed = 0;
1796	}
1797
1798	req_ctx->hctx_wr.result = 1;
1799	params.bfr_len = req_ctx->reqlen;
1800	req_ctx->data_len += params.bfr_len + params.sg_len;
1801	req_ctx->hctx_wr.srcsg = req->src;
1802	if (req_ctx->reqlen == 0) {
1803		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1804		params.last = 0;
1805		params.more = 1;
1806		params.scmd1 = 0;
1807		params.bfr_len = bs;
1808
1809	} else {
1810		params.scmd1 = req_ctx->data_len;
1811		params.last = 1;
1812		params.more = 0;
1813	}
1814	params.hash_size = crypto_ahash_digestsize(rtfm);
1815	skb = create_hash_wr(req, &params);
1816	if (IS_ERR(skb)) {
1817		error = PTR_ERR(skb);
1818		goto err;
1819	}
1820	req_ctx->reqlen = 0;
1821	skb->dev = u_ctx->lldi.ports[0];
1822	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1823	chcr_send_wr(skb);
1824	return -EINPROGRESS;
1825err:
1826	chcr_dec_wrcount(dev);
1827	return error;
1828}
1829
1830static int chcr_ahash_finup(struct ahash_request *req)
1831{
1832	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1833	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1834	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1835	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1836	struct chcr_context *ctx = h_ctx(rtfm);
1837	struct sk_buff *skb;
1838	struct hash_wr_param params;
1839	u8  bs;
1840	int error;
1841	unsigned int cpu;
1842
1843	cpu = get_cpu();
1844	req_ctx->txqidx = cpu % ctx->ntxq;
1845	req_ctx->rxqidx = cpu % ctx->nrxq;
1846	put_cpu();
1847
1848	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1849	error = chcr_inc_wrcount(dev);
1850	if (error)
1851		return -ENXIO;
1852
1853	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1854						req_ctx->txqidx) &&
1855		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1856			error = -ENOSPC;
1857			goto err;
1858	}
1859	chcr_init_hctx_per_wr(req_ctx);
1860	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1861	if (error) {
1862		error = -ENOMEM;
1863		goto err;
1864	}
1865
1866	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1867	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1868	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1869		params.kctx_len *= 2;
1870		params.opad_needed = 1;
1871	} else {
1872		params.opad_needed = 0;
1873	}
1874
1875	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1876				    HASH_SPACE_LEFT(params.kctx_len), 0);
1877	if (params.sg_len < req->nbytes) {
1878		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1879			params.kctx_len /= 2;
1880			params.opad_needed = 0;
1881		}
1882		params.last = 0;
1883		params.more = 1;
1884		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1885					- req_ctx->reqlen;
1886		params.hash_size = params.alg_prm.result_size;
1887		params.scmd1 = 0;
1888	} else {
1889		params.last = 1;
1890		params.more = 0;
1891		params.sg_len = req->nbytes;
1892		params.hash_size = crypto_ahash_digestsize(rtfm);
1893		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1894				params.sg_len;
1895	}
1896	params.bfr_len = req_ctx->reqlen;
1897	req_ctx->data_len += params.bfr_len + params.sg_len;
1898	req_ctx->hctx_wr.result = 1;
1899	req_ctx->hctx_wr.srcsg = req->src;
1900	if ((req_ctx->reqlen + req->nbytes) == 0) {
1901		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1902		params.last = 0;
1903		params.more = 1;
1904		params.scmd1 = 0;
1905		params.bfr_len = bs;
1906	}
1907	skb = create_hash_wr(req, &params);
1908	if (IS_ERR(skb)) {
1909		error = PTR_ERR(skb);
1910		goto unmap;
1911	}
1912	req_ctx->reqlen = 0;
1913	req_ctx->hctx_wr.processed += params.sg_len;
1914	skb->dev = u_ctx->lldi.ports[0];
1915	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1916	chcr_send_wr(skb);
 
1917	return -EINPROGRESS;
1918unmap:
1919	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1920err:
1921	chcr_dec_wrcount(dev);
1922	return error;
1923}
1924
1925static int chcr_ahash_digest(struct ahash_request *req)
1926{
1927	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1928	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1929	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1930	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1931	struct chcr_context *ctx = h_ctx(rtfm);
1932	struct sk_buff *skb;
1933	struct hash_wr_param params;
1934	u8  bs;
1935	int error;
1936	unsigned int cpu;
1937
1938	cpu = get_cpu();
1939	req_ctx->txqidx = cpu % ctx->ntxq;
1940	req_ctx->rxqidx = cpu % ctx->nrxq;
1941	put_cpu();
1942
1943	rtfm->init(req);
1944	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1945	error = chcr_inc_wrcount(dev);
1946	if (error)
1947		return -ENXIO;
1948
1949	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1950						req_ctx->txqidx) &&
1951		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1952			error = -ENOSPC;
1953			goto err;
1954	}
1955
1956	chcr_init_hctx_per_wr(req_ctx);
1957	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1958	if (error) {
1959		error = -ENOMEM;
1960		goto err;
1961	}
1962
1963	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1964	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1965	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1966		params.kctx_len *= 2;
1967		params.opad_needed = 1;
1968	} else {
1969		params.opad_needed = 0;
1970	}
1971	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1972				HASH_SPACE_LEFT(params.kctx_len), 0);
1973	if (params.sg_len < req->nbytes) {
1974		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1975			params.kctx_len /= 2;
1976			params.opad_needed = 0;
1977		}
1978		params.last = 0;
1979		params.more = 1;
1980		params.scmd1 = 0;
1981		params.sg_len = rounddown(params.sg_len, bs);
1982		params.hash_size = params.alg_prm.result_size;
1983	} else {
1984		params.sg_len = req->nbytes;
1985		params.hash_size = crypto_ahash_digestsize(rtfm);
1986		params.last = 1;
1987		params.more = 0;
1988		params.scmd1 = req->nbytes + req_ctx->data_len;
1989
1990	}
1991	params.bfr_len = 0;
1992	req_ctx->hctx_wr.result = 1;
1993	req_ctx->hctx_wr.srcsg = req->src;
1994	req_ctx->data_len += params.bfr_len + params.sg_len;
1995
1996	if (req->nbytes == 0) {
1997		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1998		params.more = 1;
1999		params.bfr_len = bs;
2000	}
2001
2002	skb = create_hash_wr(req, &params);
2003	if (IS_ERR(skb)) {
2004		error = PTR_ERR(skb);
2005		goto unmap;
2006	}
2007	req_ctx->hctx_wr.processed += params.sg_len;
2008	skb->dev = u_ctx->lldi.ports[0];
2009	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
2010	chcr_send_wr(skb);
2011	return -EINPROGRESS;
2012unmap:
2013	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2014err:
2015	chcr_dec_wrcount(dev);
2016	return error;
2017}
2018
2019static int chcr_ahash_continue(struct ahash_request *req)
2020{
2021	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2022	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2023	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
2024	struct chcr_context *ctx = h_ctx(rtfm);
2025	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2026	struct sk_buff *skb;
2027	struct hash_wr_param params;
2028	u8  bs;
2029	int error;
2030	unsigned int cpu;
2031
2032	cpu = get_cpu();
2033	reqctx->txqidx = cpu % ctx->ntxq;
2034	reqctx->rxqidx = cpu % ctx->nrxq;
2035	put_cpu();
2036
2037	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2038	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2039	params.kctx_len = roundup(params.alg_prm.result_size, 16);
2040	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2041		params.kctx_len *= 2;
2042		params.opad_needed = 1;
2043	} else {
2044		params.opad_needed = 0;
2045	}
2046	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2047					    HASH_SPACE_LEFT(params.kctx_len),
2048					    hctx_wr->src_ofst);
2049	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2050		params.sg_len = req->nbytes - hctx_wr->processed;
2051	if (!hctx_wr->result ||
2052	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2053		if (is_hmac(crypto_ahash_tfm(rtfm))) {
2054			params.kctx_len /= 2;
2055			params.opad_needed = 0;
2056		}
2057		params.last = 0;
2058		params.more = 1;
2059		params.sg_len = rounddown(params.sg_len, bs);
2060		params.hash_size = params.alg_prm.result_size;
2061		params.scmd1 = 0;
2062	} else {
2063		params.last = 1;
2064		params.more = 0;
2065		params.hash_size = crypto_ahash_digestsize(rtfm);
2066		params.scmd1 = reqctx->data_len + params.sg_len;
2067	}
2068	params.bfr_len = 0;
2069	reqctx->data_len += params.sg_len;
2070	skb = create_hash_wr(req, &params);
2071	if (IS_ERR(skb)) {
2072		error = PTR_ERR(skb);
2073		goto err;
2074	}
2075	hctx_wr->processed += params.sg_len;
2076	skb->dev = u_ctx->lldi.ports[0];
2077	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2078	chcr_send_wr(skb);
2079	return 0;
2080err:
2081	return error;
2082}
2083
2084static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2085					  unsigned char *input,
2086					  int err)
2087{
2088	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2089	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2090	int digestsize, updated_digestsize;
2091	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2092	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2093	struct chcr_dev *dev = h_ctx(tfm)->dev;
2094
2095	if (input == NULL)
2096		goto out;
2097	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
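	/*
	 * The partial hash is kept in the size of the underlying transform:
	 * SHA-224 uses a SHA-256 sized state and SHA-384 a SHA-512 sized
	 * state, hence updated_digestsize below.
	 */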
2098	updated_digestsize = digestsize;
2099	if (digestsize == SHA224_DIGEST_SIZE)
2100		updated_digestsize = SHA256_DIGEST_SIZE;
2101	else if (digestsize == SHA384_DIGEST_SIZE)
2102		updated_digestsize = SHA512_DIGEST_SIZE;
2103
2104	if (hctx_wr->dma_addr) {
2105		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2106				 hctx_wr->dma_len, DMA_TO_DEVICE);
2107		hctx_wr->dma_addr = 0;
2108	}
2109	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2110				 req->nbytes)) {
2111		if (hctx_wr->result == 1) {
2112			hctx_wr->result = 0;
2113			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2114			       digestsize);
2115		} else {
2116			memcpy(reqctx->partial_hash,
2117			       input + sizeof(struct cpl_fw6_pld),
2118			       updated_digestsize);
2119
2120		}
2121		goto unmap;
2122	}
2123	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2124	       updated_digestsize);
2125
2126	err = chcr_ahash_continue(req);
2127	if (err)
2128		goto unmap;
2129	return;
2130unmap:
2131	if (hctx_wr->is_sg_map)
2132		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2133
2134
2135out:
2136	chcr_dec_wrcount(dev);
2137	req->base.complete(&req->base, err);
2138}
2139
2140/*
2141 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
2142 *	@req: crypto request
2143 */
2144int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2145			 int err)
2146{
2147	struct crypto_tfm *tfm = req->tfm;
2148	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2149	struct adapter *adap = padap(ctx->dev);
2150
2151	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2152	case CRYPTO_ALG_TYPE_AEAD:
2153		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2154		break;
2155
2156	case CRYPTO_ALG_TYPE_SKCIPHER:
2157		 chcr_handle_cipher_resp(skcipher_request_cast(req),
2158					       input, err);
2159		break;
2160	case CRYPTO_ALG_TYPE_AHASH:
2161		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2162	}
2163	atomic_inc(&adap->chcr_stats.complete);
2164	return err;
2165}
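
/*
 * Export/import the software-visible hash state: the running byte count,
 * any buffered partial block and the intermediate (partial) digest, so an
 * in-progress hash can be saved and resumed later.
 */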
2166static int chcr_ahash_export(struct ahash_request *areq, void *out)
2167{
2168	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2169	struct chcr_ahash_req_ctx *state = out;
2170
2171	state->reqlen = req_ctx->reqlen;
2172	state->data_len = req_ctx->data_len;
2173	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2174	memcpy(state->partial_hash, req_ctx->partial_hash,
2175	       CHCR_HASH_MAX_DIGEST_SIZE);
2176	chcr_init_hctx_per_wr(state);
2177	return 0;
2178}
2179
2180static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2181{
2182	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2183	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2184
2185	req_ctx->reqlen = state->reqlen;
2186	req_ctx->data_len = state->data_len;
2187	req_ctx->reqbfr = req_ctx->bfr1;
2188	req_ctx->skbfr = req_ctx->bfr2;
2189	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2190	memcpy(req_ctx->partial_hash, state->partial_hash,
2191	       CHCR_HASH_MAX_DIGEST_SIZE);
2192	chcr_init_hctx_per_wr(req_ctx);
2193	return 0;
2194}
2195
2196static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2197			     unsigned int keylen)
2198{
2199	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2200	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2201	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2202	unsigned int i, err = 0, updated_digestsize;
2203
2204	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2205
2206	/* Use the key to calculate the ipad and opad. The ipad is sent with
2207	 * the first request's data; the opad is sent with the final hash
2208	 * result. They are stored in hmacctx->ipad and hmacctx->opad.
2209	 */
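	/*
	 * Illustrative numbers for hmac(sha256): bs = 64, so a key longer
	 * than 64 bytes is first hashed down to 32 bytes, zero padded to 64
	 * bytes, and then XORed with the repeated ipad/opad bytes defined by
	 * RFC 2104 (IPAD_DATA / OPAD_DATA below).
	 */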
2210	shash->tfm = hmacctx->base_hash;
2211	if (keylen > bs) {
2212		err = crypto_shash_digest(shash, key, keylen,
2213					  hmacctx->ipad);
2214		if (err)
2215			goto out;
2216		keylen = digestsize;
2217	} else {
2218		memcpy(hmacctx->ipad, key, keylen);
2219	}
2220	memset(hmacctx->ipad + keylen, 0, bs - keylen);
2221	memcpy(hmacctx->opad, hmacctx->ipad, bs);
2222
2223	for (i = 0; i < bs / sizeof(int); i++) {
2224		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2225		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2226	}
2227
2228	updated_digestsize = digestsize;
2229	if (digestsize == SHA224_DIGEST_SIZE)
2230		updated_digestsize = SHA256_DIGEST_SIZE;
2231	else if (digestsize == SHA384_DIGEST_SIZE)
2232		updated_digestsize = SHA512_DIGEST_SIZE;
2233	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2234					hmacctx->ipad, digestsize);
2235	if (err)
2236		goto out;
2237	chcr_change_order(hmacctx->ipad, updated_digestsize);
2238
2239	err = chcr_compute_partial_hash(shash, hmacctx->opad,
2240					hmacctx->opad, digestsize);
2241	if (err)
2242		goto out;
2243	chcr_change_order(hmacctx->opad, updated_digestsize);
2244out:
2245	return err;
2246}
2247
2248static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2249			       unsigned int key_len)
2250{
2251	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2252	unsigned short context_size = 0;
2253	int err;
2254
2255	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2256	if (err)
2257		goto badkey_err;
2258
2259	memcpy(ablkctx->key, key, key_len);
2260	ablkctx->enckey_len = key_len;
2261	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2262	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2263	/* Both XTS keys must be aligned to a 16 byte boundary by padding
2264	 * with zeros, so each 24 byte key is padded with 8 zero bytes.
2265	 */
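	/*
	 * Illustrative layout for a 48 byte (2 x 24 byte) XTS key: key1 stays
	 * at bytes 0-23 and is zero padded at 24-31, key2 is moved to bytes
	 * 32-55 and zero padded at 56-63, giving enckey_len = 64.
	 */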
2266	if (key_len == 48) {
2267		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
2268				+ 16) >> 4;
2269		memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
2270		memset(ablkctx->key + 24, 0, 8);
2271		memset(ablkctx->key + 56, 0, 8);
2272		ablkctx->enckey_len = 64;
2273		ablkctx->key_ctx_hdr =
2274			FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
2275					 CHCR_KEYCTX_NO_KEY, 1,
2276					 0, context_size);
2277	} else {
2278		ablkctx->key_ctx_hdr =
2279		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2280				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2281				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2282				 CHCR_KEYCTX_NO_KEY, 1,
2283				 0, context_size);
2284	}
2285	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2286	return 0;
2287badkey_err:
2288	ablkctx->enckey_len = 0;
2289
2290	return err;
2291}
2292
2293static int chcr_sha_init(struct ahash_request *areq)
2294{
2295	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2296	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2297	int digestsize =  crypto_ahash_digestsize(tfm);
2298
2299	req_ctx->data_len = 0;
2300	req_ctx->reqlen = 0;
2301	req_ctx->reqbfr = req_ctx->bfr1;
2302	req_ctx->skbfr = req_ctx->bfr2;
2303	copy_hash_init_values(req_ctx->partial_hash, digestsize);
2304
2305	return 0;
2306}
2307
2308static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2309{
2310	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2311				 sizeof(struct chcr_ahash_req_ctx));
2312	return chcr_device_init(crypto_tfm_ctx(tfm));
2313}
2314
2315static int chcr_hmac_init(struct ahash_request *areq)
2316{
2317	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2318	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2319	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2320	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2321	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2322
2323	chcr_sha_init(areq);
2324	req_ctx->data_len = bs;
2325	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2326		if (digestsize == SHA224_DIGEST_SIZE)
2327			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2328			       SHA256_DIGEST_SIZE);
2329		else if (digestsize == SHA384_DIGEST_SIZE)
2330			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2331			       SHA512_DIGEST_SIZE);
2332		else
2333			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2334			       digestsize);
2335	}
2336	return 0;
2337}
2338
2339static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2340{
2341	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2342	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2343	unsigned int digestsize =
2344		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2345
2346	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2347				 sizeof(struct chcr_ahash_req_ctx));
2348	hmacctx->base_hash = chcr_alloc_shash(digestsize);
2349	if (IS_ERR(hmacctx->base_hash))
2350		return PTR_ERR(hmacctx->base_hash);
2351	return chcr_device_init(crypto_tfm_ctx(tfm));
2352}
2353
2354static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2355{
2356	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2357	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2358
2359	if (hmacctx->base_hash) {
2360		chcr_free_shash(hmacctx->base_hash);
2361		hmacctx->base_hash = NULL;
2362	}
2363}
2364
2365inline void chcr_aead_common_exit(struct aead_request *req)
2366{
2367	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2368	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2369	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2370
2371	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2372}
2373
2374static int chcr_aead_common_init(struct aead_request *req)
2375{
2376	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2377	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2378	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2379	unsigned int authsize = crypto_aead_authsize(tfm);
2380	int error = -EINVAL;
2381
2382	/* validate key size */
2383	if (aeadctx->enckey_len == 0)
2384		goto err;
2385	if (reqctx->op && req->cryptlen < authsize)
2386		goto err;
2387	if (reqctx->b0_len)
2388		reqctx->scratch_pad = reqctx->iv + IV;
2389	else
2390		reqctx->scratch_pad = NULL;
2391
2392	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2393				  reqctx->op);
2394	if (error) {
2395		error = -ENOMEM;
2396		goto err;
2397	}
2398
2399	return 0;
2400err:
2401	return error;
2402}
2403
2404static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2405				   int aadmax, int wrlen,
2406				   unsigned short op_type)
2407{
2408	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2409
2410	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2411	    dst_nents > MAX_DSGL_ENT ||
2412	    (req->assoclen > aadmax) ||
2413	    (wrlen > SGE_MAX_WR_LEN))
2414		return 1;
2415	return 0;
2416}
2417
2418static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2419{
2420	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2421	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2422	struct aead_request *subreq = aead_request_ctx(req);
2423
2424	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2425	aead_request_set_callback(subreq, req->base.flags,
2426				  req->base.complete, req->base.data);
2427	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2428				 req->iv);
2429	aead_request_set_ad(subreq, req->assoclen);
2430	return op_type ? crypto_aead_decrypt(subreq) :
2431		crypto_aead_encrypt(subreq);
2432}
2433
2434static struct sk_buff *create_authenc_wr(struct aead_request *req,
2435					 unsigned short qid,
2436					 int size)
2437{
2438	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2439	struct chcr_context *ctx = a_ctx(tfm);
2440	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2441	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2442	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2443	struct sk_buff *skb = NULL;
2444	struct chcr_wr *chcr_req;
2445	struct cpl_rx_phys_dsgl *phys_cpl;
2446	struct ulptx_sgl *ulptx;
2447	unsigned int transhdr_len;
2448	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2449	unsigned int   kctx_len = 0, dnents, snents;
2450	unsigned int  authsize = crypto_aead_authsize(tfm);
2451	int error = -EINVAL;
2452	u8 *ivptr;
2453	int null = 0;
2454	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2455		GFP_ATOMIC;
2456	struct adapter *adap = padap(ctx->dev);
2457	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2458
2459	if (req->cryptlen == 0)
2460		return NULL;
2461
2462	reqctx->b0_len = 0;
2463	error = chcr_aead_common_init(req);
2464	if (error)
2465		return ERR_PTR(error);
2466
2467	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2468		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2469		null = 1;
2470	}
2471	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2472		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2473	dnents += MIN_AUTH_SG; // For IV
2474	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2475			       CHCR_SRC_SG_SIZE, 0);
2476	dst_size = get_space_for_phys_dsgl(dnents);
2477	kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2478		- sizeof(chcr_req->key_ctx);
2479	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2480	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2481			SGE_MAX_WR_LEN;
2482	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2483			: (sgl_len(snents) * 8);
2484	transhdr_len += temp;
2485	transhdr_len = roundup(transhdr_len, 16);
2486
2487	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2488				    transhdr_len, reqctx->op)) {
2489		atomic_inc(&adap->chcr_stats.fallback);
2490		chcr_aead_common_exit(req);
2491		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2492	}
2493	skb = alloc_skb(transhdr_len, flags);
2494	if (!skb) {
2495		error = -ENOMEM;
2496		goto err;
2497	}
2498
2499	chcr_req = __skb_put_zero(skb, transhdr_len);
2500
2501	temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2502
2503	/*
2504	 * Input order is AAD, IV and payload, where the IV is included as
2505	 * part of the authdata. All other fields are filled according to
2506	 * the hardware spec.
2507	 */
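	/*
	 * In other words the data handed to the hardware is laid out as
	 * [AAD | IV | plaintext/ciphertext], which is why pldlen below is
	 * assoclen + IV + cryptlen.
	 */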
2508	chcr_req->sec_cpl.op_ivinsrtofst =
2509				FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2510	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2511	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2512					null ? 0 : 1 + IV,
2513					null ? 0 : IV + req->assoclen,
2514					req->assoclen + IV + 1,
2515					(temp & 0x1F0) >> 4);
2516	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2517					temp & 0xF,
2518					null ? 0 : req->assoclen + IV + 1,
2519					temp, temp);
2520	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2521	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2522		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2523	else
2524		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2525	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2526					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2527					temp,
2528					actx->auth_mode, aeadctx->hmac_ctrl,
2529					IV >> 1);
2530	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2531					 0, 0, dst_size);
2532
2533	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2534	if (reqctx->op == CHCR_ENCRYPT_OP ||
2535		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2536		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2537		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2538		       aeadctx->enckey_len);
2539	else
2540		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2541		       aeadctx->enckey_len);
2542
2543	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2544	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2545	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2546	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2547	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2548	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2549	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2550		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2551		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2552				CTR_RFC3686_IV_SIZE);
2553		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2554			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2555	} else {
2556		memcpy(ivptr, req->iv, IV);
2557	}
2558	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2559	chcr_add_aead_src_ent(req, ulptx);
2560	atomic_inc(&adap->chcr_stats.cipher_rqst);
2561	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2562		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2563	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2564		   transhdr_len, temp, 0);
2565	reqctx->skb = skb;
2566
2567	return skb;
2568err:
2569	chcr_aead_common_exit(req);
2570
2571	return ERR_PTR(error);
2572}
2573
2574int chcr_aead_dma_map(struct device *dev,
2575		      struct aead_request *req,
2576		      unsigned short op_type)
2577{
2578	int error;
2579	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2580	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2581	unsigned int authsize = crypto_aead_authsize(tfm);
2582	int src_len, dst_len;
2583
2584	/* Calculate and handle the src and dst sg lengths separately
2585	 * for in-place and out-of-place operations.
2586	 */
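	/*
	 * Illustrative example: for an out-of-place encrypt (op_type == 0)
	 * only assoclen + cryptlen bytes are read from src, while dst must
	 * also have room for the authsize byte tag appended by the hardware.
	 */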
2587	if (req->src == req->dst) {
2588		src_len = req->assoclen + req->cryptlen + (op_type ?
2589							0 : authsize);
2590		dst_len = src_len;
2591	} else {
2592		src_len = req->assoclen + req->cryptlen;
2593		dst_len = req->assoclen + req->cryptlen + (op_type ?
2594							-authsize : authsize);
2595	}
2596
2597	if (!req->cryptlen || !src_len || !dst_len)
2598		return 0;
2599	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2600					DMA_BIDIRECTIONAL);
2601	if (dma_mapping_error(dev, reqctx->iv_dma))
2602		return -ENOMEM;
2603	if (reqctx->b0_len)
2604		reqctx->b0_dma = reqctx->iv_dma + IV;
2605	else
2606		reqctx->b0_dma = 0;
2607	if (req->src == req->dst) {
2608		error = dma_map_sg(dev, req->src,
2609				sg_nents_for_len(req->src, src_len),
2610					DMA_BIDIRECTIONAL);
2611		if (!error)
2612			goto err;
2613	} else {
2614		error = dma_map_sg(dev, req->src,
2615				   sg_nents_for_len(req->src, src_len),
2616				   DMA_TO_DEVICE);
2617		if (!error)
2618			goto err;
2619		error = dma_map_sg(dev, req->dst,
2620				   sg_nents_for_len(req->dst, dst_len),
2621				   DMA_FROM_DEVICE);
2622		if (!error) {
2623			dma_unmap_sg(dev, req->src,
2624				     sg_nents_for_len(req->src, src_len),
2625				     DMA_TO_DEVICE);
2626			goto err;
2627		}
2628	}
2629
2630	return 0;
2631err:
2632	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2633	return -ENOMEM;
2634}
2635
2636void chcr_aead_dma_unmap(struct device *dev,
2637			 struct aead_request *req,
2638			 unsigned short op_type)
2639{
2640	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2641	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2642	unsigned int authsize = crypto_aead_authsize(tfm);
2643	int src_len, dst_len;
2644
2645	/* Calculate and handle the src and dst sg lengths separately
2646	 * for in-place and out-of-place operations.
2647	 */
2648	if (req->src == req->dst) {
2649		src_len = req->assoclen + req->cryptlen + (op_type ?
2650							0 : authsize);
2651		dst_len = src_len;
2652	} else {
2653		src_len = req->assoclen + req->cryptlen;
2654		dst_len = req->assoclen + req->cryptlen + (op_type ?
2655						-authsize : authsize);
2656	}
2657
2658	if (!req->cryptlen || !src_len || !dst_len)
2659		return;
2660
2661	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2662					DMA_BIDIRECTIONAL);
2663	if (req->src == req->dst) {
2664		dma_unmap_sg(dev, req->src,
2665			     sg_nents_for_len(req->src, src_len),
2666			     DMA_BIDIRECTIONAL);
2667	} else {
2668		dma_unmap_sg(dev, req->src,
2669			     sg_nents_for_len(req->src, src_len),
2670			     DMA_TO_DEVICE);
2671		dma_unmap_sg(dev, req->dst,
2672			     sg_nents_for_len(req->dst, dst_len),
2673			     DMA_FROM_DEVICE);
2674	}
2675}
2676
2677void chcr_add_aead_src_ent(struct aead_request *req,
2678			   struct ulptx_sgl *ulptx)
2679{
2680	struct ulptx_walk ulp_walk;
2681	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2682
2683	if (reqctx->imm) {
2684		u8 *buf = (u8 *)ulptx;
2685
2686		if (reqctx->b0_len) {
2687			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2688			buf += reqctx->b0_len;
2689		}
2690		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2691				   buf, req->cryptlen + req->assoclen, 0);
2692	} else {
2693		ulptx_walk_init(&ulp_walk, ulptx);
2694		if (reqctx->b0_len)
2695			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2696					    reqctx->b0_dma);
2697		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2698				  req->assoclen,  0);
2699		ulptx_walk_end(&ulp_walk);
2700	}
2701}
2702
2703void chcr_add_aead_dst_ent(struct aead_request *req,
2704			   struct cpl_rx_phys_dsgl *phys_cpl,
2705			   unsigned short qid)
2706{
2707	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2708	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2709	struct dsgl_walk dsgl_walk;
2710	unsigned int authsize = crypto_aead_authsize(tfm);
2711	struct chcr_context *ctx = a_ctx(tfm);
2712	u32 temp;
2713	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2714
2715	dsgl_walk_init(&dsgl_walk, phys_cpl);
2716	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2717	temp = req->assoclen + req->cryptlen +
2718		(reqctx->op ? -authsize : authsize);
2719	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2720	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2721}
2722
2723void chcr_add_cipher_src_ent(struct skcipher_request *req,
2724			     void *ulptx,
2725			     struct  cipher_wr_param *wrparam)
2726{
2727	struct ulptx_walk ulp_walk;
2728	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2729	u8 *buf = ulptx;
2730
2731	memcpy(buf, reqctx->iv, IV);
2732	buf += IV;
2733	if (reqctx->imm) {
2734		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2735				   buf, wrparam->bytes, reqctx->processed);
2736	} else {
2737		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2738		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2739				  reqctx->src_ofst);
2740		reqctx->srcsg = ulp_walk.last_sg;
2741		reqctx->src_ofst = ulp_walk.last_sg_len;
2742		ulptx_walk_end(&ulp_walk);
2743	}
2744}
2745
2746void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2747			     struct cpl_rx_phys_dsgl *phys_cpl,
2748			     struct  cipher_wr_param *wrparam,
2749			     unsigned short qid)
2750{
2751	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2752	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2753	struct chcr_context *ctx = c_ctx(tfm);
2754	struct dsgl_walk dsgl_walk;
2755	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2756
2757	dsgl_walk_init(&dsgl_walk, phys_cpl);
2758	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2759			 reqctx->dst_ofst);
2760	reqctx->dstsg = dsgl_walk.last_sg;
2761	reqctx->dst_ofst = dsgl_walk.last_sg_len;
2762	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2763}
2764
2765void chcr_add_hash_src_ent(struct ahash_request *req,
2766			   struct ulptx_sgl *ulptx,
2767			   struct hash_wr_param *param)
2768{
2769	struct ulptx_walk ulp_walk;
2770	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2771
2772	if (reqctx->hctx_wr.imm) {
2773		u8 *buf = (u8 *)ulptx;
2774
2775		if (param->bfr_len) {
2776			memcpy(buf, reqctx->reqbfr, param->bfr_len);
2777			buf += param->bfr_len;
2778		}
2779
2780		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2781				   sg_nents(reqctx->hctx_wr.srcsg), buf,
2782				   param->sg_len, 0);
2783	} else {
2784		ulptx_walk_init(&ulp_walk, ulptx);
2785		if (param->bfr_len)
2786			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2787					    reqctx->hctx_wr.dma_addr);
2788		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2789				  param->sg_len, reqctx->hctx_wr.src_ofst);
2790		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2791		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2792		ulptx_walk_end(&ulp_walk);
2793	}
2794}
2795
2796int chcr_hash_dma_map(struct device *dev,
2797		      struct ahash_request *req)
2798{
2799	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2800	int error = 0;
2801
2802	if (!req->nbytes)
2803		return 0;
2804	error = dma_map_sg(dev, req->src, sg_nents(req->src),
2805			   DMA_TO_DEVICE);
2806	if (!error)
2807		return -ENOMEM;
2808	req_ctx->hctx_wr.is_sg_map = 1;
2809	return 0;
2810}
2811
2812void chcr_hash_dma_unmap(struct device *dev,
2813			 struct ahash_request *req)
2814{
2815	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2816
2817	if (!req->nbytes)
2818		return;
2819
2820	dma_unmap_sg(dev, req->src, sg_nents(req->src),
2821			   DMA_TO_DEVICE);
2822	req_ctx->hctx_wr.is_sg_map = 0;
2823
2824}
2825
2826int chcr_cipher_dma_map(struct device *dev,
2827			struct skcipher_request *req)
2828{
2829	int error;
2830
2831	if (req->src == req->dst) {
2832		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2833				   DMA_BIDIRECTIONAL);
2834		if (!error)
2835			goto err;
2836	} else {
2837		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2838				   DMA_TO_DEVICE);
2839		if (!error)
2840			goto err;
2841		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2842				   DMA_FROM_DEVICE);
2843		if (!error) {
2844			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2845				   DMA_TO_DEVICE);
2846			goto err;
2847		}
2848	}
2849
2850	return 0;
2851err:
2852	return -ENOMEM;
2853}
2854
2855void chcr_cipher_dma_unmap(struct device *dev,
2856			   struct skcipher_request *req)
2857{
2858	if (req->src == req->dst) {
2859		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2860				   DMA_BIDIRECTIONAL);
2861	} else {
2862		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2863				   DMA_TO_DEVICE);
2864		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2865				   DMA_FROM_DEVICE);
2866	}
2867}
2868
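/*
 * Encode msglen big-endian into the trailing bytes of a csize byte field,
 * e.g. (illustrative) csize = 4 and msglen = 0x0102 gives 00 00 01 02.
 */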
2869static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2870{
2871	__be32 data;
2872
2873	memset(block, 0, csize);
2874	block += csize;
2875
2876	if (csize >= 4)
2877		csize = 4;
2878	else if (msglen > (unsigned int)(1 << (8 * csize)))
2879		return -EOVERFLOW;
2880
2881	data = cpu_to_be32(msglen);
2882	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2883
2884	return 0;
2885}
2886
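/*
 * Build the CCM B0 block (RFC 3610): byte 0 is the flags byte, with bit 6
 * set when AAD is present, bits 3-5 holding (M - 2) / 2 and bits 0-2
 * holding L - 1 (taken from iv[0]); the last L bytes carry the message
 * length.
 */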
2887static int generate_b0(struct aead_request *req, u8 *ivptr,
2888			unsigned short op_type)
2889{
2890	unsigned int l, lp, m;
2891	int rc;
2892	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2893	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2894	u8 *b0 = reqctx->scratch_pad;
2895
2896	m = crypto_aead_authsize(aead);
2897
2898	memcpy(b0, ivptr, 16);
2899
2900	lp = b0[0];
2901	l = lp + 1;
2902
2903	/* set m, bits 3-5 */
2904	*b0 |= (8 * ((m - 2) / 2));
2905
2906	/* set adata, bit 6, if associated data is used */
2907	if (req->assoclen)
2908		*b0 |= 64;
2909	rc = set_msg_len(b0 + 16 - l,
2910			 (op_type == CHCR_DECRYPT_OP) ?
2911			 req->cryptlen - m : req->cryptlen, l);
2912
2913	return rc;
2914}
2915
2916static inline int crypto_ccm_check_iv(const u8 *iv)
2917{
2918	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2919	if (iv[0] < 1 || iv[0] > 7)
2920		return -EINVAL;
2921
2922	return 0;
2923}
2924
2925static int ccm_format_packet(struct aead_request *req,
2926			     u8 *ivptr,
2927			     unsigned int sub_type,
2928			     unsigned short op_type,
2929			     unsigned int assoclen)
2930{
2931	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2932	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2933	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2934	int rc = 0;
2935
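	/*
	 * Build the 16 byte counter block. For RFC 4309 the flags byte is 3
	 * (L' = 3, i.e. a 4 byte length field), followed by the 3 byte salt
	 * and the 8 byte IV; the trailing counter bytes are zeroed.
	 */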
2936	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2937		ivptr[0] = 3;
2938		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2939		memcpy(ivptr + 4, req->iv, 8);
2940		memset(ivptr + 12, 0, 4);
2941	} else {
2942		memcpy(ivptr, req->iv, 16);
2943	}
2944	if (assoclen)
2945		put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
2946
2947	rc = generate_b0(req, ivptr, op_type);
2948	/* zero the ctr value */
2949	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2950	return rc;
2951}
2952
2953static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2954				  unsigned int dst_size,
2955				  struct aead_request *req,
2956				  unsigned short op_type)
2957{
2958	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2959	struct chcr_context *ctx = a_ctx(tfm);
2960	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2961	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2962	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2963	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2964	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2965	unsigned int ccm_xtra;
2966	unsigned int tag_offset = 0, auth_offset = 0;
2967	unsigned int assoclen;
2968
2969	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2970		assoclen = req->assoclen - 8;
2971	else
2972		assoclen = req->assoclen;
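	/*
	 * ccm_xtra accounts for the extra bytes the hardware sees for CCM:
	 * the B0 block plus, when AAD is present, the AAD length field that
	 * precedes the associated data.
	 */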
2973	ccm_xtra = CCM_B0_SIZE +
2974		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2975
2976	auth_offset = req->cryptlen ?
2977		(req->assoclen + IV + 1 + ccm_xtra) : 0;
2978	if (op_type == CHCR_DECRYPT_OP) {
2979		if (crypto_aead_authsize(tfm) != req->cryptlen)
2980			tag_offset = crypto_aead_authsize(tfm);
2981		else
2982			auth_offset = 0;
2983	}
2984
2985	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2986	sec_cpl->pldlen =
2987		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2988	/* CCM always has a B0 block, so AAD always starts at 1 */
2989	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2990				1 + IV,	IV + assoclen + ccm_xtra,
2991				req->assoclen + IV + 1 + ccm_xtra, 0);
2992
2993	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2994					auth_offset, tag_offset,
2995					(op_type == CHCR_ENCRYPT_OP) ? 0 :
2996					crypto_aead_authsize(tfm));
2997	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2998					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2999					cipher_mode, mac_mode,
3000					aeadctx->hmac_ctrl, IV >> 1);
3001
3002	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
3003					0, dst_size);
3004}
3005
3006static int aead_ccm_validate_input(unsigned short op_type,
3007				   struct aead_request *req,
3008				   struct chcr_aead_ctx *aeadctx,
3009				   unsigned int sub_type)
3010{
3011	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
3012		if (crypto_ccm_check_iv(req->iv)) {
3013			pr_err("CCM: IV check fails\n");
3014			return -EINVAL;
3015		}
3016	} else {
3017		if (req->assoclen != 16 && req->assoclen != 20) {
3018			pr_err("RFC4309: Invalid AAD length %d\n",
3019			       req->assoclen);
3020			return -EINVAL;
3021		}
3022	}
3023	return 0;
3024}
3025
3026static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
3027					  unsigned short qid,
3028					  int size)
3029{
3030	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3031	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3032	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3033	struct sk_buff *skb = NULL;
3034	struct chcr_wr *chcr_req;
3035	struct cpl_rx_phys_dsgl *phys_cpl;
3036	struct ulptx_sgl *ulptx;
3037	unsigned int transhdr_len;
3038	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
3039	unsigned int sub_type, assoclen = req->assoclen;
3040	unsigned int authsize = crypto_aead_authsize(tfm);
3041	int error = -EINVAL;
3042	u8 *ivptr;
3043	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3044		GFP_ATOMIC;
3045	struct adapter *adap = padap(a_ctx(tfm)->dev);
3046
3047	sub_type = get_aead_subtype(tfm);
3048	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
3049		assoclen -= 8;
3050	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
3051	error = chcr_aead_common_init(req);
3052	if (error)
3053		return ERR_PTR(error);
3054
3055	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3056	if (error)
3057		goto err;
3058	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
3059			+ (reqctx->op ? -authsize : authsize),
3060			CHCR_DST_SG_SIZE, 0);
3061	dnents += MIN_CCM_SG; // For IV and B0
3062	dst_size = get_space_for_phys_dsgl(dnents);
3063	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3064			       CHCR_SRC_SG_SIZE, 0);
3065	snents += MIN_CCM_SG; // For B0
3066	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
3067	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3068	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
3069		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
3070	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
3071				     reqctx->b0_len, 16) :
3072		(sgl_len(snents) *  8);
3073	transhdr_len += temp;
3074	transhdr_len = roundup(transhdr_len, 16);
3075
3076	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
3077				reqctx->b0_len, transhdr_len, reqctx->op)) {
3078		atomic_inc(&adap->chcr_stats.fallback);
3079		chcr_aead_common_exit(req);
3080		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3081	}
3082	skb = alloc_skb(transhdr_len,  flags);
3083
3084	if (!skb) {
3085		error = -ENOMEM;
3086		goto err;
3087	}
3088
3089	chcr_req = __skb_put_zero(skb, transhdr_len);
3090
3091	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3092
3093	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3094	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3095	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3096			aeadctx->key, aeadctx->enckey_len);
3097
3098	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3099	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3100	ulptx = (struct ulptx_sgl *)(ivptr + IV);
3101	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3102	if (error)
3103		goto dstmap_fail;
3104	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3105	chcr_add_aead_src_ent(req, ulptx);
3106
3107	atomic_inc(&adap->chcr_stats.aead_rqst);
3108	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3109		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3110		reqctx->b0_len) : 0);
3111	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3112		    transhdr_len, temp, 0);
3113	reqctx->skb = skb;
3114
3115	return skb;
3116dstmap_fail:
3117	kfree_skb(skb);
3118err:
3119	chcr_aead_common_exit(req);
3120	return ERR_PTR(error);
3121}
3122
3123static struct sk_buff *create_gcm_wr(struct aead_request *req,
3124				     unsigned short qid,
3125				     int size)
3126{
3127	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3128	struct chcr_context *ctx = a_ctx(tfm);
3129	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3130	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3131	struct sk_buff *skb = NULL;
3132	struct chcr_wr *chcr_req;
3133	struct cpl_rx_phys_dsgl *phys_cpl;
3134	struct ulptx_sgl *ulptx;
3135	unsigned int transhdr_len, dnents = 0, snents;
3136	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3137	unsigned int authsize = crypto_aead_authsize(tfm);
3138	int error = -EINVAL;
3139	u8 *ivptr;
3140	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3141		GFP_ATOMIC;
3142	struct adapter *adap = padap(ctx->dev);
3143	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3144
3145	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3146		assoclen = req->assoclen - 8;
3147
3148	reqctx->b0_len = 0;
3149	error = chcr_aead_common_init(req);
3150	if (error)
3151		return ERR_PTR(error);
3152	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3153				(reqctx->op ? -authsize : authsize),
3154				CHCR_DST_SG_SIZE, 0);
3155	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3156			       CHCR_SRC_SG_SIZE, 0);
3157	dnents += MIN_GCM_SG; // For IV
3158	dst_size = get_space_for_phys_dsgl(dnents);
3159	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3160	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3161	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3162			SGE_MAX_WR_LEN;
3163	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3164		(sgl_len(snents) * 8);
3165	transhdr_len += temp;
3166	transhdr_len = roundup(transhdr_len, 16);
3167	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3168			    transhdr_len, reqctx->op)) {
3169
3170		atomic_inc(&adap->chcr_stats.fallback);
3171		chcr_aead_common_exit(req);
3172		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3173	}
3174	skb = alloc_skb(transhdr_len, flags);
3175	if (!skb) {
3176		error = -ENOMEM;
3177		goto err;
3178	}
3179
3180	chcr_req = __skb_put_zero(skb, transhdr_len);
3181
3182	// Offset of tag from end
3183	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3184	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3185						rx_channel_id, 2, 1);
3186	chcr_req->sec_cpl.pldlen =
3187		htonl(req->assoclen + IV + req->cryptlen);
3188	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3189					assoclen ? 1 + IV : 0,
3190					assoclen ? IV + assoclen : 0,
3191					req->assoclen + IV + 1, 0);
3192	chcr_req->sec_cpl.cipherstop_lo_authinsert =
3193			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3194						temp, temp);
3195	chcr_req->sec_cpl.seqno_numivs =
3196			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3197					CHCR_ENCRYPT_OP) ? 1 : 0,
3198					CHCR_SCMD_CIPHER_MODE_AES_GCM,
3199					CHCR_SCMD_AUTH_MODE_GHASH,
3200					aeadctx->hmac_ctrl, IV >> 1);
3201	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3202					0, 0, dst_size);
3203	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3204	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3205	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3206	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3207
3208	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3209	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3210	/* prepare a 16 byte IV */
3211	/* SALT | IV | 0x00000001 */
3212	if (get_aead_subtype(tfm) ==
3213	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3214		memcpy(ivptr, aeadctx->salt, 4);
3215		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3216	} else {
3217		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3218	}
3219	put_unaligned_be32(0x01, &ivptr[12]);
3220	ulptx = (struct ulptx_sgl *)(ivptr + 16);
3221
3222	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3223	chcr_add_aead_src_ent(req, ulptx);
3224	atomic_inc(&adap->chcr_stats.aead_rqst);
3225	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3226		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3227	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3228		    transhdr_len, temp, reqctx->verify);
3229	reqctx->skb = skb;
3230	return skb;
3231
3232err:
3233	chcr_aead_common_exit(req);
3234	return ERR_PTR(error);
3235}
3236
3237
3238
3239static int chcr_aead_cra_init(struct crypto_aead *tfm)
3240{
3241	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3242	struct aead_alg *alg = crypto_aead_alg(tfm);
3243
3244	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3245					       CRYPTO_ALG_NEED_FALLBACK |
3246					       CRYPTO_ALG_ASYNC);
3247	if  (IS_ERR(aeadctx->sw_cipher))
3248		return PTR_ERR(aeadctx->sw_cipher);
3249	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3250				 sizeof(struct aead_request) +
3251				 crypto_aead_reqsize(aeadctx->sw_cipher)));
3252	return chcr_device_init(a_ctx(tfm));
3253}
3254
3255static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3256{
3257	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3258
3259	crypto_free_aead(aeadctx->sw_cipher);
3260}
3261
3262static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3263					unsigned int authsize)
3264{
3265	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3266
3267	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3268	aeadctx->mayverify = VERIFY_HW;
3269	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3270}
3271static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3272				    unsigned int authsize)
3273{
3274	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3275	u32 maxauth = crypto_aead_maxauthsize(tfm);
3276
3277	/* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3278	 * does not hold for SHA1, so the authsize == 12 check must come
3279	 * before the authsize == (maxauth >> 1) check.
3280	 */
3281	if (authsize == ICV_4) {
3282		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3283		aeadctx->mayverify = VERIFY_HW;
3284	} else if (authsize == ICV_6) {
3285		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3286		aeadctx->mayverify = VERIFY_HW;
3287	} else if (authsize == ICV_10) {
3288		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3289		aeadctx->mayverify = VERIFY_HW;
3290	} else if (authsize == ICV_12) {
3291		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3292		aeadctx->mayverify = VERIFY_HW;
3293	} else if (authsize == ICV_14) {
3294		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3295		aeadctx->mayverify = VERIFY_HW;
3296	} else if (authsize == (maxauth >> 1)) {
3297		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3298		aeadctx->mayverify = VERIFY_HW;
3299	} else if (authsize == maxauth) {
3300		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3301		aeadctx->mayverify = VERIFY_HW;
3302	} else {
3303		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3304		aeadctx->mayverify = VERIFY_SW;
3305	}
3306	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3307}
3308
3309
3310static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3311{
3312	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3313
3314	switch (authsize) {
3315	case ICV_4:
3316		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3317		aeadctx->mayverify = VERIFY_HW;
3318		break;
3319	case ICV_8:
3320		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3321		aeadctx->mayverify = VERIFY_HW;
3322		break;
3323	case ICV_12:
3324		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3325		aeadctx->mayverify = VERIFY_HW;
3326		break;
3327	case ICV_14:
3328		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3329		aeadctx->mayverify = VERIFY_HW;
3330		break;
3331	case ICV_16:
3332		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3333		aeadctx->mayverify = VERIFY_HW;
3334		break;
3335	case ICV_13:
3336	case ICV_15:
3337		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3338		aeadctx->mayverify = VERIFY_SW;
3339		break;
3340	default:
3341		return -EINVAL;
3342	}
3343	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3344}
3345
3346static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3347					  unsigned int authsize)
3348{
3349	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3350
3351	switch (authsize) {
3352	case ICV_8:
3353		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3354		aeadctx->mayverify = VERIFY_HW;
3355		break;
3356	case ICV_12:
3357		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3358		aeadctx->mayverify = VERIFY_HW;
3359		break;
3360	case ICV_16:
3361		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3362		aeadctx->mayverify = VERIFY_HW;
3363		break;
3364	default:
3365		return -EINVAL;
3366	}
3367	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3368}
3369
3370static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3371				unsigned int authsize)
3372{
3373	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3374
3375	switch (authsize) {
3376	case ICV_4:
3377		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3378		aeadctx->mayverify = VERIFY_HW;
3379		break;
3380	case ICV_6:
3381		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3382		aeadctx->mayverify = VERIFY_HW;
3383		break;
3384	case ICV_8:
3385		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3386		aeadctx->mayverify = VERIFY_HW;
3387		break;
3388	case ICV_10:
3389		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3390		aeadctx->mayverify = VERIFY_HW;
3391		break;
3392	case ICV_12:
3393		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3394		aeadctx->mayverify = VERIFY_HW;
3395		break;
3396	case ICV_14:
3397		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3398		aeadctx->mayverify = VERIFY_HW;
3399		break;
3400	case ICV_16:
3401		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3402		aeadctx->mayverify = VERIFY_HW;
3403		break;
3404	default:
3405		return -EINVAL;
3406	}
3407	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3408}
3409
3410static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3411				const u8 *key,
3412				unsigned int keylen)
3413{
3414	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3415	unsigned char ck_size, mk_size;
3416	int key_ctx_size = 0;
3417
3418	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3419	if (keylen == AES_KEYSIZE_128) {
3420		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3421		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3422	} else if (keylen == AES_KEYSIZE_192) {
3423		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3424		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3425	} else if (keylen == AES_KEYSIZE_256) {
3426		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3427		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3428	} else {
3429		aeadctx->enckey_len = 0;
3430		return	-EINVAL;
3431	}
3432	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3433						key_ctx_size >> 4);
3434	memcpy(aeadctx->key, key, keylen);
3435	aeadctx->enckey_len = keylen;
3436
3437	return 0;
3438}
3439
3440static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3441				const u8 *key,
3442				unsigned int keylen)
3443{
3444	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3445	int error;
3446
3447	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3448	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3449			      CRYPTO_TFM_REQ_MASK);
3450	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3451	if (error)
3452		return error;
3453	return chcr_ccm_common_setkey(aead, key, keylen);
3454}
3455
3456static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3457				    unsigned int keylen)
3458{
3459	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3460	int error;
3461
3462	if (keylen < 3) {
3463		aeadctx->enckey_len = 0;
3464		return	-EINVAL;
3465	}
3466	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3467	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3468			      CRYPTO_TFM_REQ_MASK);
3469	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3470	if (error)
3471		return error;
3472	keylen -= 3;
3473	memcpy(aeadctx->salt, key + keylen, 3);
3474	return chcr_ccm_common_setkey(aead, key, keylen);
3475}
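/*
 * Illustrative sketch, not part of the driver source: for rfc4309(ccm(aes))
 * the setkey() blob is the raw AES key followed by a 3-byte implicit salt,
 * which is why the code above strips the last three bytes into
 * aeadctx->salt. Assuming AES-128 (aes_key, salt and tfm are placeholder
 * names), a caller would build the blob like this:
 *
 *	u8 blob[AES_KEYSIZE_128 + 3];
 *
 *	memcpy(blob, aes_key, AES_KEYSIZE_128);
 *	memcpy(blob + AES_KEYSIZE_128, salt, 3);
 *	crypto_aead_setkey(tfm, blob, sizeof(blob));
 *
 * rfc4106(gcm(aes)) below follows the same convention with a 4-byte salt.
 */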
3476
3477static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3478			   unsigned int keylen)
3479{
3480	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3481	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3482	unsigned int ck_size;
3483	int ret = 0, key_ctx_size = 0;
3484	struct crypto_aes_ctx aes;
3485
3486	aeadctx->enckey_len = 0;
3487	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3488	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3489			      & CRYPTO_TFM_REQ_MASK);
3490	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3491	if (ret)
3492		goto out;
3493
3494	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3495	    keylen > 3) {
3496		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3497		memcpy(aeadctx->salt, key + keylen, 4);
3498	}
3499	if (keylen == AES_KEYSIZE_128) {
3500		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3501	} else if (keylen == AES_KEYSIZE_192) {
3502		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3503	} else if (keylen == AES_KEYSIZE_256) {
3504		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3505	} else {
3506		pr_err("GCM: Invalid key length %d\n", keylen);
3507		ret = -EINVAL;
3508		goto out;
3509	}
3510
3511	memcpy(aeadctx->key, key, keylen);
3512	aeadctx->enckey_len = keylen;
3513	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3514		AEAD_H_SIZE;
3515	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3516						CHCR_KEYCTX_MAC_KEY_SIZE_128,
3517						0, 0,
3518						key_ctx_size >> 4);
3519	/* Calculate the hash subkey H = CIPH(K, 0^128), i.e. AES of an
3520	 * all-zero block under the cipher key; it goes into the key context.
3521	 */
3522	ret = aes_expandkey(&aes, key, keylen);
3523	if (ret) {
3524		aeadctx->enckey_len = 0;
3525		goto out;
3526	}
3527	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3528	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3529	memzero_explicit(&aes, sizeof(aes));
3530
3531out:
3532	return ret;
3533}
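/*
 * Illustrative sketch, not part of the driver source: the hash subkey
 * programmed into gctx->ghash_h above is simply H = AES_K(0^128). Using
 * only the generic AES helpers already called in chcr_gcm_setkey(), a
 * standalone derivation would look like:
 *
 *	struct crypto_aes_ctx aes;
 *	u8 ghash_h[AEAD_H_SIZE] = { 0 };
 *
 *	if (!aes_expandkey(&aes, key, keylen)) {
 *		aes_encrypt(&aes, ghash_h, ghash_h);	// H = CIPH_K(0^128)
 *		memzero_explicit(&aes, sizeof(aes));
 *	}
 */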
3534
3535static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3536				   unsigned int keylen)
3537{
3538	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3539	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3540	/* it contains both the authentication and cipher keys */
3541	struct crypto_authenc_keys keys;
3542	unsigned int bs, subtype;
3543	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3544	int err = 0, i, key_ctx_len = 0;
3545	unsigned char ck_size = 0;
3546	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3547	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3548	struct algo_param param;
3549	int align;
3550	u8 *o_ptr = NULL;
3551
3552	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3553	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3554			      & CRYPTO_TFM_REQ_MASK);
3555	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3556	if (err)
3557		goto out;
3558
3559	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3560		goto out;
3561
3562	if (get_alg_config(&param, max_authsize)) {
3563		pr_err("Unsupported digest size\n");
3564		goto out;
3565	}
3566	subtype = get_aead_subtype(authenc);
3567	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3568		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3569		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3570			goto out;
3571		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3572		- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3573		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3574	}
3575	if (keys.enckeylen == AES_KEYSIZE_128) {
3576		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3577	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3578		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3579	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3580		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3581	} else {
3582		pr_err("Unsupported cipher key\n");
3583		goto out;
3584	}
3585
3586	/* Copy only the encryption key. We use the authkey to generate
3587	 * h(ipad) and h(opad), so the authkey itself is not needed again.
3588	 * authkeylen equals the hash digest size.
3589	 */
3590	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3591	aeadctx->enckey_len = keys.enckeylen;
3592	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3593		subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3594
3595		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3596			    aeadctx->enckey_len << 3);
3597	}
3598	base_hash  = chcr_alloc_shash(max_authsize);
3599	if (IS_ERR(base_hash)) {
3600		pr_err("Base driver cannot be loaded\n");
3601		goto out;
3602	}
3603	{
3604		SHASH_DESC_ON_STACK(shash, base_hash);
3605
3606		shash->tfm = base_hash;
3607		bs = crypto_shash_blocksize(base_hash);
3608		align = KEYCTX_ALIGN_PAD(max_authsize);
3609		o_ptr =  actx->h_iopad + param.result_size + align;
3610
3611		if (keys.authkeylen > bs) {
3612			err = crypto_shash_digest(shash, keys.authkey,
3613						  keys.authkeylen,
3614						  o_ptr);
3615			if (err) {
3616				pr_err("Digest of oversized auth key failed\n");
3617				goto out;
3618			}
3619			keys.authkeylen = max_authsize;
3620		} else
3621			memcpy(o_ptr, keys.authkey, keys.authkeylen);
3622
3623		/* Compute the ipad-digest*/
3624		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3625		memcpy(pad, o_ptr, keys.authkeylen);
3626		for (i = 0; i < bs >> 2; i++)
3627			*((unsigned int *)pad + i) ^= IPAD_DATA;
3628
3629		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3630					      max_authsize))
3631			goto out;
3632		/* Compute the opad-digest */
3633		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3634		memcpy(pad, o_ptr, keys.authkeylen);
3635		for (i = 0; i < bs >> 2; i++)
3636			*((unsigned int *)pad + i) ^= OPAD_DATA;
3637
3638		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3639			goto out;
3640
3641		/* convert the ipad and opad digest to network order */
3642		chcr_change_order(actx->h_iopad, param.result_size);
3643		chcr_change_order(o_ptr, param.result_size);
3644		key_ctx_len = sizeof(struct _key_ctx) +
3645			roundup(keys.enckeylen, 16) +
3646			(param.result_size + align) * 2;
3647		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3648						0, 1, key_ctx_len >> 4);
3649		actx->auth_mode = param.auth_mode;
3650		chcr_free_shash(base_hash);
3651
3652		memzero_explicit(&keys, sizeof(keys));
3653		return 0;
3654	}
3655out:
3656	aeadctx->enckey_len = 0;
3657	memzero_explicit(&keys, sizeof(keys));
3658	if (!IS_ERR(base_hash))
3659		chcr_free_shash(base_hash);
3660	return -EINVAL;
3661}
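/*
 * Illustrative sketch, not part of the driver source: the pad handling
 * above is the standard RFC 2104 HMAC precomputation. The block-sized key
 * K' is XORed with repeated 0x36 (ipad) and 0x5c (opad) bytes --
 * IPAD_DATA/OPAD_DATA are assumed to be those bytes replicated per 32-bit
 * word -- and one compression-function pass over each padded block is
 * exported, so the hardware only needs the partial states H(K' ^ ipad)
 * and H(K' ^ opad):
 *
 *	for (i = 0; i < bs; i++) {
 *		ipad[i] = kprime[i] ^ 0x36;
 *		opad[i] = kprime[i] ^ 0x5c;
 *	}
 *
 * (kprime, ipad and opad are placeholder buffer names.)
 */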
3662
3663static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3664					const u8 *key, unsigned int keylen)
3665{
3666	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3667	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3668	struct crypto_authenc_keys keys;
3669	int err;
3670	/* keys contains both the authentication and cipher keys */
3671	unsigned int subtype;
3672	int key_ctx_len = 0;
3673	unsigned char ck_size = 0;
3674
3675	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3676	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3677			      & CRYPTO_TFM_REQ_MASK);
3678	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3679	if (err)
3680		goto out;
3681
3682	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3683		goto out;
3684
3685	subtype = get_aead_subtype(authenc);
3686	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3687	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3688		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3689			goto out;
3690		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3691			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3692		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3693	}
3694	if (keys.enckeylen == AES_KEYSIZE_128) {
3695		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3696	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3697		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3698	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3699		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3700	} else {
3701		pr_err("Unsupported cipher key %d\n", keys.enckeylen);
3702		goto out;
3703	}
3704	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3705	aeadctx->enckey_len = keys.enckeylen;
3706	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3707	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3708		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3709				aeadctx->enckey_len << 3);
3710	}
3711	key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3712
3713	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3714						0, key_ctx_len >> 4);
3715	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3716	memzero_explicit(&keys, sizeof(keys));
3717	return 0;
3718out:
3719	aeadctx->enckey_len = 0;
3720	memzero_explicit(&keys, sizeof(keys));
3721	return -EINVAL;
3722}
3723
3724static int chcr_aead_op(struct aead_request *req,
3725			int size,
3726			create_wr_t create_wr_fn)
3727{
3728	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3729	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3730	struct chcr_context *ctx = a_ctx(tfm);
3731	struct uld_ctx *u_ctx = ULD_CTX(ctx);
3732	struct sk_buff *skb;
3733	struct chcr_dev *cdev;
3734
3735	cdev = a_ctx(tfm)->dev;
3736	if (!cdev) {
3737		pr_err("%s : No crypto device.\n", __func__);
3738		return -ENXIO;
3739	}
3740
3741	if (chcr_inc_wrcount(cdev)) {
3742	/* Detach state for CHCR means lldi or padap has been freed;
3743	 * we cannot take a WR reference here, so use the software fallback.
3744	 */
3745		return chcr_aead_fallback(req, reqctx->op);
3746	}
3747
3748	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3749					reqctx->txqidx) &&
3750		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
3751			chcr_dec_wrcount(cdev);
3752			return -ENOSPC;
3753	}
3754
3755	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3756	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
3757		pr_err("RFC4106: Invalid value of assoclen %d\n",
3758		       req->assoclen);
		chcr_dec_wrcount(cdev);	/* drop the WR reference taken above */
3759		return -EINVAL;
3760	}
3761
3762	/* Form a WR from req */
3763	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3764
3765	if (IS_ERR_OR_NULL(skb)) {
3766		chcr_dec_wrcount(cdev);
3767		return PTR_ERR_OR_ZERO(skb);
3768	}
3769
3770	skb->dev = u_ctx->lldi.ports[0];
3771	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
3772	chcr_send_wr(skb);
3773	return -EINPROGRESS;
3774}
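/*
 * Usage sketch, not part of the driver source: callers of chcr_aead_op()
 * see the normal asynchronous crypto API contract -- -EINPROGRESS now
 * (or -ENOSPC when the queue is full and backlogging is not allowed),
 * with the final status delivered through the request callback. A
 * synchronous user would typically wrap the call with the generic wait
 * helpers:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
 */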
3775
3776static int chcr_aead_encrypt(struct aead_request *req)
3777{
3778	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3779	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3780	struct chcr_context *ctx = a_ctx(tfm);
3781	unsigned int cpu;
3782
3783	cpu = get_cpu();
3784	reqctx->txqidx = cpu % ctx->ntxq;
3785	reqctx->rxqidx = cpu % ctx->nrxq;
3786	put_cpu();
3787
3788	reqctx->verify = VERIFY_HW;
3789	reqctx->op = CHCR_ENCRYPT_OP;
3790
3791	switch (get_aead_subtype(tfm)) {
3792	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3793	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3794	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3795	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3796		return chcr_aead_op(req, 0, create_authenc_wr);
3797	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3798	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3799		return chcr_aead_op(req, 0, create_aead_ccm_wr);
3800	default:
3801		return chcr_aead_op(req, 0, create_gcm_wr);
3802	}
3803}
3804
3805static int chcr_aead_decrypt(struct aead_request *req)
3806{
3807	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3808	struct chcr_context *ctx = a_ctx(tfm);
3809	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3810	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3811	int size;
3812	unsigned int cpu;
3813
3814	cpu = get_cpu();
3815	reqctx->txqidx = cpu % ctx->ntxq;
3816	reqctx->rxqidx = cpu % ctx->nrxq;
3817	put_cpu();
3818
3819	if (aeadctx->mayverify == VERIFY_SW) {
3820		size = crypto_aead_maxauthsize(tfm);
3821		reqctx->verify = VERIFY_SW;
3822	} else {
3823		size = 0;
3824		reqctx->verify = VERIFY_HW;
3825	}
3826	reqctx->op = CHCR_DECRYPT_OP;
3827	switch (get_aead_subtype(tfm)) {
3828	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3829	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3830	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3831	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3832		return chcr_aead_op(req, size, create_authenc_wr);
3833	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3834	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3835		return chcr_aead_op(req, size, create_aead_ccm_wr);
3836	default:
3837		return chcr_aead_op(req, size, create_gcm_wr);
3838	}
3839}
3840
3841static struct chcr_alg_template driver_algs[] = {
3842	/* AES-CBC */
3843	{
3844		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3845		.is_registered = 0,
3846		.alg.skcipher = {
3847			.base.cra_name		= "cbc(aes)",
3848			.base.cra_driver_name	= "cbc-aes-chcr",
3849			.base.cra_blocksize	= AES_BLOCK_SIZE,
3850
3851			.init			= chcr_init_tfm,
3852			.exit			= chcr_exit_tfm,
3853			.min_keysize		= AES_MIN_KEY_SIZE,
3854			.max_keysize		= AES_MAX_KEY_SIZE,
3855			.ivsize			= AES_BLOCK_SIZE,
3856			.setkey			= chcr_aes_cbc_setkey,
3857			.encrypt		= chcr_aes_encrypt,
3858			.decrypt		= chcr_aes_decrypt,
3859			}
3860	},
3861	{
3862		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3863		.is_registered = 0,
3864		.alg.skcipher = {
3865			.base.cra_name		= "xts(aes)",
3866			.base.cra_driver_name	= "xts-aes-chcr",
3867			.base.cra_blocksize	= AES_BLOCK_SIZE,
3868
3869			.init			= chcr_init_tfm,
3870			.exit			= chcr_exit_tfm,
3871			.min_keysize		= 2 * AES_MIN_KEY_SIZE,
3872			.max_keysize		= 2 * AES_MAX_KEY_SIZE,
3873			.ivsize			= AES_BLOCK_SIZE,
3874			.setkey			= chcr_aes_xts_setkey,
3875			.encrypt		= chcr_aes_encrypt,
3876			.decrypt		= chcr_aes_decrypt,
3877			}
3878	},
3879	{
3880		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3881		.is_registered = 0,
3882		.alg.skcipher = {
3883			.base.cra_name		= "ctr(aes)",
3884			.base.cra_driver_name	= "ctr-aes-chcr",
3885			.base.cra_blocksize	= 1,
3886
3887			.init			= chcr_init_tfm,
3888			.exit			= chcr_exit_tfm,
3889			.min_keysize		= AES_MIN_KEY_SIZE,
3890			.max_keysize		= AES_MAX_KEY_SIZE,
3891			.ivsize			= AES_BLOCK_SIZE,
3892			.setkey			= chcr_aes_ctr_setkey,
3893			.encrypt		= chcr_aes_encrypt,
3894			.decrypt		= chcr_aes_decrypt,
3895		}
3896	},
3897	{
3898		.type = CRYPTO_ALG_TYPE_SKCIPHER |
3899			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3900		.is_registered = 0,
3901		.alg.skcipher = {
3902			.base.cra_name		= "rfc3686(ctr(aes))",
3903			.base.cra_driver_name	= "rfc3686-ctr-aes-chcr",
3904			.base.cra_blocksize	= 1,
3905
3906			.init			= chcr_rfc3686_init,
3907			.exit			= chcr_exit_tfm,
3908			.min_keysize		= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3909			.max_keysize		= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3910			.ivsize			= CTR_RFC3686_IV_SIZE,
3911			.setkey			= chcr_aes_rfc3686_setkey,
3912			.encrypt		= chcr_aes_encrypt,
3913			.decrypt		= chcr_aes_decrypt,
3914		}
3915	},
3916	/* SHA */
3917	{
3918		.type = CRYPTO_ALG_TYPE_AHASH,
3919		.is_registered = 0,
3920		.alg.hash = {
3921			.halg.digestsize = SHA1_DIGEST_SIZE,
3922			.halg.base = {
3923				.cra_name = "sha1",
3924				.cra_driver_name = "sha1-chcr",
3925				.cra_blocksize = SHA1_BLOCK_SIZE,
3926			}
3927		}
3928	},
3929	{
3930		.type = CRYPTO_ALG_TYPE_AHASH,
3931		.is_registered = 0,
3932		.alg.hash = {
3933			.halg.digestsize = SHA256_DIGEST_SIZE,
3934			.halg.base = {
3935				.cra_name = "sha256",
3936				.cra_driver_name = "sha256-chcr",
3937				.cra_blocksize = SHA256_BLOCK_SIZE,
3938			}
3939		}
3940	},
3941	{
3942		.type = CRYPTO_ALG_TYPE_AHASH,
3943		.is_registered = 0,
3944		.alg.hash = {
3945			.halg.digestsize = SHA224_DIGEST_SIZE,
3946			.halg.base = {
3947				.cra_name = "sha224",
3948				.cra_driver_name = "sha224-chcr",
3949				.cra_blocksize = SHA224_BLOCK_SIZE,
3950			}
3951		}
3952	},
3953	{
3954		.type = CRYPTO_ALG_TYPE_AHASH,
3955		.is_registered = 0,
3956		.alg.hash = {
3957			.halg.digestsize = SHA384_DIGEST_SIZE,
3958			.halg.base = {
3959				.cra_name = "sha384",
3960				.cra_driver_name = "sha384-chcr",
3961				.cra_blocksize = SHA384_BLOCK_SIZE,
3962			}
3963		}
3964	},
3965	{
3966		.type = CRYPTO_ALG_TYPE_AHASH,
3967		.is_registered = 0,
3968		.alg.hash = {
3969			.halg.digestsize = SHA512_DIGEST_SIZE,
3970			.halg.base = {
3971				.cra_name = "sha512",
3972				.cra_driver_name = "sha512-chcr",
3973				.cra_blocksize = SHA512_BLOCK_SIZE,
3974			}
3975		}
3976	},
3977	/* HMAC */
3978	{
3979		.type = CRYPTO_ALG_TYPE_HMAC,
3980		.is_registered = 0,
3981		.alg.hash = {
3982			.halg.digestsize = SHA1_DIGEST_SIZE,
3983			.halg.base = {
3984				.cra_name = "hmac(sha1)",
3985				.cra_driver_name = "hmac-sha1-chcr",
3986				.cra_blocksize = SHA1_BLOCK_SIZE,
3987			}
3988		}
3989	},
3990	{
3991		.type = CRYPTO_ALG_TYPE_HMAC,
3992		.is_registered = 0,
3993		.alg.hash = {
3994			.halg.digestsize = SHA224_DIGEST_SIZE,
3995			.halg.base = {
3996				.cra_name = "hmac(sha224)",
3997				.cra_driver_name = "hmac-sha224-chcr",
3998				.cra_blocksize = SHA224_BLOCK_SIZE,
3999			}
4000		}
4001	},
4002	{
4003		.type = CRYPTO_ALG_TYPE_HMAC,
4004		.is_registered = 0,
4005		.alg.hash = {
4006			.halg.digestsize = SHA256_DIGEST_SIZE,
4007			.halg.base = {
4008				.cra_name = "hmac(sha256)",
4009				.cra_driver_name = "hmac-sha256-chcr",
4010				.cra_blocksize = SHA256_BLOCK_SIZE,
4011			}
4012		}
4013	},
4014	{
4015		.type = CRYPTO_ALG_TYPE_HMAC,
4016		.is_registered = 0,
4017		.alg.hash = {
4018			.halg.digestsize = SHA384_DIGEST_SIZE,
4019			.halg.base = {
4020				.cra_name = "hmac(sha384)",
4021				.cra_driver_name = "hmac-sha384-chcr",
4022				.cra_blocksize = SHA384_BLOCK_SIZE,
4023			}
4024		}
4025	},
4026	{
4027		.type = CRYPTO_ALG_TYPE_HMAC,
4028		.is_registered = 0,
4029		.alg.hash = {
4030			.halg.digestsize = SHA512_DIGEST_SIZE,
4031			.halg.base = {
4032				.cra_name = "hmac(sha512)",
4033				.cra_driver_name = "hmac-sha512-chcr",
4034				.cra_blocksize = SHA512_BLOCK_SIZE,
4035			}
4036		}
4037	},
4038	/* Add AEAD Algorithms */
4039	{
4040		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
4041		.is_registered = 0,
4042		.alg.aead = {
4043			.base = {
4044				.cra_name = "gcm(aes)",
4045				.cra_driver_name = "gcm-aes-chcr",
4046				.cra_blocksize	= 1,
4047				.cra_priority = CHCR_AEAD_PRIORITY,
4048				.cra_ctxsize =	sizeof(struct chcr_context) +
4049						sizeof(struct chcr_aead_ctx) +
4050						sizeof(struct chcr_gcm_ctx),
4051			},
4052			.ivsize = GCM_AES_IV_SIZE,
4053			.maxauthsize = GHASH_DIGEST_SIZE,
4054			.setkey = chcr_gcm_setkey,
4055			.setauthsize = chcr_gcm_setauthsize,
4056		}
4057	},
4058	{
4059		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
4060		.is_registered = 0,
4061		.alg.aead = {
4062			.base = {
4063				.cra_name = "rfc4106(gcm(aes))",
4064				.cra_driver_name = "rfc4106-gcm-aes-chcr",
4065				.cra_blocksize	 = 1,
4066				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4067				.cra_ctxsize =	sizeof(struct chcr_context) +
4068						sizeof(struct chcr_aead_ctx) +
4069						sizeof(struct chcr_gcm_ctx),
4070
4071			},
4072			.ivsize = GCM_RFC4106_IV_SIZE,
4073			.maxauthsize	= GHASH_DIGEST_SIZE,
4074			.setkey = chcr_gcm_setkey,
4075			.setauthsize	= chcr_4106_4309_setauthsize,
4076		}
4077	},
4078	{
4079		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
4080		.is_registered = 0,
4081		.alg.aead = {
4082			.base = {
4083				.cra_name = "ccm(aes)",
4084				.cra_driver_name = "ccm-aes-chcr",
4085				.cra_blocksize	 = 1,
4086				.cra_priority = CHCR_AEAD_PRIORITY,
4087				.cra_ctxsize =	sizeof(struct chcr_context) +
4088						sizeof(struct chcr_aead_ctx),
4089
4090			},
4091			.ivsize = AES_BLOCK_SIZE,
4092			.maxauthsize	= GHASH_DIGEST_SIZE,
4093			.setkey = chcr_aead_ccm_setkey,
4094			.setauthsize	= chcr_ccm_setauthsize,
4095		}
4096	},
4097	{
4098		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
4099		.is_registered = 0,
4100		.alg.aead = {
4101			.base = {
4102				.cra_name = "rfc4309(ccm(aes))",
4103				.cra_driver_name = "rfc4309-ccm-aes-chcr",
4104				.cra_blocksize	 = 1,
4105				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4106				.cra_ctxsize =	sizeof(struct chcr_context) +
4107						sizeof(struct chcr_aead_ctx),
4108
4109			},
4110			.ivsize = 8,
4111			.maxauthsize	= GHASH_DIGEST_SIZE,
4112			.setkey = chcr_aead_rfc4309_setkey,
4113			.setauthsize = chcr_4106_4309_setauthsize,
4114		}
4115	},
4116	{
4117		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4118		.is_registered = 0,
4119		.alg.aead = {
4120			.base = {
4121				.cra_name = "authenc(hmac(sha1),cbc(aes))",
4122				.cra_driver_name =
4123					"authenc-hmac-sha1-cbc-aes-chcr",
4124				.cra_blocksize	 = AES_BLOCK_SIZE,
4125				.cra_priority = CHCR_AEAD_PRIORITY,
4126				.cra_ctxsize =	sizeof(struct chcr_context) +
4127						sizeof(struct chcr_aead_ctx) +
4128						sizeof(struct chcr_authenc_ctx),
4129
4130			},
4131			.ivsize = AES_BLOCK_SIZE,
4132			.maxauthsize = SHA1_DIGEST_SIZE,
4133			.setkey = chcr_authenc_setkey,
4134			.setauthsize = chcr_authenc_setauthsize,
4135		}
4136	},
4137	{
4138		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4139		.is_registered = 0,
4140		.alg.aead = {
4141			.base = {
4142
4143				.cra_name = "authenc(hmac(sha256),cbc(aes))",
4144				.cra_driver_name =
4145					"authenc-hmac-sha256-cbc-aes-chcr",
4146				.cra_blocksize	 = AES_BLOCK_SIZE,
4147				.cra_priority = CHCR_AEAD_PRIORITY,
4148				.cra_ctxsize =	sizeof(struct chcr_context) +
4149						sizeof(struct chcr_aead_ctx) +
4150						sizeof(struct chcr_authenc_ctx),
4151
4152			},
4153			.ivsize = AES_BLOCK_SIZE,
4154			.maxauthsize	= SHA256_DIGEST_SIZE,
4155			.setkey = chcr_authenc_setkey,
4156			.setauthsize = chcr_authenc_setauthsize,
4157		}
4158	},
4159	{
4160		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4161		.is_registered = 0,
4162		.alg.aead = {
4163			.base = {
4164				.cra_name = "authenc(hmac(sha224),cbc(aes))",
4165				.cra_driver_name =
4166					"authenc-hmac-sha224-cbc-aes-chcr",
4167				.cra_blocksize	 = AES_BLOCK_SIZE,
4168				.cra_priority = CHCR_AEAD_PRIORITY,
4169				.cra_ctxsize =	sizeof(struct chcr_context) +
4170						sizeof(struct chcr_aead_ctx) +
4171						sizeof(struct chcr_authenc_ctx),
4172			},
4173			.ivsize = AES_BLOCK_SIZE,
4174			.maxauthsize = SHA224_DIGEST_SIZE,
4175			.setkey = chcr_authenc_setkey,
4176			.setauthsize = chcr_authenc_setauthsize,
4177		}
4178	},
4179	{
4180		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4181		.is_registered = 0,
4182		.alg.aead = {
4183			.base = {
4184				.cra_name = "authenc(hmac(sha384),cbc(aes))",
4185				.cra_driver_name =
4186					"authenc-hmac-sha384-cbc-aes-chcr",
4187				.cra_blocksize	 = AES_BLOCK_SIZE,
4188				.cra_priority = CHCR_AEAD_PRIORITY,
4189				.cra_ctxsize =	sizeof(struct chcr_context) +
4190						sizeof(struct chcr_aead_ctx) +
4191						sizeof(struct chcr_authenc_ctx),
4192
4193			},
4194			.ivsize = AES_BLOCK_SIZE,
4195			.maxauthsize = SHA384_DIGEST_SIZE,
4196			.setkey = chcr_authenc_setkey,
4197			.setauthsize = chcr_authenc_setauthsize,
4198		}
4199	},
4200	{
4201		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4202		.is_registered = 0,
4203		.alg.aead = {
4204			.base = {
4205				.cra_name = "authenc(hmac(sha512),cbc(aes))",
4206				.cra_driver_name =
4207					"authenc-hmac-sha512-cbc-aes-chcr",
4208				.cra_blocksize	 = AES_BLOCK_SIZE,
4209				.cra_priority = CHCR_AEAD_PRIORITY,
4210				.cra_ctxsize =	sizeof(struct chcr_context) +
4211						sizeof(struct chcr_aead_ctx) +
4212						sizeof(struct chcr_authenc_ctx),
4213
4214			},
4215			.ivsize = AES_BLOCK_SIZE,
4216			.maxauthsize = SHA512_DIGEST_SIZE,
4217			.setkey = chcr_authenc_setkey,
4218			.setauthsize = chcr_authenc_setauthsize,
4219		}
4220	},
4221	{
4222		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4223		.is_registered = 0,
4224		.alg.aead = {
4225			.base = {
4226				.cra_name = "authenc(digest_null,cbc(aes))",
4227				.cra_driver_name =
4228					"authenc-digest_null-cbc-aes-chcr",
4229				.cra_blocksize	 = AES_BLOCK_SIZE,
4230				.cra_priority = CHCR_AEAD_PRIORITY,
4231				.cra_ctxsize =	sizeof(struct chcr_context) +
4232						sizeof(struct chcr_aead_ctx) +
4233						sizeof(struct chcr_authenc_ctx),
4234
4235			},
4236			.ivsize  = AES_BLOCK_SIZE,
4237			.maxauthsize = 0,
4238			.setkey  = chcr_aead_digest_null_setkey,
4239			.setauthsize = chcr_authenc_null_setauthsize,
4240		}
4241	},
4242	{
4243		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4244		.is_registered = 0,
4245		.alg.aead = {
4246			.base = {
4247				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4248				.cra_driver_name =
4249				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4250				.cra_blocksize	 = 1,
4251				.cra_priority = CHCR_AEAD_PRIORITY,
4252				.cra_ctxsize =	sizeof(struct chcr_context) +
4253						sizeof(struct chcr_aead_ctx) +
4254						sizeof(struct chcr_authenc_ctx),
4255
4256			},
4257			.ivsize = CTR_RFC3686_IV_SIZE,
4258			.maxauthsize = SHA1_DIGEST_SIZE,
4259			.setkey = chcr_authenc_setkey,
4260			.setauthsize = chcr_authenc_setauthsize,
4261		}
4262	},
4263	{
4264		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4265		.is_registered = 0,
4266		.alg.aead = {
4267			.base = {
4268
4269				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4270				.cra_driver_name =
4271				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4272				.cra_blocksize	 = 1,
4273				.cra_priority = CHCR_AEAD_PRIORITY,
4274				.cra_ctxsize =	sizeof(struct chcr_context) +
4275						sizeof(struct chcr_aead_ctx) +
4276						sizeof(struct chcr_authenc_ctx),
4277
4278			},
4279			.ivsize = CTR_RFC3686_IV_SIZE,
4280			.maxauthsize	= SHA256_DIGEST_SIZE,
4281			.setkey = chcr_authenc_setkey,
4282			.setauthsize = chcr_authenc_setauthsize,
4283		}
4284	},
4285	{
4286		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4287		.is_registered = 0,
4288		.alg.aead = {
4289			.base = {
4290				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4291				.cra_driver_name =
4292				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4293				.cra_blocksize	 = 1,
4294				.cra_priority = CHCR_AEAD_PRIORITY,
4295				.cra_ctxsize =	sizeof(struct chcr_context) +
4296						sizeof(struct chcr_aead_ctx) +
4297						sizeof(struct chcr_authenc_ctx),
4298			},
4299			.ivsize = CTR_RFC3686_IV_SIZE,
4300			.maxauthsize = SHA224_DIGEST_SIZE,
4301			.setkey = chcr_authenc_setkey,
4302			.setauthsize = chcr_authenc_setauthsize,
4303		}
4304	},
4305	{
4306		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4307		.is_registered = 0,
4308		.alg.aead = {
4309			.base = {
4310				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4311				.cra_driver_name =
4312				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4313				.cra_blocksize	 = 1,
4314				.cra_priority = CHCR_AEAD_PRIORITY,
4315				.cra_ctxsize =	sizeof(struct chcr_context) +
4316						sizeof(struct chcr_aead_ctx) +
4317						sizeof(struct chcr_authenc_ctx),
4318
4319			},
4320			.ivsize = CTR_RFC3686_IV_SIZE,
4321			.maxauthsize = SHA384_DIGEST_SIZE,
4322			.setkey = chcr_authenc_setkey,
4323			.setauthsize = chcr_authenc_setauthsize,
4324		}
4325	},
4326	{
4327		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4328		.is_registered = 0,
4329		.alg.aead = {
4330			.base = {
4331				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4332				.cra_driver_name =
4333				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4334				.cra_blocksize	 = 1,
4335				.cra_priority = CHCR_AEAD_PRIORITY,
4336				.cra_ctxsize =	sizeof(struct chcr_context) +
4337						sizeof(struct chcr_aead_ctx) +
4338						sizeof(struct chcr_authenc_ctx),
4339
4340			},
4341			.ivsize = CTR_RFC3686_IV_SIZE,
4342			.maxauthsize = SHA512_DIGEST_SIZE,
4343			.setkey = chcr_authenc_setkey,
4344			.setauthsize = chcr_authenc_setauthsize,
4345		}
4346	},
4347	{
4348		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4349		.is_registered = 0,
4350		.alg.aead = {
4351			.base = {
4352				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4353				.cra_driver_name =
4354				"authenc-digest_null-rfc3686-ctr-aes-chcr",
4355				.cra_blocksize	 = 1,
4356				.cra_priority = CHCR_AEAD_PRIORITY,
4357				.cra_ctxsize =	sizeof(struct chcr_context) +
4358						sizeof(struct chcr_aead_ctx) +
4359						sizeof(struct chcr_authenc_ctx),
4360
4361			},
4362			.ivsize  = CTR_RFC3686_IV_SIZE,
4363			.maxauthsize = 0,
4364			.setkey  = chcr_aead_digest_null_setkey,
4365			.setauthsize = chcr_authenc_null_setauthsize,
4366		}
4367	},
4368};
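/*
 * Usage sketch, not part of the driver source: once the table above is
 * registered, these implementations compete with others by cra_priority
 * when a generic name such as "gcm(aes)" is requested; the driver name
 * pins the choice explicitly, e.g.:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm-aes-chcr", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_aead_setkey(tfm, key, keylen);
 *		crypto_aead_setauthsize(tfm, GHASH_DIGEST_SIZE);
 *		... issue aead_request_alloc()/crypto_aead_encrypt() ...
 *		crypto_free_aead(tfm);
 *	}
 */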
4369
4370/*
4371 *	chcr_unregister_alg - Deregister crypto algorithms from the
4372 *	kernel framework.
4373 */
4374static int chcr_unregister_alg(void)
4375{
4376	int i;
4377
4378	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4379		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4380		case CRYPTO_ALG_TYPE_SKCIPHER:
4381			if (driver_algs[i].is_registered && refcount_read(
4382			    &driver_algs[i].alg.skcipher.base.cra_refcnt)
4383			    == 1) {
4384				crypto_unregister_skcipher(
4385						&driver_algs[i].alg.skcipher);
4386				driver_algs[i].is_registered = 0;
4387			}
4388			break;
4389		case CRYPTO_ALG_TYPE_AEAD:
4390			if (driver_algs[i].is_registered && refcount_read(
4391			    &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
4392				crypto_unregister_aead(
4393						&driver_algs[i].alg.aead);
4394				driver_algs[i].is_registered = 0;
4395			}
4396			break;
4397		case CRYPTO_ALG_TYPE_AHASH:
4398			if (driver_algs[i].is_registered && refcount_read(
4399			    &driver_algs[i].alg.hash.halg.base.cra_refcnt)
4400			    == 1) {
4401				crypto_unregister_ahash(
4402						&driver_algs[i].alg.hash);
4403				driver_algs[i].is_registered = 0;
4404			}
4405			break;
4406		}
4407	}
4408	return 0;
4409}
4410
4411#define SZ_AHASH_CTX sizeof(struct chcr_context)
4412#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4413#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4414
4415/*
4416 *	chcr_register_alg - Register crypto algorithms with kernel framework.
4417 */
4418static int chcr_register_alg(void)
4419{
4420	struct crypto_alg ai;
4421	struct ahash_alg *a_hash;
4422	int err = 0, i;
4423	char *name = NULL;
4424
4425	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4426		if (driver_algs[i].is_registered)
4427			continue;
4428		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4429		case CRYPTO_ALG_TYPE_SKCIPHER:
4430			driver_algs[i].alg.skcipher.base.cra_priority =
4431				CHCR_CRA_PRIORITY;
4432			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
4433			driver_algs[i].alg.skcipher.base.cra_flags =
4434				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
4435				CRYPTO_ALG_ALLOCATES_MEMORY |
4436				CRYPTO_ALG_NEED_FALLBACK;
4437			driver_algs[i].alg.skcipher.base.cra_ctxsize =
4438				sizeof(struct chcr_context) +
4439				sizeof(struct ablk_ctx);
4440			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
4441
4442			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
4443			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
4444			break;
4445		case CRYPTO_ALG_TYPE_AEAD:
4446			driver_algs[i].alg.aead.base.cra_flags =
4447				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
4448				CRYPTO_ALG_ALLOCATES_MEMORY;
4449			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4450			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4451			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4452			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4453			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4454			err = crypto_register_aead(&driver_algs[i].alg.aead);
4455			name = driver_algs[i].alg.aead.base.cra_driver_name;
4456			break;
4457		case CRYPTO_ALG_TYPE_AHASH:
4458			a_hash = &driver_algs[i].alg.hash;
4459			a_hash->update = chcr_ahash_update;
4460			a_hash->final = chcr_ahash_final;
4461			a_hash->finup = chcr_ahash_finup;
4462			a_hash->digest = chcr_ahash_digest;
4463			a_hash->export = chcr_ahash_export;
4464			a_hash->import = chcr_ahash_import;
4465			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4466			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4467			a_hash->halg.base.cra_module = THIS_MODULE;
4468			a_hash->halg.base.cra_flags =
4469				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4470			a_hash->halg.base.cra_alignmask = 0;
4471			a_hash->halg.base.cra_exit = NULL;
4472
4473			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4474				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4475				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4476				a_hash->init = chcr_hmac_init;
4477				a_hash->setkey = chcr_ahash_setkey;
4478				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4479			} else {
4480				a_hash->init = chcr_sha_init;
4481				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4482				a_hash->halg.base.cra_init = chcr_sha_cra_init;
4483			}
4484			err = crypto_register_ahash(&driver_algs[i].alg.hash);
4485			ai = driver_algs[i].alg.hash.halg.base;
4486			name = ai.cra_driver_name;
4487			break;
4488		}
4489		if (err) {
4490			pr_err("%s : Algorithm registration failed\n", name);
4491			goto register_err;
4492		} else {
4493			driver_algs[i].is_registered = 1;
4494		}
4495	}
4496	return 0;
4497
4498register_err:
4499	chcr_unregister_alg();
4500	return err;
4501}
4502
4503/*
4504 *	start_crypto - Register the crypto algorithms.
4505 *	This should be called once when the first device comes up. After this,
4506 *	the kernel will start calling the driver APIs for crypto operations.
4507 */
4508int start_crypto(void)
4509{
4510	return chcr_register_alg();
4511}
4512
4513/*
4514 *	stop_crypto - Deregister all the crypto algorithms from the kernel.
4515 *	This should be called once when the last device goes down. After this,
4516 *	the kernel will not call the driver APIs for crypto operations.
4517 */
4518int stop_crypto(void)
4519{
4520	chcr_unregister_alg();
4521	return 0;
4522}
v4.17
  41
  42#define pr_fmt(fmt) "chcr:" fmt
  43
  44#include <linux/kernel.h>
  45#include <linux/module.h>
  46#include <linux/crypto.h>
  47#include <linux/cryptohash.h>
  48#include <linux/skbuff.h>
  49#include <linux/rtnetlink.h>
  50#include <linux/highmem.h>
  51#include <linux/scatterlist.h>
  52
  53#include <crypto/aes.h>
  54#include <crypto/algapi.h>
  55#include <crypto/hash.h>
  56#include <crypto/gcm.h>
  57#include <crypto/sha.h>
  58#include <crypto/authenc.h>
  59#include <crypto/ctr.h>
  60#include <crypto/gf128mul.h>
  61#include <crypto/internal/aead.h>
  62#include <crypto/null.h>
  63#include <crypto/internal/skcipher.h>
  64#include <crypto/aead.h>
  65#include <crypto/scatterwalk.h>
  66#include <crypto/internal/hash.h>
  67
  68#include "t4fw_api.h"
  69#include "t4_msg.h"
  70#include "chcr_core.h"
  71#include "chcr_algo.h"
  72#include "chcr_crypto.h"
  73
  74#define IV AES_BLOCK_SIZE
  75
  76static unsigned int sgl_ent_len[] = {
  77	0, 0, 16, 24, 40, 48, 64, 72, 88,
  78	96, 112, 120, 136, 144, 160, 168, 184,
  79	192, 208, 216, 232, 240, 256, 264, 280,
  80	288, 304, 312, 328, 336, 352, 360, 376
  81};
  82
  83static unsigned int dsgl_ent_len[] = {
  84	0, 32, 32, 48, 48, 64, 64, 80, 80,
  85	112, 112, 128, 128, 144, 144, 160, 160,
  86	192, 192, 208, 208, 224, 224, 240, 240,
  87	272, 272, 288, 288, 304, 304, 320, 320
  88};
  89
  90static u32 round_constant[11] = {
  91	0x01000000, 0x02000000, 0x04000000, 0x08000000,
  92	0x10000000, 0x20000000, 0x40000000, 0x80000000,
  93	0x1B000000, 0x36000000, 0x6C000000
  94};
  95
  96static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
  97				   unsigned char *input, int err);
  98
  99static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
 100{
 101	return ctx->crypto_ctx->aeadctx;
 102}
 103
 104static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
 105{
 106	return ctx->crypto_ctx->ablkctx;
 107}
 108
 109static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
 110{
 111	return ctx->crypto_ctx->hmacctx;
 112}
 113
 114static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
 115{
 116	return gctx->ctx->gcm;
 117}
 118
 119static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
 120{
 121	return gctx->ctx->authenc;
 122}
 123
 124static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
 125{
 126	return ctx->dev->u_ctx;
 127}
 128
 129static inline int is_ofld_imm(const struct sk_buff *skb)
 130{
 131	return (skb->len <= SGE_MAX_WR_LEN);
 132}
 133
 134static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
 135{
 136	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
 137}
 138
 139static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
 140			 unsigned int entlen,
 141			 unsigned int skip)
 142{
 143	int nents = 0;
 144	unsigned int less;
 145	unsigned int skip_len = 0;
 146
 147	while (sg && skip) {
 148		if (sg_dma_len(sg) <= skip) {
 149			skip -= sg_dma_len(sg);
 150			skip_len = 0;
 151			sg = sg_next(sg);
 152		} else {
 153			skip_len = skip;
 154			skip = 0;
 155		}
 156	}
 157
 158	while (sg && reqlen) {
 159		less = min(reqlen, sg_dma_len(sg) - skip_len);
 160		nents += DIV_ROUND_UP(less, entlen);
 161		reqlen -= less;
 162		skip_len = 0;
 163		sg = sg_next(sg);
 164	}
 165	return nents;
 166}
 167
 168static inline int get_aead_subtype(struct crypto_aead *aead)
 169{
 170	struct aead_alg *alg = crypto_aead_alg(aead);
 171	struct chcr_alg_template *chcr_crypto_alg =
 172		container_of(alg, struct chcr_alg_template, alg.aead);
 173	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 174}
 175
 176void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
 177{
 178	u8 temp[SHA512_DIGEST_SIZE];
 179	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 180	int authsize = crypto_aead_authsize(tfm);
 181	struct cpl_fw6_pld *fw6_pld;
 182	int cmp = 0;
 183
 184	fw6_pld = (struct cpl_fw6_pld *)input;
 185	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
 186	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
 187		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
 188	} else {
 189
 190		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
 191				authsize, req->assoclen +
 192				req->cryptlen - authsize);
 193		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
 194	}
 195	if (cmp)
 196		*err = -EBADMSG;
 197	else
 198		*err = 0;
 199}
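/*
 * Illustrative note, not part of the driver source: chcr_verify_tag()
 * uses crypto_memneq() rather than memcmp() so that the ICV comparison
 * runs in constant time regardless of where the first mismatching byte
 * is. The same pattern applies to any software tag check, e.g.:
 *
 *	if (crypto_memneq(computed_tag, received_tag, authsize))
 *		return -EBADMSG;
 *
 * (computed_tag and received_tag are placeholder names.)
 */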
 200
 201static inline void chcr_handle_aead_resp(struct aead_request *req,
 202					 unsigned char *input,
 203					 int err)
 204{
 205	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 206	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 207	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
 208
 209	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
 210	if (reqctx->b0_dma)
 211		dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
 212				 reqctx->b0_len, DMA_BIDIRECTIONAL);
 213	if (reqctx->verify == VERIFY_SW) {
 214		chcr_verify_tag(req, input, &err);
 215		reqctx->verify = VERIFY_HW;
 216	}
 217	req->base.complete(&req->base, err);
 218}
 219
 220static void get_aes_decrypt_key(unsigned char *dec_key,
 221				       const unsigned char *key,
 222				       unsigned int keylength)
 223{
 224	u32 temp;
 225	u32 w_ring[MAX_NK];
 226	int i, j, k;
 227	u8  nr, nk;
 228
 229	switch (keylength) {
 230	case AES_KEYLENGTH_128BIT:
 231		nk = KEYLENGTH_4BYTES;
 232		nr = NUMBER_OF_ROUNDS_10;
 233		break;
 234	case AES_KEYLENGTH_192BIT:
 235		nk = KEYLENGTH_6BYTES;
 236		nr = NUMBER_OF_ROUNDS_12;
 237		break;
 238	case AES_KEYLENGTH_256BIT:
 239		nk = KEYLENGTH_8BYTES;
 240		nr = NUMBER_OF_ROUNDS_14;
 241		break;
 242	default:
 243		return;
 244	}
 245	for (i = 0; i < nk; i++)
 246		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
 247
 248	i = 0;
 249	temp = w_ring[nk - 1];
 250	while (i + nk < (nr + 1) * 4) {
 251		if (!(i % nk)) {
 252			/* RotWord(temp) */
 253			temp = (temp << 8) | (temp >> 24);
 254			temp = aes_ks_subword(temp);
 255			temp ^= round_constant[i / nk];
 256		} else if (nk == 8 && (i % 4 == 0)) {
 257			temp = aes_ks_subword(temp);
 258		}
 259		w_ring[i % nk] ^= temp;
 260		temp = w_ring[i % nk];
 261		i++;
 262	}
 263	i--;
 264	for (k = 0, j = i % nk; k < nk; k++) {
 265		*((u32 *)dec_key + k) = htonl(w_ring[j]);
 266		j--;
 267		if (j < 0)
 268			j += nk;
 269	}
 270}
 271
 272static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
 273{
 274	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
 275
 276	switch (ds) {
 277	case SHA1_DIGEST_SIZE:
 278		base_hash = crypto_alloc_shash("sha1", 0, 0);
 279		break;
 280	case SHA224_DIGEST_SIZE:
 281		base_hash = crypto_alloc_shash("sha224", 0, 0);
 282		break;
 283	case SHA256_DIGEST_SIZE:
 284		base_hash = crypto_alloc_shash("sha256", 0, 0);
 285		break;
 286	case SHA384_DIGEST_SIZE:
 287		base_hash = crypto_alloc_shash("sha384", 0, 0);
 288		break;
 289	case SHA512_DIGEST_SIZE:
 290		base_hash = crypto_alloc_shash("sha512", 0, 0);
 291		break;
 292	}
 293
 294	return base_hash;
 295}
 296
 297static int chcr_compute_partial_hash(struct shash_desc *desc,
 298				     char *iopad, char *result_hash,
 299				     int digest_size)
 300{
 301	struct sha1_state sha1_st;
 302	struct sha256_state sha256_st;
 303	struct sha512_state sha512_st;
 304	int error;
 305
 306	if (digest_size == SHA1_DIGEST_SIZE) {
 307		error = crypto_shash_init(desc) ?:
 308			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
 309			crypto_shash_export(desc, (void *)&sha1_st);
 310		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
 311	} else if (digest_size == SHA224_DIGEST_SIZE) {
 312		error = crypto_shash_init(desc) ?:
 313			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
 314			crypto_shash_export(desc, (void *)&sha256_st);
 315		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
 316
 317	} else if (digest_size == SHA256_DIGEST_SIZE) {
 318		error = crypto_shash_init(desc) ?:
 319			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
 320			crypto_shash_export(desc, (void *)&sha256_st);
 321		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
 322
 323	} else if (digest_size == SHA384_DIGEST_SIZE) {
 324		error = crypto_shash_init(desc) ?:
 325			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
 326			crypto_shash_export(desc, (void *)&sha512_st);
 327		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
 328
 329	} else if (digest_size == SHA512_DIGEST_SIZE) {
 330		error = crypto_shash_init(desc) ?:
 331			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
 332			crypto_shash_export(desc, (void *)&sha512_st);
 333		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
 334	} else {
 335		error = -EINVAL;
 336		pr_err("Unknown digest size %d\n", digest_size);
 337	}
 338	return error;
 339}
 340
 341static void chcr_change_order(char *buf, int ds)
 342{
 343	int i;
 344
 345	if (ds == SHA512_DIGEST_SIZE) {
 346		for (i = 0; i < (ds / sizeof(u64)); i++)
 347			*((__be64 *)buf + i) =
 348				cpu_to_be64(*((u64 *)buf + i));
 349	} else {
 350		for (i = 0; i < (ds / sizeof(u32)); i++)
 351			*((__be32 *)buf + i) =
 352				cpu_to_be32(*((u32 *)buf + i));
 353	}
 354}
 355
 356static inline int is_hmac(struct crypto_tfm *tfm)
 357{
 358	struct crypto_alg *alg = tfm->__crt_alg;
 359	struct chcr_alg_template *chcr_crypto_alg =
 360		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
 361			     alg.hash);
 362	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
 363		return 1;
 364	return 0;
 365}
 366
 367static inline void dsgl_walk_init(struct dsgl_walk *walk,
 368				   struct cpl_rx_phys_dsgl *dsgl)
 369{
 370	walk->dsgl = dsgl;
 371	walk->nents = 0;
 372	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 373}
 374
 375static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
 376{
 377	struct cpl_rx_phys_dsgl *phys_cpl;
 378
 379	phys_cpl = walk->dsgl;
 380
 381	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
 382				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
 383	phys_cpl->pcirlxorder_to_noofsgentr =
 384		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
 385		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
 386		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
 387		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
 388		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
 389		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
 390	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
 391	phys_cpl->rss_hdr_int.qid = htons(qid);
 392	phys_cpl->rss_hdr_int.hash_val = 0;
 393}
 394
 395static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
 396					size_t size,
 397					dma_addr_t *addr)
 398{
 399	int j;
 400
 401	if (!size)
 402		return;
 403	j = walk->nents;
 404	walk->to->len[j % 8] = htons(size);
 405	walk->to->addr[j % 8] = cpu_to_be64(*addr);
 406	j++;
 407	if ((j % 8) == 0)
 408		walk->to++;
 409	walk->nents = j;
 410}
 411
 412static void  dsgl_walk_add_sg(struct dsgl_walk *walk,
 413			   struct scatterlist *sg,
 414			      unsigned int slen,
 415			      unsigned int skip)
 416{
 417	int skip_len = 0;
 418	unsigned int left_size = slen, len = 0;
 419	unsigned int j = walk->nents;
 420	int offset, ent_len;
 421
 422	if (!slen)
 423		return;
 424	while (sg && skip) {
 425		if (sg_dma_len(sg) <= skip) {
 426			skip -= sg_dma_len(sg);
 427			skip_len = 0;
 428			sg = sg_next(sg);
 429		} else {
 430			skip_len = skip;
 431			skip = 0;
 432		}
 433	}
 434
 435	while (left_size && sg) {
 436		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
 437		offset = 0;
 438		while (len) {
 439			ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
 440			walk->to->len[j % 8] = htons(ent_len);
 441			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
 442						      offset + skip_len);
 443			offset += ent_len;
 444			len -= ent_len;
 445			j++;
 446			if ((j % 8) == 0)
 447				walk->to++;
 448		}
 449		walk->last_sg = sg;
 450		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
 451					  skip_len) + skip_len;
 452		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
 453		skip_len = 0;
 454		sg = sg_next(sg);
 455	}
 456	walk->nents = j;
 457}
 458
 459static inline void ulptx_walk_init(struct ulptx_walk *walk,
 460				   struct ulptx_sgl *ulp)
 461{
 462	walk->sgl = ulp;
 463	walk->nents = 0;
 464	walk->pair_idx = 0;
 465	walk->pair = ulp->sge;
 466	walk->last_sg = NULL;
 467	walk->last_sg_len = 0;
 468}
 469
 470static inline void ulptx_walk_end(struct ulptx_walk *walk)
 471{
 472	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
 473			      ULPTX_NSGE_V(walk->nents));
 474}
 475
 476
 477static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
 478					size_t size,
 479					dma_addr_t *addr)
 480{
 481	if (!size)
 482		return;
 483
 484	if (walk->nents == 0) {
 485		walk->sgl->len0 = cpu_to_be32(size);
 486		walk->sgl->addr0 = cpu_to_be64(*addr);
 487	} else {
 488		walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
 489		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
 490		walk->pair_idx = !walk->pair_idx;
 491		if (!walk->pair_idx)
 492			walk->pair++;
 493	}
 494	walk->nents++;
 495}
 496
 497static void  ulptx_walk_add_sg(struct ulptx_walk *walk,
 498					struct scatterlist *sg,
 499			       unsigned int len,
 500			       unsigned int skip)
 501{
 502	int small;
 503	int skip_len = 0;
 504	unsigned int sgmin;
 505
 506	if (!len)
 507		return;
 508	while (sg && skip) {
 509		if (sg_dma_len(sg) <= skip) {
 510			skip -= sg_dma_len(sg);
 511			skip_len = 0;
 512			sg = sg_next(sg);
 513		} else {
 514			skip_len = skip;
 515			skip = 0;
 516		}
 517	}
 518	WARN(!sg, "SG should not be null here\n");
 519	if (sg && (walk->nents == 0)) {
 520		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
 521		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
 522		walk->sgl->len0 = cpu_to_be32(sgmin);
 523		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
 524		walk->nents++;
 525		len -= sgmin;
 526		walk->last_sg = sg;
 527		walk->last_sg_len = sgmin + skip_len;
 528		skip_len += sgmin;
 529		if (sg_dma_len(sg) == skip_len) {
 530			sg = sg_next(sg);
 531			skip_len = 0;
 532		}
 533	}
 534
 535	while (sg && len) {
 536		small = min(sg_dma_len(sg) - skip_len, len);
 537		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
 538		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
 539		walk->pair->addr[walk->pair_idx] =
 540			cpu_to_be64(sg_dma_address(sg) + skip_len);
 541		walk->pair_idx = !walk->pair_idx;
 542		walk->nents++;
 543		if (!walk->pair_idx)
 544			walk->pair++;
 545		len -= sgmin;
 546		skip_len += sgmin;
 547		walk->last_sg = sg;
 548		walk->last_sg_len = skip_len;
 549		if (sg_dma_len(sg) == skip_len) {
 550			sg = sg_next(sg);
 551			skip_len = 0;
 552		}
 553	}
 554}
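/*
 * Layout sketch, not part of the driver source: the ulptx_walk helpers
 * above build the ULP_TX_SC_DSGL format incrementally. The first fragment
 * goes into the inline len0/addr0 slot and every further fragment fills
 * one half of a (len[2], addr[2]) pair, so a three-fragment source ends
 * up roughly as:
 *
 *	sgl->len0           = len(frag0);  sgl->addr0          = dma(frag0);
 *	sgl->sge[0].len[0]  = len(frag1);  sgl->sge[0].addr[0] = dma(frag1);
 *	sgl->sge[0].len[1]  = len(frag2);  sgl->sge[0].addr[1] = dma(frag2);
 *
 * with cmd_nsge finally recording the total entry count in
 * ulptx_walk_end(). (len()/dma() are shorthand here, not real helpers.)
 */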
 555
 556static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
 557{
 558	struct crypto_alg *alg = tfm->__crt_alg;
 559	struct chcr_alg_template *chcr_crypto_alg =
 560		container_of(alg, struct chcr_alg_template, alg.crypto);
 561
 562	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 563}
 564
 565static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
 566{
 567	struct adapter *adap = netdev2adap(dev);
 568	struct sge_uld_txq_info *txq_info =
 569		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
 570	struct sge_uld_txq *txq;
 571	int ret = 0;
 572
 573	local_bh_disable();
 574	txq = &txq_info->uldtxq[idx];
 575	spin_lock(&txq->sendq.lock);
 576	if (txq->full)
 577		ret = -1;
 578	spin_unlock(&txq->sendq.lock);
 579	local_bh_enable();
 580	return ret;
 581}
 582
 583static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
 584			       struct _key_ctx *key_ctx)
 585{
 586	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
 587		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
 588	} else {
 589		memcpy(key_ctx->key,
 590		       ablkctx->key + (ablkctx->enckey_len >> 1),
 591		       ablkctx->enckey_len >> 1);
 592		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
 593		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
 594	}
 595	return 0;
 596}
 597
 598static int chcr_hash_ent_in_wr(struct scatterlist *src,
 599			     unsigned int minsg,
 600			     unsigned int space,
 601			     unsigned int srcskip)
 602{
 603	int srclen = 0;
 604	int srcsg = minsg;
 605	int soffset = 0, sless;
 606
 607	if (sg_dma_len(src) == srcskip) {
 608		src = sg_next(src);
 609		srcskip = 0;
 610	}
 611	while (src && space > (sgl_ent_len[srcsg + 1])) {
 612		sless = min_t(unsigned int, sg_dma_len(src) - soffset -	srcskip,
 613							CHCR_SRC_SG_SIZE);
 614		srclen += sless;
 615		soffset += sless;
 616		srcsg++;
 617		if (sg_dma_len(src) == (soffset + srcskip)) {
 618			src = sg_next(src);
 619			soffset = 0;
 620			srcskip = 0;
 621		}
 622	}
 623	return srclen;
 624}
 625
 626static int chcr_sg_ent_in_wr(struct scatterlist *src,
 627			     struct scatterlist *dst,
 628			     unsigned int minsg,
 629			     unsigned int space,
 630			     unsigned int srcskip,
 631			     unsigned int dstskip)
 632{
 633	int srclen = 0, dstlen = 0;
 634	int srcsg = minsg, dstsg = minsg;
 635	int offset = 0, soffset = 0, less, sless = 0;
 636
 637	if (sg_dma_len(src) == srcskip) {
 638		src = sg_next(src);
 639		srcskip = 0;
 640	}
 641
 642	if (sg_dma_len(dst) == dstskip) {
 643		dst = sg_next(dst);
 644		dstskip = 0;
 645	}
 646
 647	while (src && dst &&
 648	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
 649		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
 650				CHCR_SRC_SG_SIZE);
 651		srclen += sless;
 652		srcsg++;
 653		offset = 0;
 654		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
 655		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
 656			if (srclen <= dstlen)
 657				break;
 658			less = min_t(unsigned int, sg_dma_len(dst) - offset -
 659				     dstskip, CHCR_DST_SG_SIZE);
 660			dstlen += less;
 661			offset += less;
 662			if ((offset + dstskip) == sg_dma_len(dst)) {
 663				dst = sg_next(dst);
 664				offset = 0;
 665			}
 666			dstsg++;
 667			dstskip = 0;
 668		}
 669		soffset += sless;
 670		if ((soffset + srcskip) == sg_dma_len(src)) {
 671			src = sg_next(src);
 672			srcskip = 0;
 673			soffset = 0;
 674		}
 675
 676	}
 677	return min(srclen, dstlen);
 678}
 679
 680static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
 681				u32 flags,
 682				struct scatterlist *src,
 683				struct scatterlist *dst,
 684				unsigned int nbytes,
 685				u8 *iv,
 686				unsigned short op_type)
 687{
 688	int err;
 689
 690	SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
 691	skcipher_request_set_tfm(subreq, cipher);
 692	skcipher_request_set_callback(subreq, flags, NULL, NULL);
 693	skcipher_request_set_crypt(subreq, src, dst,
 694				   nbytes, iv);
 695
 696	err = op_type ? crypto_skcipher_decrypt(subreq) :
 697		crypto_skcipher_encrypt(subreq);
 698	skcipher_request_zero(subreq);
 699
 700	return err;
 701
 702}
 703static inline void create_wreq(struct chcr_context *ctx,
 704			       struct chcr_wr *chcr_req,
 705			       struct crypto_async_request *req,
 706			       unsigned int imm,
 707			       int hash_sz,
 708			       unsigned int len16,
 709			       unsigned int sc_len,
 710			       unsigned int lcb)
 711{
 712	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 713	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
 714
 715
 716	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
 717	chcr_req->wreq.pld_size_hash_size =
 718		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
 719	chcr_req->wreq.len16_pkd =
 720		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
 721	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
 722	chcr_req->wreq.rx_chid_to_rx_q_id =
 723		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
 724				!!lcb, ctx->tx_qidx);
 725
 726	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
 727						       qid);
 728	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
 729				     ((sizeof(chcr_req->wreq)) >> 4)));
 730
 731	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
 732	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
 733					   sizeof(chcr_req->key_ctx) + sc_len);
 734}
 735
 736/**
 737 *	create_cipher_wr - form the WR for cipher operations
 738 *	@wrparam: cipher work request parameters: the cipher request
 739 *	(wrparam->req), the ingress qid where the response of this WR
 740 *	should be received (wrparam->qid), and the number of bytes to
 741 *	process in this WR (wrparam->bytes)
 742 */
 743static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 744{
 745	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
 746	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
 747	struct sk_buff *skb = NULL;
 748	struct chcr_wr *chcr_req;
 749	struct cpl_rx_phys_dsgl *phys_cpl;
 750	struct ulptx_sgl *ulptx;
 751	struct chcr_blkcipher_req_ctx *reqctx =
 752		ablkcipher_request_ctx(wrparam->req);
 753	unsigned int temp = 0, transhdr_len, dst_size;
 754	int error;
 755	int nents;
 756	unsigned int kctx_len;
 757	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 758			GFP_KERNEL : GFP_ATOMIC;
 759	struct adapter *adap = padap(c_ctx(tfm)->dev);
 760
 761	nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
 762			      reqctx->dst_ofst);
 763	dst_size = get_space_for_phys_dsgl(nents + 1);
 764	kctx_len = roundup(ablkctx->enckey_len, 16);
 765	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
 766	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
 767				  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
 768	temp = reqctx->imm ? roundup(IV + wrparam->req->nbytes, 16) :
 769				     (sgl_len(nents + MIN_CIPHER_SG) * 8);
 770	transhdr_len += temp;
 771	transhdr_len = roundup(transhdr_len, 16);
 772	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
 773	if (!skb) {
 774		error = -ENOMEM;
 775		goto err;
 776	}
 777	chcr_req = __skb_put_zero(skb, transhdr_len);
 778	chcr_req->sec_cpl.op_ivinsrtofst =
 779		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
 780
 781	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
 782	chcr_req->sec_cpl.aadstart_cipherstop_hi =
 783			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
 784
 785	chcr_req->sec_cpl.cipherstop_lo_authinsert =
 786			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
 787	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
 788							 ablkctx->ciph_mode,
 789							 0, 0, IV >> 1);
 790	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
 791							  0, 0, dst_size);
 792
 793	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
 794	if ((reqctx->op == CHCR_DECRYPT_OP) &&
 795	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
 796	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
 797	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
 798	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
 799		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
 800	} else {
 801		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
 802		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
 803			memcpy(chcr_req->key_ctx.key, ablkctx->key,
 804			       ablkctx->enckey_len);
 805		} else {
 806			memcpy(chcr_req->key_ctx.key, ablkctx->key +
 807			       (ablkctx->enckey_len >> 1),
 808			       ablkctx->enckey_len >> 1);
 809			memcpy(chcr_req->key_ctx.key +
 810			       (ablkctx->enckey_len >> 1),
 811			       ablkctx->key,
 812			       ablkctx->enckey_len >> 1);
 813		}
 814	}
 815	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
 816	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
 817	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
 818	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
 819
 820	atomic_inc(&adap->chcr_stats.cipher_rqst);
 821	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len
 822		+(reqctx->imm ? (IV + wrparam->bytes) : 0);
 823	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
 824		    transhdr_len, temp,
 825			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
 826	reqctx->skb = skb;
 827
 828	if (reqctx->op && (ablkctx->ciph_mode ==
 829			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
 830		sg_pcopy_to_buffer(wrparam->req->src,
 831			sg_nents(wrparam->req->src), wrparam->req->info, 16,
 832			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
 833
 834	return skb;
 835err:
 836	return ERR_PTR(error);
 837}
 838
 839static inline int chcr_keyctx_ck_size(unsigned int keylen)
 840{
 841	int ck_size = 0;
 842
 843	if (keylen == AES_KEYSIZE_128)
 844		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
 845	else if (keylen == AES_KEYSIZE_192)
 846		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
 847	else if (keylen == AES_KEYSIZE_256)
 848		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
 849	else
 850		ck_size = 0;
 851
 852	return ck_size;
 853}
 854static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
 855				       const u8 *key,
 856				       unsigned int keylen)
 857{
 858	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
 859	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 860	int err = 0;
 861
 862	crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
 863	crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
 864				  CRYPTO_TFM_REQ_MASK);
 865	err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
 866	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
 867	tfm->crt_flags |=
 868		crypto_skcipher_get_flags(ablkctx->sw_cipher) &
 869		CRYPTO_TFM_RES_MASK;
 870	return err;
 871}
 872
 873static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
 874			       const u8 *key,
 875			       unsigned int keylen)
 876{
 877	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 878	unsigned int ck_size, context_size;
 879	u16 alignment = 0;
 880	int err;
 881
 882	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 883	if (err)
 884		goto badkey_err;
 885
 886	ck_size = chcr_keyctx_ck_size(keylen);
 887	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
 888	memcpy(ablkctx->key, key, keylen);
 889	ablkctx->enckey_len = keylen;
 890	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
 891	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 892			keylen + alignment) >> 4;
 893
 894	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 895						0, 0, context_size);
 896	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
 897	return 0;
 898badkey_err:
 899	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 900	ablkctx->enckey_len = 0;
 901
 902	return err;
 903}
 904
 905static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
 906				   const u8 *key,
 907				   unsigned int keylen)
 908{
 909	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 910	unsigned int ck_size, context_size;
 911	u16 alignment = 0;
 912	int err;
 913
 914	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 915	if (err)
 916		goto badkey_err;
 917	ck_size = chcr_keyctx_ck_size(keylen);
 918	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
 919	memcpy(ablkctx->key, key, keylen);
 920	ablkctx->enckey_len = keylen;
 921	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 922			keylen + alignment) >> 4;
 923
 924	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 925						0, 0, context_size);
 926	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
 927
 928	return 0;
 929badkey_err:
 930	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 931	ablkctx->enckey_len = 0;
 932
 933	return err;
 934}
 935
 936static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
 937				   const u8 *key,
 938				   unsigned int keylen)
 939{
 940	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 941	unsigned int ck_size, context_size;
 942	u16 alignment = 0;
 943	int err;
 944
 945	if (keylen < CTR_RFC3686_NONCE_SIZE)
 946		return -EINVAL;
 947	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
 948	       CTR_RFC3686_NONCE_SIZE);
 949
 950	keylen -= CTR_RFC3686_NONCE_SIZE;
 951	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 952	if (err)
 953		goto badkey_err;
 954
 955	ck_size = chcr_keyctx_ck_size(keylen);
 956	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
 957	memcpy(ablkctx->key, key, keylen);
 958	ablkctx->enckey_len = keylen;
 959	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 960			keylen + alignment) >> 4;
 961
 962	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 963						0, 0, context_size);
 964	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
 965
 966	return 0;
 967badkey_err:
 968	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 969	ablkctx->enckey_len = 0;
 970
 971	return err;
 972}
 973static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
 974{
 975	unsigned int size = AES_BLOCK_SIZE;
 976	__be32 *b = (__be32 *)(dstiv + size);
 977	u32 c, prev;
 978
 979	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
 980	for (; size >= 4; size -= 4) {
 981		prev = be32_to_cpu(*--b);
 982		c = prev + add;
 983		*b = cpu_to_be32(c);
 984		if (prev < c)
 985			break;
 986		add = 1;
 987	}
 988
 989}
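/* Example: ctr_add_iv() treats the IV as a 128-bit big-endian counter and
 * adds "add" to it, propagating carries from the least significant 32-bit
 * word upwards.  Adding 1 to a counter ending in ...00000000 FFFFFFFF gives
 * ...00000001 00000000: the low word wraps to zero (prev >= c), so the loop
 * carries 1 into the next word and only then stops.
 */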
 990
 991static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
 992{
 993	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
 994	u64 c;
 995	u32 temp = be32_to_cpu(*--b);
 996
 997	temp = ~temp;
 998	c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
 999	if ((bytes / AES_BLOCK_SIZE) > c)
1000		bytes = c * AES_BLOCK_SIZE;
1001	return bytes;
1002}
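/* Example: adjust_ctr_overflow() clamps "bytes" so the low 32 bits of the
 * counter cannot wrap inside one request.  If the last word of the IV is
 * 0xFFFFFFFE, then temp = ~0xFFFFFFFE = 1 and c = 2, i.e. only two more AES
 * blocks fit before the wrap, so anything larger is limited to
 * 2 * AES_BLOCK_SIZE.
 */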
1003
1004static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
1005			     u32 isfinal)
1006{
1007	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1008	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1009	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1010	struct crypto_cipher *cipher;
1011	int ret, i;
1012	u8 *key;
1013	unsigned int keylen;
1014	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1015	int round8 = round / 8;
1016
1017	cipher = ablkctx->aes_generic;
1018	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1019
1020	keylen = ablkctx->enckey_len / 2;
1021	key = ablkctx->key + keylen;
1022	ret = crypto_cipher_setkey(cipher, key, keylen);
1023	if (ret)
1024		goto out;
1025	/* H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0 */
1026	for (i = 0; i < round8; i++)
1027		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1028
1029	for (i = 0; i < (round % 8); i++)
1030		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1031
1032	if (!isfinal)
1033		crypto_cipher_decrypt_one(cipher, iv, iv);
1034out:
1035	return ret;
1036}
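/* Note: in XTS the tweak for block n is the encrypted IV multiplied by
 * alpha^n in GF(2^128).  chcr_update_tweak() therefore advances the saved
 * tweak by one gf128 doubling per block processed in the previous request,
 * batching eight doublings at a time via gf128mul_x8_ble().  For
 * intermediate (non-final) updates the result is decrypted once with the
 * second half of the key, since, per the in-function comment, the hardware
 * returns the already-encrypted IV.
 */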
1037
1038static int chcr_update_cipher_iv(struct ablkcipher_request *req,
1039				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1040{
1041	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1042	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1043	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1044	int ret = 0;
1045
1046	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1047		ctr_add_iv(iv, req->info, (reqctx->processed /
1048			   AES_BLOCK_SIZE));
1049	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1050		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1051			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1052						AES_BLOCK_SIZE) + 1);
1053	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1054		ret = chcr_update_tweak(req, iv, 0);
1055	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1056		if (reqctx->op)
1057			/* Updated before sending the last WR */
1058			memcpy(iv, req->info, AES_BLOCK_SIZE);
1059		else
1060			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1061	}
1062
1063	return ret;
1064
1065}
1066
1067/* We need a separate function for the final IV because in RFC3686 the
1068 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
1069 * remains constant for subsequent update requests.
1070 */
1071
1072static int chcr_final_cipher_iv(struct ablkcipher_request *req,
1073				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1074{
1075	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1076	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1077	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1078	int ret = 0;
1079
1080	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1081		ctr_add_iv(iv, req->info, (reqctx->processed /
1082			   AES_BLOCK_SIZE));
1083	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1084		ret = chcr_update_tweak(req, iv, 1);
1085	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1086		/* Already updated for decrypt */
1087		if (!reqctx->op)
1088			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1089
1090	}
1091	return ret;
1092
1093}
1094
1095static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
1096				   unsigned char *input, int err)
1097{
1098	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1099	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1100	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1101	struct sk_buff *skb;
1102	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1103	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1104	struct  cipher_wr_param wrparam;
1105	int bytes;
1106
1107	if (err)
1108		goto unmap;
1109	if (req->nbytes == reqctx->processed) {
1110		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1111				      req);
1112		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
1113		goto complete;
1114	}
1115
1116	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1117					    c_ctx(tfm)->tx_qidx))) {
1118		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1119			err = -EBUSY;
1120			goto unmap;
1121		}
1122
1123	}
1124	if (!reqctx->imm) {
1125		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
1126					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1127					  reqctx->src_ofst, reqctx->dst_ofst);
1128		if ((bytes + reqctx->processed) >= req->nbytes)
1129			bytes  = req->nbytes - reqctx->processed;
1130		else
1131			bytes = rounddown(bytes, 16);
1132	} else {
1133		/* CTR mode counter overflow */
1134		bytes  = req->nbytes - reqctx->processed;
1135	}
1136	dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1137				reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
1138	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1139	dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1140				   reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
1141	if (err)
1142		goto unmap;
1143
1144	if (unlikely(bytes == 0)) {
1145		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1146				      req);
1147		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1148				     req->base.flags,
1149				     req->src,
1150				     req->dst,
1151				     req->nbytes,
1152				     req->info,
1153				     reqctx->op);
1154		goto complete;
1155	}
1156
1157	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1158	    CRYPTO_ALG_SUB_TYPE_CTR)
1159		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1160	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
1161	wrparam.req = req;
1162	wrparam.bytes = bytes;
1163	skb = create_cipher_wr(&wrparam);
1164	if (IS_ERR(skb)) {
1165		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
1166		err = PTR_ERR(skb);
1167		goto unmap;
1168	}
1169	skb->dev = u_ctx->lldi.ports[0];
1170	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1171	chcr_send_wr(skb);
1172	reqctx->last_req_len = bytes;
1173	reqctx->processed += bytes;
1174	return 0;
1175unmap:
1176	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1177complete:
1178	req->base.complete(&req->base, err);
1179	return err;
1180}
1181
1182static int process_cipher(struct ablkcipher_request *req,
1183				  unsigned short qid,
1184				  struct sk_buff **skb,
1185				  unsigned short op_type)
1186{
1187	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1188	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
1189	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1190	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1191	struct	cipher_wr_param wrparam;
1192	int bytes, err = -EINVAL;
1193
1194	reqctx->processed = 0;
1195	if (!req->info)
1196		goto error;
1197	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1198	    (req->nbytes == 0) ||
1199	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
1200		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1201		       ablkctx->enckey_len, req->nbytes, ivsize);
1202		goto error;
1203	}
1204	chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1205	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1206					    AES_MIN_KEY_SIZE +
1207					    sizeof(struct cpl_rx_phys_dsgl) +
1208					/*Min dsgl size*/
1209					    32))) {
1210		/* Can be sent as immediate data */
1211		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1212
1213		dnents = sg_nents_xlen(req->dst, req->nbytes,
1214				       CHCR_DST_SG_SIZE, 0);
1215		dnents += 1; // IV
1216		phys_dsgl = get_space_for_phys_dsgl(dnents);
1217		kctx_len = roundup(ablkctx->enckey_len, 16);
1218		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1219		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
1220			SGE_MAX_WR_LEN;
1221		bytes = IV + req->nbytes;
1222
1223	} else {
1224		reqctx->imm = 0;
1225	}
1226
1227	if (!reqctx->imm) {
1228		bytes = chcr_sg_ent_in_wr(req->src, req->dst,
1229					  MIN_CIPHER_SG,
1230					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1231					  0, 0);
1232		if ((bytes + reqctx->processed) >= req->nbytes)
1233			bytes  = req->nbytes - reqctx->processed;
1234		else
1235			bytes = rounddown(bytes, 16);
1236	} else {
1237		bytes = req->nbytes;
1238	}
1239	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1240	    CRYPTO_ALG_SUB_TYPE_CTR) {
1241		bytes = adjust_ctr_overflow(req->info, bytes);
1242	}
1243	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1244	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1245		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1246		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
1247				CTR_RFC3686_IV_SIZE);
1248
1249		/* initialize counter portion of counter block */
1250		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1251			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1252
1253	} else {
1254
1255		memcpy(reqctx->iv, req->info, IV);
1256	}
1257	if (unlikely(bytes == 0)) {
1258		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1259				      req);
1260		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1261					   req->base.flags,
1262					   req->src,
1263					   req->dst,
1264					   req->nbytes,
1265					   reqctx->iv,
1266					   op_type);
1267		goto error;
1268	}
1269	reqctx->op = op_type;
1270	reqctx->srcsg = req->src;
1271	reqctx->dstsg = req->dst;
1272	reqctx->src_ofst = 0;
1273	reqctx->dst_ofst = 0;
1274	wrparam.qid = qid;
1275	wrparam.req = req;
1276	wrparam.bytes = bytes;
1277	*skb = create_cipher_wr(&wrparam);
1278	if (IS_ERR(*skb)) {
1279		err = PTR_ERR(*skb);
1280		goto unmap;
1281	}
1282	reqctx->processed = bytes;
1283	reqctx->last_req_len = bytes;
1284
1285	return 0;
1286unmap:
1287	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1288error:
1289	return err;
1290}
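/* Note: process_cipher() decides how the payload travels.  Requests small
 * enough that header + IV + data fit in SGE_MAX_WR_LEN are sent as immediate
 * data; otherwise chcr_sg_ent_in_wr() picks as many bytes as fit in one WR
 * (rounded down to a multiple of 16) and the rest is continued later from
 * chcr_handle_cipher_resp().  If nothing fits at all, the whole request is
 * handed to the software fallback cipher.
 */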
1291
1292static int chcr_aes_encrypt(struct ablkcipher_request *req)
1293{
1294	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1295	struct sk_buff *skb = NULL;
1296	int err;
1297	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1298
1299	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1300					    c_ctx(tfm)->tx_qidx))) {
1301		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1302			return -EBUSY;
1303	}
1304
1305	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1306			     &skb, CHCR_ENCRYPT_OP);
1307	if (err || !skb)
1308		return  err;
1309	skb->dev = u_ctx->lldi.ports[0];
1310	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1311	chcr_send_wr(skb);
1312	return -EINPROGRESS;
1313}
1314
1315static int chcr_aes_decrypt(struct ablkcipher_request *req)
1316{
1317	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1318	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1319	struct sk_buff *skb = NULL;
1320	int err;
1321
1322	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1323					    c_ctx(tfm)->tx_qidx))) {
1324		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1325			return -EBUSY;
1326	}
1327
1328	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1329			     &skb, CHCR_DECRYPT_OP);
1330	if (err || !skb)
1331		return err;
1332	skb->dev = u_ctx->lldi.ports[0];
1333	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1334	chcr_send_wr(skb);
1335	return -EINPROGRESS;
1336}
1337
1338static int chcr_device_init(struct chcr_context *ctx)
1339{
1340	struct uld_ctx *u_ctx = NULL;
1341	struct adapter *adap;
1342	unsigned int id;
1343	int txq_perchan, txq_idx, ntxq;
1344	int err = 0, rxq_perchan, rxq_idx;
1345
1346	id = smp_processor_id();
1347	if (!ctx->dev) {
1348		u_ctx = assign_chcr_device();
1349		if (!u_ctx) {
1350			pr_err("chcr device assignment fails\n");
1351			goto out;
1352		}
1353		ctx->dev = u_ctx->dev;
1354		adap = padap(ctx->dev);
1355		ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
1356				    adap->vres.ncrypto_fc);
1357		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1358		txq_perchan = ntxq / u_ctx->lldi.nchan;
1359		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
1360		rxq_idx += id % rxq_perchan;
1361		txq_idx = ctx->dev->tx_channel_id * txq_perchan;
1362		txq_idx += id % txq_perchan;
1363		spin_lock(&ctx->dev->lock_chcr_dev);
1364		ctx->rx_qidx = rxq_idx;
1365		ctx->tx_qidx = txq_idx;
1366		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
1367		ctx->dev->rx_channel_id = 0;
1368		spin_unlock(&ctx->dev->lock_chcr_dev);
1369	}
1370out:
1371	return err;
1372}
1373
1374static int chcr_cra_init(struct crypto_tfm *tfm)
1375{
1376	struct crypto_alg *alg = tfm->__crt_alg;
1377	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1378	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1379
1380	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
1381				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1382	if (IS_ERR(ablkctx->sw_cipher)) {
1383		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1384		return PTR_ERR(ablkctx->sw_cipher);
1385	}
1386
1387	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
1388		/* To update tweak*/
1389		ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
1390		if (IS_ERR(ablkctx->aes_generic)) {
1391			pr_err("failed to allocate aes cipher for tweak\n");
1392			return PTR_ERR(ablkctx->aes_generic);
1393		}
1394	} else
1395		ablkctx->aes_generic = NULL;
1396
1397	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
1398	return chcr_device_init(crypto_tfm_ctx(tfm));
1399}
1400
1401static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1402{
1403	struct crypto_alg *alg = tfm->__crt_alg;
1404	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1405	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1406
1407	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1408	 * cannot be used as the fallback in chcr_handle_cipher_resp().
1409	 */
1410	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1411				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1412	if (IS_ERR(ablkctx->sw_cipher)) {
1413		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1414		return PTR_ERR(ablkctx->sw_cipher);
1415	}
1416	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
1417	return chcr_device_init(crypto_tfm_ctx(tfm));
1418}
1419
1420
1421static void chcr_cra_exit(struct crypto_tfm *tfm)
1422{
1423	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1424	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1425
1426	crypto_free_skcipher(ablkctx->sw_cipher);
1427	if (ablkctx->aes_generic)
1428		crypto_free_cipher(ablkctx->aes_generic);
1429}
1430
1431static int get_alg_config(struct algo_param *params,
1432			  unsigned int auth_size)
1433{
1434	switch (auth_size) {
1435	case SHA1_DIGEST_SIZE:
1436		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1437		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1438		params->result_size = SHA1_DIGEST_SIZE;
1439		break;
1440	case SHA224_DIGEST_SIZE:
1441		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1442		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1443		params->result_size = SHA256_DIGEST_SIZE;
1444		break;
1445	case SHA256_DIGEST_SIZE:
1446		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1447		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1448		params->result_size = SHA256_DIGEST_SIZE;
1449		break;
1450	case SHA384_DIGEST_SIZE:
1451		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1452		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1453		params->result_size = SHA512_DIGEST_SIZE;
1454		break;
1455	case SHA512_DIGEST_SIZE:
1456		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1457		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1458		params->result_size = SHA512_DIGEST_SIZE;
1459		break;
1460	default:
1461		pr_err("chcr : ERROR, unsupported digest size\n");
1462		return -EINVAL;
1463	}
1464	return 0;
1465}
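/* Note: for SHA-224 and SHA-384 the result_size above is the parent digest
 * size (SHA-256 and SHA-512 respectively), because the intermediate state is
 * carried at the full parent width; the digest is only truncated when it is
 * copied back to the caller (see the updated_digestsize handling in
 * chcr_handle_ahash_resp()).
 */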
1466
1467static inline void chcr_free_shash(struct crypto_shash *base_hash)
1468{
1469		crypto_free_shash(base_hash);
1470}
1471
1472/**
1473 *	create_hash_wr - Create hash work request
1474 *	@req: hash request
1475 */
1476static struct sk_buff *create_hash_wr(struct ahash_request *req,
1477				      struct hash_wr_param *param)
1478{
1479	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1480	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1481	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
1482	struct sk_buff *skb = NULL;
1483	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1484	struct chcr_wr *chcr_req;
1485	struct ulptx_sgl *ulptx;
1486	unsigned int nents = 0, transhdr_len;
1487	unsigned int temp = 0;
1488	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1489		GFP_ATOMIC;
1490	struct adapter *adap = padap(h_ctx(tfm)->dev);
1491	int error = 0;
1492
1493	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1494	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1495				param->sg_len) <= SGE_MAX_WR_LEN;
1496	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1497		      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1498	nents += param->bfr_len ? 1 : 0;
1499	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1500				param->sg_len, 16) : (sgl_len(nents) * 8);
1501	transhdr_len = roundup(transhdr_len, 16);
1502
1503	skb = alloc_skb(transhdr_len, flags);
1504	if (!skb)
1505		return ERR_PTR(-ENOMEM);
1506	chcr_req = __skb_put_zero(skb, transhdr_len);
1507
1508	chcr_req->sec_cpl.op_ivinsrtofst =
1509		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
1510	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1511
1512	chcr_req->sec_cpl.aadstart_cipherstop_hi =
1513		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1514	chcr_req->sec_cpl.cipherstop_lo_authinsert =
1515		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1516	chcr_req->sec_cpl.seqno_numivs =
1517		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1518					 param->opad_needed, 0);
1519
1520	chcr_req->sec_cpl.ivgen_hdrlen =
1521		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1522
1523	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1524	       param->alg_prm.result_size);
1525
1526	if (param->opad_needed)
1527		memcpy(chcr_req->key_ctx.key +
1528		       ((param->alg_prm.result_size <= 32) ? 32 :
1529			CHCR_HASH_MAX_DIGEST_SIZE),
1530		       hmacctx->opad, param->alg_prm.result_size);
1531
1532	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1533					    param->alg_prm.mk_size, 0,
1534					    param->opad_needed,
1535					    ((param->kctx_len +
1536					     sizeof(chcr_req->key_ctx)) >> 4));
1537	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1538	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1539				     DUMMY_BYTES);
1540	if (param->bfr_len != 0) {
1541		req_ctx->hctx_wr.dma_addr =
1542			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1543				       param->bfr_len, DMA_TO_DEVICE);
1544		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1545				      req_ctx->hctx_wr.dma_addr)) {
1546			error = -ENOMEM;
1547			goto err;
1548		}
1549		req_ctx->hctx_wr.dma_len = param->bfr_len;
1550	} else {
1551		req_ctx->hctx_wr.dma_addr = 0;
1552	}
1553	chcr_add_hash_src_ent(req, ulptx, param);
1554	/* Request up to the max WR size */
1555	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1556				(param->sg_len + param->bfr_len) : 0);
1557	atomic_inc(&adap->chcr_stats.digest_rqst);
1558	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1559		    param->hash_size, transhdr_len,
1560		    temp,  0);
1561	req_ctx->hctx_wr.skb = skb;
1562	return skb;
1563err:
1564	kfree_skb(skb);
1565	return  ERR_PTR(error);
1566}
1567
1568static int chcr_ahash_update(struct ahash_request *req)
1569{
1570	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1571	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1572	struct uld_ctx *u_ctx = NULL;
1573	struct sk_buff *skb;
1574	u8 remainder = 0, bs;
1575	unsigned int nbytes = req->nbytes;
1576	struct hash_wr_param params;
1577	int error;
1578
1579	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1580	u_ctx = ULD_CTX(h_ctx(rtfm));
1581	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1582					    h_ctx(rtfm)->tx_qidx))) {
1583		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1584			return -EBUSY;
1585	}
1586
1587	if (nbytes + req_ctx->reqlen >= bs) {
1588		remainder = (nbytes + req_ctx->reqlen) % bs;
1589		nbytes = nbytes + req_ctx->reqlen - remainder;
1590	} else {
1591		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1592				   + req_ctx->reqlen, nbytes, 0);
1593		req_ctx->reqlen += nbytes;
1594		return 0;
1595	}
1596	chcr_init_hctx_per_wr(req_ctx);
1597	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1598	if (error)
1599		return -ENOMEM;
1600	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1601	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1602	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1603				     HASH_SPACE_LEFT(params.kctx_len), 0);
1604	if (params.sg_len > req->nbytes)
1605		params.sg_len = req->nbytes;
1606	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1607			req_ctx->reqlen;
1608	params.opad_needed = 0;
1609	params.more = 1;
1610	params.last = 0;
1611	params.bfr_len = req_ctx->reqlen;
1612	params.scmd1 = 0;
1613	req_ctx->hctx_wr.srcsg = req->src;
1614
1615	params.hash_size = params.alg_prm.result_size;
1616	req_ctx->data_len += params.sg_len + params.bfr_len;
1617	skb = create_hash_wr(req, &params);
1618	if (IS_ERR(skb)) {
1619		error = PTR_ERR(skb);
1620		goto unmap;
1621	}
1622
1623	req_ctx->hctx_wr.processed += params.sg_len;
1624	if (remainder) {
1625		/* Swap buffers */
1626		swap(req_ctx->reqbfr, req_ctx->skbfr);
1627		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1628				   req_ctx->reqbfr, remainder, req->nbytes -
1629				   remainder);
1630	}
1631	req_ctx->reqlen = remainder;
1632	skb->dev = u_ctx->lldi.ports[0];
1633	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1634	chcr_send_wr(skb);
1635
1636	return -EINPROGRESS;
1637unmap:
1638	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1639	return error;
1640}
1641
1642static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1643{
1644	memset(bfr_ptr, 0, bs);
1645	*bfr_ptr = 0x80;
1646	if (bs == 64)
1647		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
1648	else
1649		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
1650}
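/* Note: create_last_hash_block() hand-builds the final MD-style padding
 * block: a 0x80 byte followed by zeroes, with the message length in bits
 * (scmd1 << 3) stored big-endian in the last 8 bytes of the block, i.e. at
 * offset 56 for 64-byte blocks (SHA-1/224/256) and offset 120 for 128-byte
 * blocks (SHA-384/512).
 */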
1651
1652static int chcr_ahash_final(struct ahash_request *req)
1653{
1654	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1655	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1656	struct hash_wr_param params;
1657	struct sk_buff *skb;
1658	struct uld_ctx *u_ctx = NULL;
1659	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1660
1661	chcr_init_hctx_per_wr(req_ctx);
1662	u_ctx = ULD_CTX(h_ctx(rtfm));
1663	if (is_hmac(crypto_ahash_tfm(rtfm)))
1664		params.opad_needed = 1;
1665	else
1666		params.opad_needed = 0;
1667	params.sg_len = 0;
1668	req_ctx->hctx_wr.isfinal = 1;
1669	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1670	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1671	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1672		params.opad_needed = 1;
1673		params.kctx_len *= 2;
1674	} else {
1675		params.opad_needed = 0;
1676	}
1677
1678	req_ctx->hctx_wr.result = 1;
1679	params.bfr_len = req_ctx->reqlen;
1680	req_ctx->data_len += params.bfr_len + params.sg_len;
1681	req_ctx->hctx_wr.srcsg = req->src;
1682	if (req_ctx->reqlen == 0) {
1683		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1684		params.last = 0;
1685		params.more = 1;
1686		params.scmd1 = 0;
1687		params.bfr_len = bs;
1688
1689	} else {
1690		params.scmd1 = req_ctx->data_len;
1691		params.last = 1;
1692		params.more = 0;
1693	}
1694	params.hash_size = crypto_ahash_digestsize(rtfm);
1695	skb = create_hash_wr(req, &params);
1696	if (IS_ERR(skb))
1697		return PTR_ERR(skb);
1698	req_ctx->reqlen = 0;
1699	skb->dev = u_ctx->lldi.ports[0];
1700	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1701	chcr_send_wr(skb);
1702	return -EINPROGRESS;
1703}
1704
1705static int chcr_ahash_finup(struct ahash_request *req)
1706{
1707	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1708	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1709	struct uld_ctx *u_ctx = NULL;
1710	struct sk_buff *skb;
1711	struct hash_wr_param params;
1712	u8  bs;
1713	int error;
1714
1715	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1716	u_ctx = ULD_CTX(h_ctx(rtfm));
1717
1718	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1719					    h_ctx(rtfm)->tx_qidx))) {
1720		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1721			return -EBUSY;
1722	}
1723	chcr_init_hctx_per_wr(req_ctx);
1724	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1725	if (error)
1726		return -ENOMEM;
1727
1728	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1729	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1730	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1731		params.kctx_len *= 2;
1732		params.opad_needed = 1;
1733	} else {
1734		params.opad_needed = 0;
1735	}
1736
1737	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1738				    HASH_SPACE_LEFT(params.kctx_len), 0);
1739	if (params.sg_len < req->nbytes) {
1740		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1741			params.kctx_len /= 2;
1742			params.opad_needed = 0;
1743		}
1744		params.last = 0;
1745		params.more = 1;
1746		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1747					- req_ctx->reqlen;
1748		params.hash_size = params.alg_prm.result_size;
1749		params.scmd1 = 0;
1750	} else {
1751		params.last = 1;
1752		params.more = 0;
1753		params.sg_len = req->nbytes;
1754		params.hash_size = crypto_ahash_digestsize(rtfm);
1755		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1756				params.sg_len;
1757	}
1758	params.bfr_len = req_ctx->reqlen;
1759	req_ctx->data_len += params.bfr_len + params.sg_len;
1760	req_ctx->hctx_wr.result = 1;
1761	req_ctx->hctx_wr.srcsg = req->src;
1762	if ((req_ctx->reqlen + req->nbytes) == 0) {
1763		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1764		params.last = 0;
1765		params.more = 1;
1766		params.scmd1 = 0;
1767		params.bfr_len = bs;
1768	}
1769	skb = create_hash_wr(req, &params);
1770	if (IS_ERR(skb)) {
1771		error = PTR_ERR(skb);
1772		goto unmap;
1773	}
1774	req_ctx->reqlen = 0;
1775	req_ctx->hctx_wr.processed += params.sg_len;
1776	skb->dev = u_ctx->lldi.ports[0];
1777	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1778	chcr_send_wr(skb);
1779
1780	return -EINPROGRESS;
1781unmap:
1782	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1783	return error;
1784}
1785
1786static int chcr_ahash_digest(struct ahash_request *req)
1787{
1788	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1789	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1790	struct uld_ctx *u_ctx = NULL;
1791	struct sk_buff *skb;
1792	struct hash_wr_param params;
1793	u8  bs;
1794	int error;
1795
1796	rtfm->init(req);
1797	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1798
1799	u_ctx = ULD_CTX(h_ctx(rtfm));
1800	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1801					    h_ctx(rtfm)->tx_qidx))) {
1802		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1803			return -EBUSY;
1804	}
1805
1806	chcr_init_hctx_per_wr(req_ctx);
1807	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1808	if (error)
1809		return -ENOMEM;
1810
1811	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1812	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1813	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1814		params.kctx_len *= 2;
1815		params.opad_needed = 1;
1816	} else {
1817		params.opad_needed = 0;
1818	}
1819	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1820				HASH_SPACE_LEFT(params.kctx_len), 0);
1821	if (params.sg_len < req->nbytes) {
1822		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1823			params.kctx_len /= 2;
1824			params.opad_needed = 0;
1825		}
1826		params.last = 0;
1827		params.more = 1;
1828		params.scmd1 = 0;
1829		params.sg_len = rounddown(params.sg_len, bs);
1830		params.hash_size = params.alg_prm.result_size;
1831	} else {
1832		params.sg_len = req->nbytes;
1833		params.hash_size = crypto_ahash_digestsize(rtfm);
1834		params.last = 1;
1835		params.more = 0;
1836		params.scmd1 = req->nbytes + req_ctx->data_len;
1837
1838	}
1839	params.bfr_len = 0;
1840	req_ctx->hctx_wr.result = 1;
1841	req_ctx->hctx_wr.srcsg = req->src;
1842	req_ctx->data_len += params.bfr_len + params.sg_len;
1843
1844	if (req->nbytes == 0) {
1845		create_last_hash_block(req_ctx->reqbfr, bs, 0);
1846		params.more = 1;
1847		params.bfr_len = bs;
1848	}
1849
1850	skb = create_hash_wr(req, &params);
1851	if (IS_ERR(skb)) {
1852		error = PTR_ERR(skb);
1853		goto unmap;
1854	}
1855	req_ctx->hctx_wr.processed += params.sg_len;
1856	skb->dev = u_ctx->lldi.ports[0];
1857	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1858	chcr_send_wr(skb);
1859	return -EINPROGRESS;
1860unmap:
1861	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1862	return error;
1863}
1864
1865static int chcr_ahash_continue(struct ahash_request *req)
1866{
1867	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1868	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1869	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1870	struct uld_ctx *u_ctx = NULL;
1871	struct sk_buff *skb;
1872	struct hash_wr_param params;
1873	u8  bs;
1874	int error;
1875
1876	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1877	u_ctx = ULD_CTX(h_ctx(rtfm));
1878	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1879					    h_ctx(rtfm)->tx_qidx))) {
1880		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1881			return -EBUSY;
1882	}
1883	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1884	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1885	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1886		params.kctx_len *= 2;
1887		params.opad_needed = 1;
1888	} else {
1889		params.opad_needed = 0;
1890	}
1891	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
1892					    HASH_SPACE_LEFT(params.kctx_len),
1893					    hctx_wr->src_ofst);
1894	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
1895		params.sg_len = req->nbytes - hctx_wr->processed;
1896	if (!hctx_wr->result ||
1897	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
1898		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1899			params.kctx_len /= 2;
1900			params.opad_needed = 0;
1901		}
1902		params.last = 0;
1903		params.more = 1;
1904		params.sg_len = rounddown(params.sg_len, bs);
1905		params.hash_size = params.alg_prm.result_size;
1906		params.scmd1 = 0;
1907	} else {
1908		params.last = 1;
1909		params.more = 0;
1910		params.hash_size = crypto_ahash_digestsize(rtfm);
1911		params.scmd1 = reqctx->data_len + params.sg_len;
1912	}
1913	params.bfr_len = 0;
1914	reqctx->data_len += params.sg_len;
1915	skb = create_hash_wr(req, &params);
1916	if (IS_ERR(skb)) {
1917		error = PTR_ERR(skb);
1918		goto err;
1919	}
1920	hctx_wr->processed += params.sg_len;
1921	skb->dev = u_ctx->lldi.ports[0];
1922	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1923	chcr_send_wr(skb);
1924	return 0;
1925err:
1926	return error;
1927}
1928
1929static inline void chcr_handle_ahash_resp(struct ahash_request *req,
1930					  unsigned char *input,
1931					  int err)
1932{
1933	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1934	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1935	int digestsize, updated_digestsize;
1936	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1937	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1938
1939	if (input == NULL)
1940		goto out;
1941	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
1942	updated_digestsize = digestsize;
1943	if (digestsize == SHA224_DIGEST_SIZE)
1944		updated_digestsize = SHA256_DIGEST_SIZE;
1945	else if (digestsize == SHA384_DIGEST_SIZE)
1946		updated_digestsize = SHA512_DIGEST_SIZE;
1947
1948	if (hctx_wr->dma_addr) {
1949		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
1950				 hctx_wr->dma_len, DMA_TO_DEVICE);
1951		hctx_wr->dma_addr = 0;
1952	}
1953	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
1954				 req->nbytes)) {
1955		if (hctx_wr->result == 1) {
1956			hctx_wr->result = 0;
1957			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
1958			       digestsize);
1959		} else {
1960			memcpy(reqctx->partial_hash,
1961			       input + sizeof(struct cpl_fw6_pld),
1962			       updated_digestsize);
1963
1964		}
1965		goto unmap;
1966	}
1967	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
1968	       updated_digestsize);
1969
1970	err = chcr_ahash_continue(req);
1971	if (err)
1972		goto unmap;
1973	return;
1974unmap:
1975	if (hctx_wr->is_sg_map)
1976		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1977
1978
1979out:
1980	req->base.complete(&req->base, err);
1981}
1982
1983/*
1984 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
1985 *	@req: crypto request
1986 */
1987int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
1988			 int err)
1989{
1990	struct crypto_tfm *tfm = req->tfm;
1991	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1992	struct adapter *adap = padap(ctx->dev);
1993
1994	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
1995	case CRYPTO_ALG_TYPE_AEAD:
1996		chcr_handle_aead_resp(aead_request_cast(req), input, err);
1997		break;
1998
1999	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2000		err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
2001					      input, err);
2002		break;
2003
2004	case CRYPTO_ALG_TYPE_AHASH:
2005		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2006	}
2007	atomic_inc(&adap->chcr_stats.complete);
2008	return err;
2009}
2010static int chcr_ahash_export(struct ahash_request *areq, void *out)
2011{
2012	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2013	struct chcr_ahash_req_ctx *state = out;
2014
2015	state->reqlen = req_ctx->reqlen;
2016	state->data_len = req_ctx->data_len;
2017	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2018	memcpy(state->partial_hash, req_ctx->partial_hash,
2019	       CHCR_HASH_MAX_DIGEST_SIZE);
2020	chcr_init_hctx_per_wr(state);
2021	return 0;
2022}
2023
2024static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2025{
2026	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2027	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2028
2029	req_ctx->reqlen = state->reqlen;
2030	req_ctx->data_len = state->data_len;
2031	req_ctx->reqbfr = req_ctx->bfr1;
2032	req_ctx->skbfr = req_ctx->bfr2;
2033	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2034	memcpy(req_ctx->partial_hash, state->partial_hash,
2035	       CHCR_HASH_MAX_DIGEST_SIZE);
2036	chcr_init_hctx_per_wr(req_ctx);
2037	return 0;
2038}
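/* Note: export/import snapshot only the software-visible part of the hash
 * request context: the buffered partial block, the running data length and
 * the partial hash.  import() additionally re-points reqbfr/skbfr at the new
 * request's own buffers and resets the per-WR bookkeeping so the resumed
 * request starts with a clean hctx_wr state.
 */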
2039
2040static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2041			     unsigned int keylen)
2042{
2043	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2044	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2045	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2046	unsigned int i, err = 0, updated_digestsize;
2047
2048	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2049
2050	/* Use the key to calculate the ipad and opad. ipad will be sent with
2051	 * the first request's data, opad will be sent with the final hash
2052	 * result. ipad is stored in hmacctx->ipad and opad in hmacctx->opad.
2053	 */
2054	shash->tfm = hmacctx->base_hash;
2055	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
2056	if (keylen > bs) {
2057		err = crypto_shash_digest(shash, key, keylen,
2058					  hmacctx->ipad);
2059		if (err)
2060			goto out;
2061		keylen = digestsize;
2062	} else {
2063		memcpy(hmacctx->ipad, key, keylen);
2064	}
2065	memset(hmacctx->ipad + keylen, 0, bs - keylen);
2066	memcpy(hmacctx->opad, hmacctx->ipad, bs);
2067
2068	for (i = 0; i < bs / sizeof(int); i++) {
2069		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2070		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2071	}
2072
2073	updated_digestsize = digestsize;
2074	if (digestsize == SHA224_DIGEST_SIZE)
2075		updated_digestsize = SHA256_DIGEST_SIZE;
2076	else if (digestsize == SHA384_DIGEST_SIZE)
2077		updated_digestsize = SHA512_DIGEST_SIZE;
2078	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2079					hmacctx->ipad, digestsize);
2080	if (err)
2081		goto out;
2082	chcr_change_order(hmacctx->ipad, updated_digestsize);
2083
2084	err = chcr_compute_partial_hash(shash, hmacctx->opad,
2085					hmacctx->opad, digestsize);
2086	if (err)
2087		goto out;
2088	chcr_change_order(hmacctx->opad, updated_digestsize);
2089out:
2090	return err;
2091}
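/* Note: the setkey above is the usual HMAC precomputation.  A key longer
 * than one block is first hashed down, the zero-padded key is XORed with the
 * IPAD_DATA/OPAD_DATA fill words (presumably the standard 0x36/0x5c bytes of
 * RFC 2104), and only the single-block partial hashes of ipad and opad are
 * kept, reordered by chcr_change_order() into the layout the hardware
 * expects, so the device can continue hashing from that state.
 */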
2092
2093static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
2094			       unsigned int key_len)
2095{
2096	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2097	unsigned short context_size = 0;
2098	int err;
2099
2100	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2101	if (err)
2102		goto badkey_err;
2103
2104	memcpy(ablkctx->key, key, key_len);
2105	ablkctx->enckey_len = key_len;
2106	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2107	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2108	ablkctx->key_ctx_hdr =
2109		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2110				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2111				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2112				 CHCR_KEYCTX_NO_KEY, 1,
2113				 0, context_size);
2114	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2115	return 0;
2116badkey_err:
2117	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2118	ablkctx->enckey_len = 0;
2119
2120	return err;
2121}
2122
2123static int chcr_sha_init(struct ahash_request *areq)
2124{
2125	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2126	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2127	int digestsize =  crypto_ahash_digestsize(tfm);
2128
2129	req_ctx->data_len = 0;
2130	req_ctx->reqlen = 0;
2131	req_ctx->reqbfr = req_ctx->bfr1;
2132	req_ctx->skbfr = req_ctx->bfr2;
2133	copy_hash_init_values(req_ctx->partial_hash, digestsize);
2134
2135	return 0;
2136}
2137
2138static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2139{
2140	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2141				 sizeof(struct chcr_ahash_req_ctx));
2142	return chcr_device_init(crypto_tfm_ctx(tfm));
2143}
2144
2145static int chcr_hmac_init(struct ahash_request *areq)
2146{
2147	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2148	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2149	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2150	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2151	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2152
2153	chcr_sha_init(areq);
2154	req_ctx->data_len = bs;
2155	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2156		if (digestsize == SHA224_DIGEST_SIZE)
2157			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2158			       SHA256_DIGEST_SIZE);
2159		else if (digestsize == SHA384_DIGEST_SIZE)
2160			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2161			       SHA512_DIGEST_SIZE);
2162		else
2163			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2164			       digestsize);
2165	}
2166	return 0;
2167}
2168
2169static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2170{
2171	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2172	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2173	unsigned int digestsize =
2174		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2175
2176	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2177				 sizeof(struct chcr_ahash_req_ctx));
2178	hmacctx->base_hash = chcr_alloc_shash(digestsize);
2179	if (IS_ERR(hmacctx->base_hash))
2180		return PTR_ERR(hmacctx->base_hash);
2181	return chcr_device_init(crypto_tfm_ctx(tfm));
2182}
2183
2184static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2185{
2186	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2187	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2188
2189	if (hmacctx->base_hash) {
2190		chcr_free_shash(hmacctx->base_hash);
2191		hmacctx->base_hash = NULL;
2192	}
2193}
2194
2195static int chcr_aead_common_init(struct aead_request *req,
2196				 unsigned short op_type)
2197{
2198	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2199	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2200	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2201	int error = -EINVAL;
2202	unsigned int authsize = crypto_aead_authsize(tfm);
2203
2204	/* validate key size */
2205	if (aeadctx->enckey_len == 0)
2206		goto err;
2207	if (op_type && req->cryptlen < authsize)
2208		goto err;
2209	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2210				  op_type);
2211	if (error) {
2212		error = -ENOMEM;
2213		goto err;
2214	}
2215	reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
2216					  CHCR_SRC_SG_SIZE, 0);
2217	reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
2218					  CHCR_SRC_SG_SIZE, req->assoclen);
2219	return 0;
2220err:
2221	return error;
2222}
2223
2224static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2225				   int aadmax, int wrlen,
2226				   unsigned short op_type)
2227{
2228	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2229
2230	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2231	    dst_nents > MAX_DSGL_ENT ||
2232	    (req->assoclen > aadmax) ||
2233	    (wrlen > SGE_MAX_WR_LEN))
2234		return 1;
2235	return 0;
2236}
2237
2238static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2239{
2240	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2241	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2242	struct aead_request *subreq = aead_request_ctx(req);
2243
2244	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2245	aead_request_set_callback(subreq, req->base.flags,
2246				  req->base.complete, req->base.data);
2247	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2248			       req->iv);
2249	aead_request_set_ad(subreq, req->assoclen);
2250	return op_type ? crypto_aead_decrypt(subreq) :
2251		crypto_aead_encrypt(subreq);
2252}
2253
2254static struct sk_buff *create_authenc_wr(struct aead_request *req,
2255					 unsigned short qid,
2256					 int size,
2257					 unsigned short op_type)
2258{
2259	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2260	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2261	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2262	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2263	struct sk_buff *skb = NULL;
2264	struct chcr_wr *chcr_req;
2265	struct cpl_rx_phys_dsgl *phys_cpl;
2266	struct ulptx_sgl *ulptx;
2267	unsigned int transhdr_len;
2268	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2269	unsigned int   kctx_len = 0, dnents;
2270	unsigned int  assoclen = req->assoclen;
2271	unsigned int  authsize = crypto_aead_authsize(tfm);
2272	int error = -EINVAL;
2273	int null = 0;
2274	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2275		GFP_ATOMIC;
2276	struct adapter *adap = padap(a_ctx(tfm)->dev);
2277
2278	if (req->cryptlen == 0)
2279		return NULL;
2280
2281	reqctx->b0_dma = 0;
2282	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2283	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2284		null = 1;
2285		assoclen = 0;
2286	}
2287	error = chcr_aead_common_init(req, op_type);
2288	if (error)
2289		return ERR_PTR(error);
2290	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2291	dnents += sg_nents_xlen(req->dst, req->cryptlen +
2292		(op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
2293		req->assoclen);
2294	dnents += MIN_AUTH_SG; // For IV
2295
2296	dst_size = get_space_for_phys_dsgl(dnents);
2297	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2298		- sizeof(chcr_req->key_ctx);
2299	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2300	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
2301			SGE_MAX_WR_LEN;
2302	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
2303			: (sgl_len(reqctx->src_nents + reqctx->aad_nents
2304			+ MIN_GCM_SG) * 8);
2305	transhdr_len += temp;
2306	transhdr_len = roundup(transhdr_len, 16);
2307
2308	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2309				    transhdr_len, op_type)) {
2310		atomic_inc(&adap->chcr_stats.fallback);
2311		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2312				    op_type);
2313		return ERR_PTR(chcr_aead_fallback(req, op_type));
2314	}
2315	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2316	if (!skb) {
2317		error = -ENOMEM;
2318		goto err;
2319	}
2320
2321	chcr_req = __skb_put_zero(skb, transhdr_len);
2322
2323	temp  = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2324
2325	/*
2326	 * The input order is AAD, IV and payload, where the IV is included
2327	 * as part of the authdata. All other fields should be filled
2328	 * according to the hardware spec.
2329	 */
2330	chcr_req->sec_cpl.op_ivinsrtofst =
2331		FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
2332				       assoclen + 1);
2333	chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
2334	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2335					assoclen ? 1 : 0, assoclen,
2336					assoclen + IV + 1,
2337					(temp & 0x1F0) >> 4);
2338	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2339					temp & 0xF,
2340					null ? 0 : assoclen + IV + 1,
2341					temp, temp);
2342	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2343	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2344		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2345	else
2346		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2347	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2348					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
2349					temp,
2350					actx->auth_mode, aeadctx->hmac_ctrl,
2351					IV >> 1);
2352	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2353					 0, 0, dst_size);
2354
2355	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2356	if (op_type == CHCR_ENCRYPT_OP ||
2357		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2358		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2359		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2360		       aeadctx->enckey_len);
2361	else
2362		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2363		       aeadctx->enckey_len);
2364
2365	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2366	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2367	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2368	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2369		memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2370		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
2371				CTR_RFC3686_IV_SIZE);
2372		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
2373			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2374	} else {
2375		memcpy(reqctx->iv, req->iv, IV);
2376	}
2377	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2378	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2379	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
2380	chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
2381	atomic_inc(&adap->chcr_stats.cipher_rqst);
2382	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2383		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
2384	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2385		   transhdr_len, temp, 0);
2386	reqctx->skb = skb;
2387	reqctx->op = op_type;
2388
2389	return skb;
2390err:
2391	chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2392			    op_type);
2393
2394	return ERR_PTR(error);
2395}
2396
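/*
 * DMA-map an AEAD request: the IV copy in the request context is mapped
 * with dma_map_single() and the source/destination scatterlists with
 * dma_map_sg() (a shared src/dst list is mapped once, bidirectionally).
 * Requests without a payload are left unmapped. Returns 0 or -ENOMEM.
 */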
2397int chcr_aead_dma_map(struct device *dev,
2398		      struct aead_request *req,
2399		      unsigned short op_type)
2400{
2401	int error;
2402	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2403	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2404	unsigned int authsize = crypto_aead_authsize(tfm);
2405	int dst_size;
2406
2407	dst_size = req->assoclen + req->cryptlen + (op_type ?
2408				-authsize : authsize);
2409	if (!req->cryptlen || !dst_size)
2410		return 0;
2411	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
2412					DMA_BIDIRECTIONAL);
2413	if (dma_mapping_error(dev, reqctx->iv_dma))
2414		return -ENOMEM;
2415
2416	if (req->src == req->dst) {
2417		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2418				   DMA_BIDIRECTIONAL);
 
2419		if (!error)
2420			goto err;
2421	} else {
2422		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2423				   DMA_TO_DEVICE);
2424		if (!error)
2425			goto err;
2426		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2427				   DMA_FROM_DEVICE);
2428		if (!error) {
2429			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2430				   DMA_TO_DEVICE);
 
2431			goto err;
2432		}
2433	}
2434
2435	return 0;
2436err:
2437	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2438	return -ENOMEM;
2439}
2440
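/* Undo the DMA mappings set up by chcr_aead_dma_map() for this request. */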
2441void chcr_aead_dma_unmap(struct device *dev,
2442			 struct aead_request *req,
2443			 unsigned short op_type)
2444{
2445	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2446	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2447	unsigned int authsize = crypto_aead_authsize(tfm);
2448	int dst_size;
2449
2450	dst_size = req->assoclen + req->cryptlen + (op_type ?
2451					-authsize : authsize);
2452	if (!req->cryptlen || !dst_size)
2453		return;
2454
2455	dma_unmap_single(dev, reqctx->iv_dma, IV,
2456					DMA_BIDIRECTIONAL);
2457	if (req->src == req->dst) {
2458		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2459				   DMA_BIDIRECTIONAL);
 
2460	} else {
2461		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2462				   DMA_TO_DEVICE);
2463		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2464				   DMA_FROM_DEVICE);
2465	}
2466}
2467
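/*
 * Build the ULPTX source list for an AEAD work request. In immediate
 * mode the B0 block (if any), AAD, IV and payload are copied inline into
 * the WR; otherwise the corresponding page and scatterlist entries are
 * added through the ulptx_walk helpers.
 */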
2468void chcr_add_aead_src_ent(struct aead_request *req,
2469			   struct ulptx_sgl *ulptx,
2470			   unsigned int assoclen,
2471			   unsigned short op_type)
2472{
2473	struct ulptx_walk ulp_walk;
2474	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2475
2476	if (reqctx->imm) {
2477		u8 *buf = (u8 *)ulptx;
2478
2479		if (reqctx->b0_dma) {
2480			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2481			buf += reqctx->b0_len;
2482		}
2483		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2484				   buf, assoclen, 0);
2485		buf += assoclen;
2486		memcpy(buf, reqctx->iv, IV);
2487		buf += IV;
2488		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2489				   buf, req->cryptlen, req->assoclen);
2490	} else {
2491		ulptx_walk_init(&ulp_walk, ulptx);
2492		if (reqctx->b0_dma)
2493			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2494					    &reqctx->b0_dma);
2495		ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
2496		ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
2497		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
2498				  req->assoclen);
2499		ulptx_walk_end(&ulp_walk);
2500	}
2501}
2502
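/*
 * Build the destination PHYS_DSGL for an AEAD work request: B0 (if
 * present), the AAD portion of req->dst, the IV, and the payload with
 * the tag length added or subtracted depending on the operation.
 */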
2503void chcr_add_aead_dst_ent(struct aead_request *req,
2504			   struct cpl_rx_phys_dsgl *phys_cpl,
2505			   unsigned int assoclen,
2506			   unsigned short op_type,
2507			   unsigned short qid)
2508{
2509	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2510	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2511	struct dsgl_walk dsgl_walk;
2512	unsigned int authsize = crypto_aead_authsize(tfm);
 
2513	u32 temp;
 
2514
2515	dsgl_walk_init(&dsgl_walk, phys_cpl);
2516	if (reqctx->b0_dma)
2517		dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
2518	dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
2519	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2520	temp = req->cryptlen + (op_type ? -authsize : authsize);
2521	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
2522	dsgl_walk_end(&dsgl_walk, qid);
2523}
2524
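/*
 * Add source entries for a cipher work request: copy the IV and the
 * current chunk inline in immediate mode, or add IV and scatterlist
 * entries via ulptx_walk, recording where the walk stopped so a
 * follow-on work request can resume from that point.
 */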
2525void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2526			     struct ulptx_sgl *ulptx,
2527			     struct  cipher_wr_param *wrparam)
2528{
2529	struct ulptx_walk ulp_walk;
2530	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
 
2531
2532	if (reqctx->imm) {
2533		u8 *buf = (u8 *)ulptx;
2534
2535		memcpy(buf, reqctx->iv, IV);
2536		buf += IV;
2537		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2538				   buf, wrparam->bytes, reqctx->processed);
2539	} else {
2540		ulptx_walk_init(&ulp_walk, ulptx);
2541		ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
2542		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2543				  reqctx->src_ofst);
2544		reqctx->srcsg = ulp_walk.last_sg;
2545		reqctx->src_ofst = ulp_walk.last_sg_len;
2546		ulptx_walk_end(&ulp_walk);
2547	}
2548}
2549
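/*
 * Add destination entries for a cipher work request (IV followed by the
 * destination scatterlist) and record where the walk stopped for any
 * follow-on work request.
 */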
2550void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2551			     struct cpl_rx_phys_dsgl *phys_cpl,
2552			     struct  cipher_wr_param *wrparam,
2553			     unsigned short qid)
2554{
2555	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2556	struct dsgl_walk dsgl_walk;
 
2557
2558	dsgl_walk_init(&dsgl_walk, phys_cpl);
2559	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2560	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2561			 reqctx->dst_ofst);
2562	reqctx->dstsg = dsgl_walk.last_sg;
2563	reqctx->dst_ofst = dsgl_walk.last_sg_len;
2564
2565	dsgl_walk_end(&dsgl_walk, qid);
2566}
2567
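/*
 * Add source entries for a hash work request: any buffered partial block
 * first, then the request data, either copied inline (immediate mode) or
 * added as ULPTX scatterlist entries.
 */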
2568void chcr_add_hash_src_ent(struct ahash_request *req,
2569			   struct ulptx_sgl *ulptx,
2570			   struct hash_wr_param *param)
2571{
2572	struct ulptx_walk ulp_walk;
2573	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2574
2575	if (reqctx->hctx_wr.imm) {
2576		u8 *buf = (u8 *)ulptx;
2577
2578		if (param->bfr_len) {
2579			memcpy(buf, reqctx->reqbfr, param->bfr_len);
2580			buf += param->bfr_len;
2581		}
2582
2583		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2584				   sg_nents(reqctx->hctx_wr.srcsg), buf,
2585				   param->sg_len, 0);
2586	} else {
2587		ulptx_walk_init(&ulp_walk, ulptx);
2588		if (param->bfr_len)
2589			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2590					    &reqctx->hctx_wr.dma_addr);
2591		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2592				  param->sg_len, reqctx->hctx_wr.src_ofst);
2593		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2594		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2595		ulptx_walk_end(&ulp_walk);
2596	}
2597}
2598
2599int chcr_hash_dma_map(struct device *dev,
2600		      struct ahash_request *req)
2601{
2602	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2603	int error = 0;
2604
2605	if (!req->nbytes)
2606		return 0;
2607	error = dma_map_sg(dev, req->src, sg_nents(req->src),
2608			   DMA_TO_DEVICE);
2609	if (!error)
2610		return -ENOMEM;
2611	req_ctx->hctx_wr.is_sg_map = 1;
2612	return 0;
2613}
2614
2615void chcr_hash_dma_unmap(struct device *dev,
2616			 struct ahash_request *req)
2617{
2618	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2619
2620	if (!req->nbytes)
2621		return;
2622
2623	dma_unmap_sg(dev, req->src, sg_nents(req->src),
2624			   DMA_TO_DEVICE);
2625	req_ctx->hctx_wr.is_sg_map = 0;
2626
2627}
2628
2629int chcr_cipher_dma_map(struct device *dev,
2630			struct ablkcipher_request *req)
2631{
2632	int error;
2633	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2634
2635	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
2636					DMA_BIDIRECTIONAL);
2637	if (dma_mapping_error(dev, reqctx->iv_dma))
2638		return -ENOMEM;
2639
2640	if (req->src == req->dst) {
2641		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2642				   DMA_BIDIRECTIONAL);
2643		if (!error)
2644			goto err;
2645	} else {
2646		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2647				   DMA_TO_DEVICE);
2648		if (!error)
2649			goto err;
2650		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2651				   DMA_FROM_DEVICE);
2652		if (!error) {
2653			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2654				   DMA_TO_DEVICE);
2655			goto err;
2656		}
2657	}
2658
2659	return 0;
2660err:
2661	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2662	return -ENOMEM;
2663}
2664
2665void chcr_cipher_dma_unmap(struct device *dev,
2666			   struct ablkcipher_request *req)
2667{
2668	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2669
2670	dma_unmap_single(dev, reqctx->iv_dma, IV,
2671					DMA_BIDIRECTIONAL);
2672	if (req->src == req->dst) {
2673		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2674				   DMA_BIDIRECTIONAL);
2675	} else {
2676		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2677				   DMA_TO_DEVICE);
2678		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2679				   DMA_FROM_DEVICE);
2680	}
2681}
2682
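/*
 * Encode the CCM message length big-endian into the trailing bytes of the
 * csize-byte length field of the B0 block. Returns -EOVERFLOW if the
 * length does not fit in the field.
 */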
2683static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2684{
2685	__be32 data;
2686
2687	memset(block, 0, csize);
2688	block += csize;
2689
2690	if (csize >= 4)
2691		csize = 4;
2692	else if (msglen > (unsigned int)(1 << (8 * csize)))
2693		return -EOVERFLOW;
2694
2695	data = cpu_to_be32(msglen);
2696	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2697
2698	return 0;
2699}
2700
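/*
 * Construct the CCM B0 block in the request scratch pad: start from the
 * 16-byte IV, encode the tag length into the flags byte, set the AAD flag
 * when associated data is present, and write the message length into the
 * trailing L bytes.
 */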
2701	static int generate_b0(struct aead_request *req,
2702			       struct chcr_aead_ctx *aeadctx,
2703			       unsigned short op_type)
2704{
2705	unsigned int l, lp, m;
2706	int rc;
2707	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2708	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2709	u8 *b0 = reqctx->scratch_pad;
2710
2711	m = crypto_aead_authsize(aead);
2712
2713	memcpy(b0, reqctx->iv, 16);
2714
2715	lp = b0[0];
2716	l = lp + 1;
2717
2718	/* set m, bits 3-5 */
2719	*b0 |= (8 * ((m - 2) / 2));
2720
2721	/* set adata, bit 6, if associated data is used */
2722	if (req->assoclen)
2723		*b0 |= 64;
2724	rc = set_msg_len(b0 + 16 - l,
2725			 (op_type == CHCR_DECRYPT_OP) ?
2726			 req->cryptlen - m : req->cryptlen, l);

	return rc;
2727	}
2728
2729static inline int crypto_ccm_check_iv(const u8 *iv)
2730{
2731	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2732	if (iv[0] < 1 || iv[0] > 7)
2733		return -EINVAL;
2734
2735	return 0;
2736}
2737
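/*
 * Lay out the CCM inputs: build the 16-byte IV (salt, IV and a zeroed
 * counter for RFC4309), store the AAD length right after B0 in the
 * scratch pad, generate B0 and clear the counter portion of the IV.
 */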
2738static int ccm_format_packet(struct aead_request *req,
2739			     struct chcr_aead_ctx *aeadctx,
2740			     unsigned int sub_type,
2741			     unsigned short op_type)
 
2742{
2743	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2744	int rc = 0;
2745
2746	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2747		reqctx->iv[0] = 3;
2748		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
2749		memcpy(reqctx->iv + 4, req->iv, 8);
2750		memset(reqctx->iv + 12, 0, 4);
2751		*((unsigned short *)(reqctx->scratch_pad + 16)) =
2752			htons(req->assoclen - 8);
2753	} else {
2754		memcpy(reqctx->iv, req->iv, 16);
2755		*((unsigned short *)(reqctx->scratch_pad + 16)) =
2756			htons(req->assoclen);
2757	}
2758	rc = generate_b0(req, aeadctx, op_type);
2759	/* zero the ctr value */
2760	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
2761	return rc;
2762}
2763
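/*
 * Fill the SEC CPL for a CCM/RFC4309 request, accounting for the extra
 * B0 and AAD-length bytes when computing the AAD, cipher and auth
 * offsets, with AES-CCM as the cipher mode and CBC-MAC as the auth mode.
 */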
2764static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2765				  unsigned int dst_size,
2766				  struct aead_request *req,
2767				  unsigned short op_type)
2768{
2769	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2770	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2771	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2772	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2773	unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
2774	unsigned int ccm_xtra;
2775	unsigned char tag_offset = 0, auth_offset = 0;
2776	unsigned int assoclen;
2777
2778	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2779		assoclen = req->assoclen - 8;
2780	else
2781		assoclen = req->assoclen;
2782	ccm_xtra = CCM_B0_SIZE +
2783		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2784
2785	auth_offset = req->cryptlen ?
2786		(assoclen + IV + 1 + ccm_xtra) : 0;
2787	if (op_type == CHCR_DECRYPT_OP) {
2788		if (crypto_aead_authsize(tfm) != req->cryptlen)
2789			tag_offset = crypto_aead_authsize(tfm);
2790		else
2791			auth_offset = 0;
2792	}
2793
2794
2795	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2796					 2, assoclen + 1 + ccm_xtra);
2797	sec_cpl->pldlen =
2798		htonl(assoclen + IV + req->cryptlen + ccm_xtra);
2799	/* For CCM there will be b0 always. So AAD start will always be 1 */
2800	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2801					1, assoclen + ccm_xtra, assoclen
2802					+ IV + 1 + ccm_xtra, 0);
2803
2804	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2805					auth_offset, tag_offset,
2806					(op_type == CHCR_ENCRYPT_OP) ? 0 :
2807					crypto_aead_authsize(tfm));
2808	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2809					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2810					cipher_mode, mac_mode,
2811					aeadctx->hmac_ctrl, IV >> 1);
2812
2813	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2814					0, dst_size);
2815}
2816
2817static int aead_ccm_validate_input(unsigned short op_type,
2818				   struct aead_request *req,
2819				   struct chcr_aead_ctx *aeadctx,
2820				   unsigned int sub_type)
2821{
2822	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2823		if (crypto_ccm_check_iv(req->iv)) {
2824			pr_err("CCM: IV check fails\n");
2825			return -EINVAL;
2826		}
2827	} else {
2828		if (req->assoclen != 16 && req->assoclen != 20) {
2829			pr_err("RFC4309: Invalid AAD length %d\n",
2830			       req->assoclen);
2831			return -EINVAL;
2832		}
2833	}
2834	return 0;
2835}
2836
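/*
 * Build the work request for an AES-CCM (or RFC4309) AEAD operation:
 * size the destination DSGL, key context and transfer header, fall back
 * to the software implementation if hardware limits are exceeded, then
 * fill the SEC CPL and key context, format B0 and attach the source and
 * destination SG lists.
 */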
2837static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2838					  unsigned short qid,
2839					  int size,
2840					  unsigned short op_type)
2841{
2842	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2843	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2844	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2845	struct sk_buff *skb = NULL;
2846	struct chcr_wr *chcr_req;
2847	struct cpl_rx_phys_dsgl *phys_cpl;
2848	struct ulptx_sgl *ulptx;
2849	unsigned int transhdr_len;
2850	unsigned int dst_size = 0, kctx_len, dnents, temp;
2851	unsigned int sub_type, assoclen = req->assoclen;
2852	unsigned int authsize = crypto_aead_authsize(tfm);
2853	int error = -EINVAL;
 
2854	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2855		GFP_ATOMIC;
2856	struct adapter *adap = padap(a_ctx(tfm)->dev);
2857
2858	reqctx->b0_dma = 0;
2859	sub_type = get_aead_subtype(tfm);
2860	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2861		assoclen -= 8;
2862	error = chcr_aead_common_init(req, op_type);
 
2863	if (error)
2864		return ERR_PTR(error);
2865
2866
2867	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2868	error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
2869	if (error)
2870		goto err;
2871	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2872	dnents += sg_nents_xlen(req->dst, req->cryptlen
2873			+ (op_type ? -authsize : authsize),
2874			CHCR_DST_SG_SIZE, req->assoclen);
2875	dnents += MIN_CCM_SG; // For IV and B0
2876	dst_size = get_space_for_phys_dsgl(dnents);
2877	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2878	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2879	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
2880		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
2881	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
2882				     reqctx->b0_len, 16) :
2883		(sgl_len(reqctx->src_nents + reqctx->aad_nents +
2884				    MIN_CCM_SG) *  8);
2885	transhdr_len += temp;
2886	transhdr_len = roundup(transhdr_len, 16);
2887
2888	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2889				    reqctx->b0_len, transhdr_len, op_type)) {
2890		atomic_inc(&adap->chcr_stats.fallback);
2891		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2892				    op_type);
2893		return ERR_PTR(chcr_aead_fallback(req, op_type));
2894	}
2895	skb = alloc_skb(SGE_MAX_WR_LEN,  flags);
2896
2897	if (!skb) {
2898		error = -ENOMEM;
2899		goto err;
2900	}
2901
2902	chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
2903
2904	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type);
2905
2906	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2907	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2908	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2909			aeadctx->key, aeadctx->enckey_len);
2910
2911	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2912	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2913	error = ccm_format_packet(req, aeadctx, sub_type, op_type);
 
2914	if (error)
2915		goto dstmap_fail;
2916
2917	reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
2918					&reqctx->scratch_pad, reqctx->b0_len,
2919					DMA_BIDIRECTIONAL);
2920	if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
2921			      reqctx->b0_dma)) {
2922		error = -ENOMEM;
2923		goto dstmap_fail;
2924	}
2925
2926	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
2927	chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
2928
2929	atomic_inc(&adap->chcr_stats.aead_rqst);
2930	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2931		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
2932		reqctx->b0_len) : 0);
2933	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2934		    transhdr_len, temp, 0);
2935	reqctx->skb = skb;
2936	reqctx->op = op_type;
2937
2938	return skb;
2939dstmap_fail:
2940	kfree_skb(skb);
2941err:
2942	chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
2943	return ERR_PTR(error);
2944}
2945
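/*
 * Build the work request for an AES-GCM (or RFC4106) AEAD operation:
 * size the WR and fall back to software if needed, program AES-GCM with
 * GHASH authentication in the SEC CPL, load the cipher key and the
 * precomputed GHASH H into the key context, and construct the
 * SALT | IV | 0x00000001 counter block.
 */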
2946static struct sk_buff *create_gcm_wr(struct aead_request *req,
2947				     unsigned short qid,
2948				     int size,
2949				     unsigned short op_type)
2950{
2951	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2952	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 
2953	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2954	struct sk_buff *skb = NULL;
2955	struct chcr_wr *chcr_req;
2956	struct cpl_rx_phys_dsgl *phys_cpl;
2957	struct ulptx_sgl *ulptx;
2958	unsigned int transhdr_len, dnents = 0;
2959	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
2960	unsigned int authsize = crypto_aead_authsize(tfm);
2961	int error = -EINVAL;
 
2962	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2963		GFP_ATOMIC;
2964	struct adapter *adap = padap(a_ctx(tfm)->dev);
 
2965
2966	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
2967		assoclen = req->assoclen - 8;
2968
2969	reqctx->b0_dma = 0;
2970	error = chcr_aead_common_init(req, op_type);
2971	if (error)
2972		return ERR_PTR(error);
2973	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2974	dnents += sg_nents_xlen(req->dst, req->cryptlen +
2975				(op_type ? -authsize : authsize),
2976				CHCR_DST_SG_SIZE, req->assoclen);
 
2977	dnents += MIN_GCM_SG; // For IV
2978	dst_size = get_space_for_phys_dsgl(dnents);
2979	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
2980	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2981	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
2982			SGE_MAX_WR_LEN;
2983	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
2984		(sgl_len(reqctx->src_nents +
2985		reqctx->aad_nents + MIN_GCM_SG) * 8);
2986	transhdr_len += temp;
2987	transhdr_len = roundup(transhdr_len, 16);
2988	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2989			    transhdr_len, op_type)) {
 
2990		atomic_inc(&adap->chcr_stats.fallback);
2991		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2992				    op_type);
2993		return ERR_PTR(chcr_aead_fallback(req, op_type));
2994	}
2995	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2996	if (!skb) {
2997		error = -ENOMEM;
2998		goto err;
2999	}
3000
3001	chcr_req = __skb_put_zero(skb, transhdr_len);
3002
3003	//Offset of tag from end
3004	temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
3005	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3006					a_ctx(tfm)->dev->rx_channel_id, 2,
3007					(assoclen + 1));
3008	chcr_req->sec_cpl.pldlen =
3009		htonl(assoclen + IV + req->cryptlen);
3010	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3011					assoclen ? 1 : 0, assoclen,
3012					assoclen + IV + 1, 0);
 
3013	chcr_req->sec_cpl.cipherstop_lo_authinsert =
3014			FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
3015						temp, temp);
3016	chcr_req->sec_cpl.seqno_numivs =
3017			FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
3018					CHCR_ENCRYPT_OP) ? 1 : 0,
3019					CHCR_SCMD_CIPHER_MODE_AES_GCM,
3020					CHCR_SCMD_AUTH_MODE_GHASH,
3021					aeadctx->hmac_ctrl, IV >> 1);
3022	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3023					0, 0, dst_size);
3024	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3025	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3026	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3027	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3028
3029	/* prepare a 16 byte iv */
3030	/* S   A   L  T |  IV | 0x00000001 */
3031	if (get_aead_subtype(tfm) ==
3032	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3033		memcpy(reqctx->iv, aeadctx->salt, 4);
3034		memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
3035	} else {
3036		memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
3037	}
3038	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
3039
3040	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3041	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
3042
3043	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
3044	chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
3045	atomic_inc(&adap->chcr_stats.aead_rqst);
3046	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
3047		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
3048	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3049		    transhdr_len, temp, reqctx->verify);
3050	reqctx->skb = skb;
3051	reqctx->op = op_type;
3052	return skb;
3053
3054err:
3055	chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
3056	return ERR_PTR(error);
3057}
3058
3059
3060
3061static int chcr_aead_cra_init(struct crypto_aead *tfm)
3062{
3063	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3064	struct aead_alg *alg = crypto_aead_alg(tfm);
3065
3066	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3067					       CRYPTO_ALG_NEED_FALLBACK |
3068					       CRYPTO_ALG_ASYNC);
3069	if  (IS_ERR(aeadctx->sw_cipher))
3070		return PTR_ERR(aeadctx->sw_cipher);
3071	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3072				 sizeof(struct aead_request) +
3073				 crypto_aead_reqsize(aeadctx->sw_cipher)));
3074	return chcr_device_init(a_ctx(tfm));
3075}
3076
3077static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3078{
3079	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3080
3081	crypto_free_aead(aeadctx->sw_cipher);
3082}
3083
3084static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3085					unsigned int authsize)
3086{
3087	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3088
3089	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3090	aeadctx->mayverify = VERIFY_HW;
3091	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3092}
3093static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3094				    unsigned int authsize)
3095{
3096	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3097	u32 maxauth = crypto_aead_maxauthsize(tfm);
3098
3099	/* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3100	 * does not hold for SHA1, so the authsize == 12 check must come
3101	 * before the authsize == (maxauth >> 1) check.
3102	 */
3103	if (authsize == ICV_4) {
3104		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3105		aeadctx->mayverify = VERIFY_HW;
3106	} else if (authsize == ICV_6) {
3107		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3108		aeadctx->mayverify = VERIFY_HW;
3109	} else if (authsize == ICV_10) {
3110		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3111		aeadctx->mayverify = VERIFY_HW;
3112	} else if (authsize == ICV_12) {
3113		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3114		aeadctx->mayverify = VERIFY_HW;
3115	} else if (authsize == ICV_14) {
3116		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3117		aeadctx->mayverify = VERIFY_HW;
3118	} else if (authsize == (maxauth >> 1)) {
3119		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3120		aeadctx->mayverify = VERIFY_HW;
3121	} else if (authsize == maxauth) {
3122		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3123		aeadctx->mayverify = VERIFY_HW;
3124	} else {
3125		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3126		aeadctx->mayverify = VERIFY_SW;
3127	}
3128	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3129}
3130
3131
3132static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3133{
3134	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3135
3136	switch (authsize) {
3137	case ICV_4:
3138		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3139		aeadctx->mayverify = VERIFY_HW;
3140		break;
3141	case ICV_8:
3142		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3143		aeadctx->mayverify = VERIFY_HW;
3144		break;
3145	case ICV_12:
3146		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3147		aeadctx->mayverify = VERIFY_HW;
3148		break;
3149	case ICV_14:
3150		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3151		aeadctx->mayverify = VERIFY_HW;
3152		break;
3153	case ICV_16:
3154		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3155		aeadctx->mayverify = VERIFY_HW;
3156		break;
3157	case ICV_13:
3158	case ICV_15:
3159		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3160		aeadctx->mayverify = VERIFY_SW;
3161		break;
3162	default:
3163
3164		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3165				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3166		return -EINVAL;
3167	}
3168	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3169}
3170
3171static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3172					  unsigned int authsize)
3173{
3174	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3175
3176	switch (authsize) {
3177	case ICV_8:
3178		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3179		aeadctx->mayverify = VERIFY_HW;
3180		break;
3181	case ICV_12:
3182		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3183		aeadctx->mayverify = VERIFY_HW;
3184		break;
3185	case ICV_16:
3186		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3187		aeadctx->mayverify = VERIFY_HW;
3188		break;
3189	default:
3190		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3191				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3192		return -EINVAL;
3193	}
3194	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3195}
3196
3197static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3198				unsigned int authsize)
3199{
3200	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3201
3202	switch (authsize) {
3203	case ICV_4:
3204		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3205		aeadctx->mayverify = VERIFY_HW;
3206		break;
3207	case ICV_6:
3208		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3209		aeadctx->mayverify = VERIFY_HW;
3210		break;
3211	case ICV_8:
3212		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3213		aeadctx->mayverify = VERIFY_HW;
3214		break;
3215	case ICV_10:
3216		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3217		aeadctx->mayverify = VERIFY_HW;
3218		break;
3219	case ICV_12:
3220		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3221		aeadctx->mayverify = VERIFY_HW;
3222		break;
3223	case ICV_14:
3224		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3225		aeadctx->mayverify = VERIFY_HW;
3226		break;
3227	case ICV_16:
3228		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3229		aeadctx->mayverify = VERIFY_HW;
3230		break;
3231	default:
3232		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3233				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3234		return -EINVAL;
3235	}
3236	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3237}
3238
3239static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3240				const u8 *key,
3241				unsigned int keylen)
3242{
3243	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3244	unsigned char ck_size, mk_size;
3245	int key_ctx_size = 0;
3246
3247	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3248	if (keylen == AES_KEYSIZE_128) {
3249		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3250		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3251	} else if (keylen == AES_KEYSIZE_192) {
3252		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3253		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3254	} else if (keylen == AES_KEYSIZE_256) {
3255		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3256		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3257	} else {
3258		crypto_tfm_set_flags((struct crypto_tfm *)aead,
3259				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3260		aeadctx->enckey_len = 0;
3261		return	-EINVAL;
3262	}
3263	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3264						key_ctx_size >> 4);
3265	memcpy(aeadctx->key, key, keylen);
3266	aeadctx->enckey_len = keylen;
3267
3268	return 0;
3269}
3270
3271static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3272				const u8 *key,
3273				unsigned int keylen)
3274{
3275	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3276	int error;
3277
3278	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3279	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3280			      CRYPTO_TFM_REQ_MASK);
3281	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3282	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3283	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3284			      CRYPTO_TFM_RES_MASK);
3285	if (error)
3286		return error;
3287	return chcr_ccm_common_setkey(aead, key, keylen);
3288}
3289
3290static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3291				    unsigned int keylen)
3292{
3293	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3294	int error;
3295
3296	if (keylen < 3) {
3297		crypto_tfm_set_flags((struct crypto_tfm *)aead,
3298				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3299		aeadctx->enckey_len = 0;
3300		return	-EINVAL;
3301	}
3302	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3303	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3304			      CRYPTO_TFM_REQ_MASK);
3305	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3306	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3307	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3308			      CRYPTO_TFM_RES_MASK);
3309	if (error)
3310		return error;
3311	keylen -= 3;
3312	memcpy(aeadctx->salt, key + keylen, 3);
3313	return chcr_ccm_common_setkey(aead, key, keylen);
3314}
3315
3316static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3317			   unsigned int keylen)
3318{
3319	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3320	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3321	struct crypto_cipher *cipher;
3322	unsigned int ck_size;
3323	int ret = 0, key_ctx_size = 0;
 
3324
3325	aeadctx->enckey_len = 0;
3326	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3327	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3328			      & CRYPTO_TFM_REQ_MASK);
3329	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3330	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3331	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3332			      CRYPTO_TFM_RES_MASK);
3333	if (ret)
3334		goto out;
3335
3336	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3337	    keylen > 3) {
3338		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3339		memcpy(aeadctx->salt, key + keylen, 4);
3340	}
3341	if (keylen == AES_KEYSIZE_128) {
3342		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3343	} else if (keylen == AES_KEYSIZE_192) {
3344		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3345	} else if (keylen == AES_KEYSIZE_256) {
3346		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3347	} else {
3348		crypto_tfm_set_flags((struct crypto_tfm *)aead,
3349				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3350		pr_err("GCM: Invalid key length %d\n", keylen);
3351		ret = -EINVAL;
3352		goto out;
3353	}
3354
3355	memcpy(aeadctx->key, key, keylen);
3356	aeadctx->enckey_len = keylen;
3357	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3358		AEAD_H_SIZE;
3359	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3360						CHCR_KEYCTX_MAC_KEY_SIZE_128,
3361						0, 0,
3362						key_ctx_size >> 4);
3363	/* Calculate H = CIPH(K, 0 repeated 16 times).
3364	 * It goes into the key context.
3365	 */
3366	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
3367	if (IS_ERR(cipher)) {
3368		aeadctx->enckey_len = 0;
3369		ret = -ENOMEM;
3370		goto out;
3371	}
3372
3373	ret = crypto_cipher_setkey(cipher, key, keylen);
3374	if (ret) {
3375		aeadctx->enckey_len = 0;
3376		goto out1;
3377	}
3378	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3379	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
 
3380
3381out1:
3382	crypto_free_cipher(cipher);
3383out:
3384	return ret;
3385}
3386
3387static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3388				   unsigned int keylen)
3389{
3390	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3391	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3392	/* keys contains both the auth and cipher keys */
3393	struct crypto_authenc_keys keys;
3394	unsigned int bs, subtype;
3395	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3396	int err = 0, i, key_ctx_len = 0;
3397	unsigned char ck_size = 0;
3398	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3399	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3400	struct algo_param param;
3401	int align;
3402	u8 *o_ptr = NULL;
3403
3404	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3405	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3406			      & CRYPTO_TFM_REQ_MASK);
3407	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3408	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3409	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3410			      & CRYPTO_TFM_RES_MASK);
3411	if (err)
3412		goto out;
3413
3414	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3415		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3416		goto out;
3417	}
3418
3419	if (get_alg_config(&param, max_authsize)) {
3420		pr_err("chcr : Unsupported digest size\n");
3421		goto out;
3422	}
3423	subtype = get_aead_subtype(authenc);
3424	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3425		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3426		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3427			goto out;
3428		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3429		- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3430		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3431	}
3432	if (keys.enckeylen == AES_KEYSIZE_128) {
3433		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3434	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3435		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3436	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3437		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3438	} else {
3439		pr_err("chcr : Unsupported cipher key\n");
3440		goto out;
3441	}
3442
3443	/* Copy only the encryption key. The authkey is used to generate
3444	 * h(ipad) and h(opad), so it is not needed again. authkeylen equals
3445	 * the hash digest size.
3446	 */
3447	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3448	aeadctx->enckey_len = keys.enckeylen;
3449	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3450		subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3451
3452		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3453			    aeadctx->enckey_len << 3);
3454	}
3455	base_hash  = chcr_alloc_shash(max_authsize);
3456	if (IS_ERR(base_hash)) {
3457		pr_err("chcr : Base driver cannot be loaded\n");
3458		aeadctx->enckey_len = 0;
3459		memzero_explicit(&keys, sizeof(keys));
3460		return -EINVAL;
3461	}
3462	{
3463		SHASH_DESC_ON_STACK(shash, base_hash);
 
3464		shash->tfm = base_hash;
3465		shash->flags = crypto_shash_get_flags(base_hash);
3466		bs = crypto_shash_blocksize(base_hash);
3467		align = KEYCTX_ALIGN_PAD(max_authsize);
3468		o_ptr =  actx->h_iopad + param.result_size + align;
3469
3470		if (keys.authkeylen > bs) {
3471			err = crypto_shash_digest(shash, keys.authkey,
3472						  keys.authkeylen,
3473						  o_ptr);
3474			if (err) {
3475				pr_err("chcr : Base driver cannot be loaded\n");
3476				goto out;
3477			}
3478			keys.authkeylen = max_authsize;
3479		} else
3480			memcpy(o_ptr, keys.authkey, keys.authkeylen);
3481
3482		/* Compute the ipad-digest*/
3483		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3484		memcpy(pad, o_ptr, keys.authkeylen);
3485		for (i = 0; i < bs >> 2; i++)
3486			*((unsigned int *)pad + i) ^= IPAD_DATA;
3487
3488		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3489					      max_authsize))
3490			goto out;
3491		/* Compute the opad-digest */
3492		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3493		memcpy(pad, o_ptr, keys.authkeylen);
3494		for (i = 0; i < bs >> 2; i++)
3495			*((unsigned int *)pad + i) ^= OPAD_DATA;
3496
3497		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3498			goto out;
3499
3500		/* convert the ipad and opad digest to network order */
3501		chcr_change_order(actx->h_iopad, param.result_size);
3502		chcr_change_order(o_ptr, param.result_size);
3503		key_ctx_len = sizeof(struct _key_ctx) +
3504			roundup(keys.enckeylen, 16) +
3505			(param.result_size + align) * 2;
3506		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3507						0, 1, key_ctx_len >> 4);
3508		actx->auth_mode = param.auth_mode;
3509		chcr_free_shash(base_hash);
3510
3511		memzero_explicit(&keys, sizeof(keys));
3512		return 0;
3513	}
3514out:
3515	aeadctx->enckey_len = 0;
3516	memzero_explicit(&keys, sizeof(keys));
3517	if (!IS_ERR(base_hash))
3518		chcr_free_shash(base_hash);
3519	return -EINVAL;
3520}
3521
3522static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3523					const u8 *key, unsigned int keylen)
3524{
3525	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3526	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3527	struct crypto_authenc_keys keys;
3528	int err;
3529	/* it contains auth and cipher key both*/
3530	unsigned int subtype;
3531	int key_ctx_len = 0;
3532	unsigned char ck_size = 0;
3533
3534	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3535	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3536			      & CRYPTO_TFM_REQ_MASK);
3537	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3538	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3539	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3540			      & CRYPTO_TFM_RES_MASK);
3541	if (err)
3542		goto out;
3543
3544	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3545		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3546		goto out;
3547	}
3548	subtype = get_aead_subtype(authenc);
3549	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3550	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3551		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3552			goto out;
3553		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3554			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3555		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3556	}
3557	if (keys.enckeylen == AES_KEYSIZE_128) {
3558		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3559	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3560		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3561	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3562		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3563	} else {
3564		pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
3565		goto out;
3566	}
3567	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3568	aeadctx->enckey_len = keys.enckeylen;
3569	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3570	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3571		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3572				aeadctx->enckey_len << 3);
3573	}
3574	key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3575
3576	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3577						0, key_ctx_len >> 4);
3578	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3579	memzero_explicit(&keys, sizeof(keys));
3580	return 0;
3581out:
3582	aeadctx->enckey_len = 0;
3583	memzero_explicit(&keys, sizeof(keys));
3584	return -EINVAL;
3585}
3586
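/*
 * Common AEAD submission path: check that a crypto device is present and
 * the queue is not full, build the work request with the supplied
 * constructor and hand it to the firmware. Returns -EINPROGRESS once the
 * request has been queued.
 */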
3587static int chcr_aead_op(struct aead_request *req,
3588			unsigned short op_type,
3589			int size,
3590			create_wr_t create_wr_fn)
3591{
3592	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3593	struct uld_ctx *u_ctx;
3594	struct sk_buff *skb;
 
3595
3596	if (!a_ctx(tfm)->dev) {
3597		pr_err("chcr : %s : No crypto device.\n", __func__);
 
3598		return -ENXIO;
3599	}
3600	u_ctx = ULD_CTX(a_ctx(tfm));
3601	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3602				   a_ctx(tfm)->tx_qidx)) {
3603		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3604			return -EBUSY;
3605	}
3606
3607	/* Form a WR from req */
3608	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
3609			   op_type);
3610
3611	if (IS_ERR(skb) || !skb)
3612		return PTR_ERR(skb);
3613
3614	skb->dev = u_ctx->lldi.ports[0];
3615	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
3616	chcr_send_wr(skb);
3617	return -EINPROGRESS;
3618}
3619
3620static int chcr_aead_encrypt(struct aead_request *req)
3621{
3622	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3623	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3624
3625	reqctx->verify = VERIFY_HW;
 
3626
3627	switch (get_aead_subtype(tfm)) {
3628	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3629	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3630	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3631	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3632		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3633				    create_authenc_wr);
3634	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3635	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3636		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3637				    create_aead_ccm_wr);
3638	default:
3639		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3640				    create_gcm_wr);
3641	}
3642}
3643
3644static int chcr_aead_decrypt(struct aead_request *req)
3645{
3646	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3647	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 
3648	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3649	int size;
3650
3651	if (aeadctx->mayverify == VERIFY_SW) {
3652		size = crypto_aead_maxauthsize(tfm);
3653		reqctx->verify = VERIFY_SW;
3654	} else {
3655		size = 0;
3656		reqctx->verify = VERIFY_HW;
3657	}
3658
3659	switch (get_aead_subtype(tfm)) {
3660	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3661	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3662	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3663	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3664		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3665				    create_authenc_wr);
3666	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3667	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3668		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3669				    create_aead_ccm_wr);
3670	default:
3671		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3672				    create_gcm_wr);
3673	}
3674}
3675
3676static struct chcr_alg_template driver_algs[] = {
3677	/* AES-CBC */
3678	{
3679		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3680		.is_registered = 0,
3681		.alg.crypto = {
3682			.cra_name		= "cbc(aes)",
3683			.cra_driver_name	= "cbc-aes-chcr",
3684			.cra_blocksize		= AES_BLOCK_SIZE,
3685			.cra_init		= chcr_cra_init,
3686			.cra_exit		= chcr_cra_exit,
3687			.cra_u.ablkcipher	= {
3688				.min_keysize	= AES_MIN_KEY_SIZE,
3689				.max_keysize	= AES_MAX_KEY_SIZE,
3690				.ivsize		= AES_BLOCK_SIZE,
3691				.setkey			= chcr_aes_cbc_setkey,
3692				.encrypt		= chcr_aes_encrypt,
3693				.decrypt		= chcr_aes_decrypt,
3694			}
3695		}
3696	},
3697	{
3698		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3699		.is_registered = 0,
3700		.alg.crypto =   {
3701			.cra_name		= "xts(aes)",
3702			.cra_driver_name	= "xts-aes-chcr",
3703			.cra_blocksize		= AES_BLOCK_SIZE,
3704			.cra_init		= chcr_cra_init,
3705			.cra_exit		= NULL,
3706			.cra_u .ablkcipher = {
3707					.min_keysize	= 2 * AES_MIN_KEY_SIZE,
3708					.max_keysize	= 2 * AES_MAX_KEY_SIZE,
3709					.ivsize		= AES_BLOCK_SIZE,
3710					.setkey		= chcr_aes_xts_setkey,
3711					.encrypt	= chcr_aes_encrypt,
3712					.decrypt	= chcr_aes_decrypt,
3713				}
3714			}
3715	},
3716	{
3717		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3718		.is_registered = 0,
3719		.alg.crypto = {
3720			.cra_name		= "ctr(aes)",
3721			.cra_driver_name	= "ctr-aes-chcr",
3722			.cra_blocksize		= 1,
3723			.cra_init		= chcr_cra_init,
3724			.cra_exit		= chcr_cra_exit,
3725			.cra_u.ablkcipher	= {
3726				.min_keysize	= AES_MIN_KEY_SIZE,
3727				.max_keysize	= AES_MAX_KEY_SIZE,
3728				.ivsize		= AES_BLOCK_SIZE,
3729				.setkey		= chcr_aes_ctr_setkey,
3730				.encrypt	= chcr_aes_encrypt,
3731				.decrypt	= chcr_aes_decrypt,
3732			}
3733		}
3734	},
3735	{
3736		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3737			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3738		.is_registered = 0,
3739		.alg.crypto = {
3740			.cra_name		= "rfc3686(ctr(aes))",
3741			.cra_driver_name	= "rfc3686-ctr-aes-chcr",
3742			.cra_blocksize		= 1,
3743			.cra_init		= chcr_rfc3686_init,
3744			.cra_exit		= chcr_cra_exit,
3745			.cra_u.ablkcipher	= {
3746				.min_keysize	= AES_MIN_KEY_SIZE +
3747					CTR_RFC3686_NONCE_SIZE,
3748				.max_keysize	= AES_MAX_KEY_SIZE +
3749					CTR_RFC3686_NONCE_SIZE,
3750				.ivsize		= CTR_RFC3686_IV_SIZE,
3751				.setkey		= chcr_aes_rfc3686_setkey,
3752				.encrypt	= chcr_aes_encrypt,
3753				.decrypt	= chcr_aes_decrypt,
3754				.geniv          = "seqiv",
3755			}
3756		}
3757	},
3758	/* SHA */
3759	{
3760		.type = CRYPTO_ALG_TYPE_AHASH,
3761		.is_registered = 0,
3762		.alg.hash = {
3763			.halg.digestsize = SHA1_DIGEST_SIZE,
3764			.halg.base = {
3765				.cra_name = "sha1",
3766				.cra_driver_name = "sha1-chcr",
3767				.cra_blocksize = SHA1_BLOCK_SIZE,
3768			}
3769		}
3770	},
3771	{
3772		.type = CRYPTO_ALG_TYPE_AHASH,
3773		.is_registered = 0,
3774		.alg.hash = {
3775			.halg.digestsize = SHA256_DIGEST_SIZE,
3776			.halg.base = {
3777				.cra_name = "sha256",
3778				.cra_driver_name = "sha256-chcr",
3779				.cra_blocksize = SHA256_BLOCK_SIZE,
3780			}
3781		}
3782	},
3783	{
3784		.type = CRYPTO_ALG_TYPE_AHASH,
3785		.is_registered = 0,
3786		.alg.hash = {
3787			.halg.digestsize = SHA224_DIGEST_SIZE,
3788			.halg.base = {
3789				.cra_name = "sha224",
3790				.cra_driver_name = "sha224-chcr",
3791				.cra_blocksize = SHA224_BLOCK_SIZE,
3792			}
3793		}
3794	},
3795	{
3796		.type = CRYPTO_ALG_TYPE_AHASH,
3797		.is_registered = 0,
3798		.alg.hash = {
3799			.halg.digestsize = SHA384_DIGEST_SIZE,
3800			.halg.base = {
3801				.cra_name = "sha384",
3802				.cra_driver_name = "sha384-chcr",
3803				.cra_blocksize = SHA384_BLOCK_SIZE,
3804			}
3805		}
3806	},
3807	{
3808		.type = CRYPTO_ALG_TYPE_AHASH,
3809		.is_registered = 0,
3810		.alg.hash = {
3811			.halg.digestsize = SHA512_DIGEST_SIZE,
3812			.halg.base = {
3813				.cra_name = "sha512",
3814				.cra_driver_name = "sha512-chcr",
3815				.cra_blocksize = SHA512_BLOCK_SIZE,
3816			}
3817		}
3818	},
3819	/* HMAC */
3820	{
3821		.type = CRYPTO_ALG_TYPE_HMAC,
3822		.is_registered = 0,
3823		.alg.hash = {
3824			.halg.digestsize = SHA1_DIGEST_SIZE,
3825			.halg.base = {
3826				.cra_name = "hmac(sha1)",
3827				.cra_driver_name = "hmac-sha1-chcr",
3828				.cra_blocksize = SHA1_BLOCK_SIZE,
3829			}
3830		}
3831	},
3832	{
3833		.type = CRYPTO_ALG_TYPE_HMAC,
3834		.is_registered = 0,
3835		.alg.hash = {
3836			.halg.digestsize = SHA224_DIGEST_SIZE,
3837			.halg.base = {
3838				.cra_name = "hmac(sha224)",
3839				.cra_driver_name = "hmac-sha224-chcr",
3840				.cra_blocksize = SHA224_BLOCK_SIZE,
3841			}
3842		}
3843	},
3844	{
3845		.type = CRYPTO_ALG_TYPE_HMAC,
3846		.is_registered = 0,
3847		.alg.hash = {
3848			.halg.digestsize = SHA256_DIGEST_SIZE,
3849			.halg.base = {
3850				.cra_name = "hmac(sha256)",
3851				.cra_driver_name = "hmac-sha256-chcr",
3852				.cra_blocksize = SHA256_BLOCK_SIZE,
3853			}
3854		}
3855	},
3856	{
3857		.type = CRYPTO_ALG_TYPE_HMAC,
3858		.is_registered = 0,
3859		.alg.hash = {
3860			.halg.digestsize = SHA384_DIGEST_SIZE,
3861			.halg.base = {
3862				.cra_name = "hmac(sha384)",
3863				.cra_driver_name = "hmac-sha384-chcr",
3864				.cra_blocksize = SHA384_BLOCK_SIZE,
3865			}
3866		}
3867	},
3868	{
3869		.type = CRYPTO_ALG_TYPE_HMAC,
3870		.is_registered = 0,
3871		.alg.hash = {
3872			.halg.digestsize = SHA512_DIGEST_SIZE,
3873			.halg.base = {
3874				.cra_name = "hmac(sha512)",
3875				.cra_driver_name = "hmac-sha512-chcr",
3876				.cra_blocksize = SHA512_BLOCK_SIZE,
3877			}
3878		}
3879	},
3880	/* Add AEAD Algorithms */
3881	{
3882		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3883		.is_registered = 0,
3884		.alg.aead = {
3885			.base = {
3886				.cra_name = "gcm(aes)",
3887				.cra_driver_name = "gcm-aes-chcr",
3888				.cra_blocksize	= 1,
3889				.cra_priority = CHCR_AEAD_PRIORITY,
3890				.cra_ctxsize =	sizeof(struct chcr_context) +
3891						sizeof(struct chcr_aead_ctx) +
3892						sizeof(struct chcr_gcm_ctx),
3893			},
3894			.ivsize = GCM_AES_IV_SIZE,
3895			.maxauthsize = GHASH_DIGEST_SIZE,
3896			.setkey = chcr_gcm_setkey,
3897			.setauthsize = chcr_gcm_setauthsize,
3898		}
3899	},
3900	{
3901		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3902		.is_registered = 0,
3903		.alg.aead = {
3904			.base = {
3905				.cra_name = "rfc4106(gcm(aes))",
3906				.cra_driver_name = "rfc4106-gcm-aes-chcr",
3907				.cra_blocksize	 = 1,
3908				.cra_priority = CHCR_AEAD_PRIORITY + 1,
3909				.cra_ctxsize =	sizeof(struct chcr_context) +
3910						sizeof(struct chcr_aead_ctx) +
3911						sizeof(struct chcr_gcm_ctx),
3912
3913			},
3914			.ivsize = GCM_RFC4106_IV_SIZE,
3915			.maxauthsize	= GHASH_DIGEST_SIZE,
3916			.setkey = chcr_gcm_setkey,
3917			.setauthsize	= chcr_4106_4309_setauthsize,
3918		}
3919	},
3920	{
3921		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3922		.is_registered = 0,
3923		.alg.aead = {
3924			.base = {
3925				.cra_name = "ccm(aes)",
3926				.cra_driver_name = "ccm-aes-chcr",
3927				.cra_blocksize	 = 1,
3928				.cra_priority = CHCR_AEAD_PRIORITY,
3929				.cra_ctxsize =	sizeof(struct chcr_context) +
3930						sizeof(struct chcr_aead_ctx),
3931
3932			},
3933			.ivsize = AES_BLOCK_SIZE,
3934			.maxauthsize	= GHASH_DIGEST_SIZE,
3935			.setkey = chcr_aead_ccm_setkey,
3936			.setauthsize	= chcr_ccm_setauthsize,
3937		}
3938	},
3939	{
3940		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
3941		.is_registered = 0,
3942		.alg.aead = {
3943			.base = {
3944				.cra_name = "rfc4309(ccm(aes))",
3945				.cra_driver_name = "rfc4309-ccm-aes-chcr",
3946				.cra_blocksize	 = 1,
3947				.cra_priority = CHCR_AEAD_PRIORITY + 1,
3948				.cra_ctxsize =	sizeof(struct chcr_context) +
3949						sizeof(struct chcr_aead_ctx),
3950
3951			},
3952			.ivsize = 8,
3953			.maxauthsize	= GHASH_DIGEST_SIZE,
3954			.setkey = chcr_aead_rfc4309_setkey,
3955			.setauthsize = chcr_4106_4309_setauthsize,
3956		}
3957	},
3958	{
3959		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3960		.is_registered = 0,
3961		.alg.aead = {
3962			.base = {
3963				.cra_name = "authenc(hmac(sha1),cbc(aes))",
3964				.cra_driver_name =
3965					"authenc-hmac-sha1-cbc-aes-chcr",
3966				.cra_blocksize	 = AES_BLOCK_SIZE,
3967				.cra_priority = CHCR_AEAD_PRIORITY,
3968				.cra_ctxsize =	sizeof(struct chcr_context) +
3969						sizeof(struct chcr_aead_ctx) +
3970						sizeof(struct chcr_authenc_ctx),
3971
3972			},
3973			.ivsize = AES_BLOCK_SIZE,
3974			.maxauthsize = SHA1_DIGEST_SIZE,
3975			.setkey = chcr_authenc_setkey,
3976			.setauthsize = chcr_authenc_setauthsize,
3977		}
3978	},
3979	{
3980		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3981		.is_registered = 0,
3982		.alg.aead = {
3983			.base = {
3984
3985				.cra_name = "authenc(hmac(sha256),cbc(aes))",
3986				.cra_driver_name =
3987					"authenc-hmac-sha256-cbc-aes-chcr",
3988				.cra_blocksize	 = AES_BLOCK_SIZE,
3989				.cra_priority = CHCR_AEAD_PRIORITY,
3990				.cra_ctxsize =	sizeof(struct chcr_context) +
3991						sizeof(struct chcr_aead_ctx) +
3992						sizeof(struct chcr_authenc_ctx),
3993
3994			},
3995			.ivsize = AES_BLOCK_SIZE,
3996			.maxauthsize	= SHA256_DIGEST_SIZE,
3997			.setkey = chcr_authenc_setkey,
3998			.setauthsize = chcr_authenc_setauthsize,
3999		}
4000	},
4001	{
4002		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4003		.is_registered = 0,
4004		.alg.aead = {
4005			.base = {
4006				.cra_name = "authenc(hmac(sha224),cbc(aes))",
4007				.cra_driver_name =
4008					"authenc-hmac-sha224-cbc-aes-chcr",
4009				.cra_blocksize	 = AES_BLOCK_SIZE,
4010				.cra_priority = CHCR_AEAD_PRIORITY,
4011				.cra_ctxsize =	sizeof(struct chcr_context) +
4012						sizeof(struct chcr_aead_ctx) +
4013						sizeof(struct chcr_authenc_ctx),
4014			},
4015			.ivsize = AES_BLOCK_SIZE,
4016			.maxauthsize = SHA224_DIGEST_SIZE,
4017			.setkey = chcr_authenc_setkey,
4018			.setauthsize = chcr_authenc_setauthsize,
4019		}
4020	},
4021	{
4022		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4023		.is_registered = 0,
4024		.alg.aead = {
4025			.base = {
4026				.cra_name = "authenc(hmac(sha384),cbc(aes))",
4027				.cra_driver_name =
4028					"authenc-hmac-sha384-cbc-aes-chcr",
4029				.cra_blocksize	 = AES_BLOCK_SIZE,
4030				.cra_priority = CHCR_AEAD_PRIORITY,
4031				.cra_ctxsize =	sizeof(struct chcr_context) +
4032						sizeof(struct chcr_aead_ctx) +
4033						sizeof(struct chcr_authenc_ctx),
4034
4035			},
4036			.ivsize = AES_BLOCK_SIZE,
4037			.maxauthsize = SHA384_DIGEST_SIZE,
4038			.setkey = chcr_authenc_setkey,
4039			.setauthsize = chcr_authenc_setauthsize,
4040		}
4041	},
4042	{
4043		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4044		.is_registered = 0,
4045		.alg.aead = {
4046			.base = {
4047				.cra_name = "authenc(hmac(sha512),cbc(aes))",
4048				.cra_driver_name =
4049					"authenc-hmac-sha512-cbc-aes-chcr",
4050				.cra_blocksize	 = AES_BLOCK_SIZE,
4051				.cra_priority = CHCR_AEAD_PRIORITY,
4052				.cra_ctxsize =	sizeof(struct chcr_context) +
4053						sizeof(struct chcr_aead_ctx) +
4054						sizeof(struct chcr_authenc_ctx),
4055
4056			},
4057			.ivsize = AES_BLOCK_SIZE,
4058			.maxauthsize = SHA512_DIGEST_SIZE,
4059			.setkey = chcr_authenc_setkey,
4060			.setauthsize = chcr_authenc_setauthsize,
4061		}
4062	},
4063	{
4064		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4065		.is_registered = 0,
4066		.alg.aead = {
4067			.base = {
4068				.cra_name = "authenc(digest_null,cbc(aes))",
4069				.cra_driver_name =
4070					"authenc-digest_null-cbc-aes-chcr",
4071				.cra_blocksize	 = AES_BLOCK_SIZE,
4072				.cra_priority = CHCR_AEAD_PRIORITY,
4073				.cra_ctxsize =	sizeof(struct chcr_context) +
4074						sizeof(struct chcr_aead_ctx) +
4075						sizeof(struct chcr_authenc_ctx),
4076
4077			},
4078			.ivsize  = AES_BLOCK_SIZE,
4079			.maxauthsize = 0,
4080			.setkey  = chcr_aead_digest_null_setkey,
4081			.setauthsize = chcr_authenc_null_setauthsize,
4082		}
4083	},
4084	{
4085		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4086		.is_registered = 0,
4087		.alg.aead = {
4088			.base = {
4089				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4090				.cra_driver_name =
4091				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4092				.cra_blocksize	 = 1,
4093				.cra_priority = CHCR_AEAD_PRIORITY,
4094				.cra_ctxsize =	sizeof(struct chcr_context) +
4095						sizeof(struct chcr_aead_ctx) +
4096						sizeof(struct chcr_authenc_ctx),
4097
4098			},
4099			.ivsize = CTR_RFC3686_IV_SIZE,
4100			.maxauthsize = SHA1_DIGEST_SIZE,
4101			.setkey = chcr_authenc_setkey,
4102			.setauthsize = chcr_authenc_setauthsize,
4103		}
4104	},
4105	{
4106		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4107		.is_registered = 0,
4108		.alg.aead = {
4109			.base = {
4110
4111				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4112				.cra_driver_name =
4113				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4114				.cra_blocksize	 = 1,
4115				.cra_priority = CHCR_AEAD_PRIORITY,
4116				.cra_ctxsize =	sizeof(struct chcr_context) +
4117						sizeof(struct chcr_aead_ctx) +
4118						sizeof(struct chcr_authenc_ctx),
4119
4120			},
4121			.ivsize = CTR_RFC3686_IV_SIZE,
4122			.maxauthsize = SHA256_DIGEST_SIZE,
4123			.setkey = chcr_authenc_setkey,
4124			.setauthsize = chcr_authenc_setauthsize,
4125		}
4126	},
4127	{
4128		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4129		.is_registered = 0,
4130		.alg.aead = {
4131			.base = {
4132				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4133				.cra_driver_name =
4134				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4135				.cra_blocksize	 = 1,
4136				.cra_priority = CHCR_AEAD_PRIORITY,
4137				.cra_ctxsize =	sizeof(struct chcr_context) +
4138						sizeof(struct chcr_aead_ctx) +
4139						sizeof(struct chcr_authenc_ctx),
4140			},
4141			.ivsize = CTR_RFC3686_IV_SIZE,
4142			.maxauthsize = SHA224_DIGEST_SIZE,
4143			.setkey = chcr_authenc_setkey,
4144			.setauthsize = chcr_authenc_setauthsize,
4145		}
4146	},
4147	{
4148		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4149		.is_registered = 0,
4150		.alg.aead = {
4151			.base = {
4152				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4153				.cra_driver_name =
4154				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4155				.cra_blocksize	 = 1,
4156				.cra_priority = CHCR_AEAD_PRIORITY,
4157				.cra_ctxsize =	sizeof(struct chcr_context) +
4158						sizeof(struct chcr_aead_ctx) +
4159						sizeof(struct chcr_authenc_ctx),
4160
4161			},
4162			.ivsize = CTR_RFC3686_IV_SIZE,
4163			.maxauthsize = SHA384_DIGEST_SIZE,
4164			.setkey = chcr_authenc_setkey,
4165			.setauthsize = chcr_authenc_setauthsize,
4166		}
4167	},
4168	{
4169		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4170		.is_registered = 0,
4171		.alg.aead = {
4172			.base = {
4173				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4174				.cra_driver_name =
4175				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4176				.cra_blocksize	 = 1,
4177				.cra_priority = CHCR_AEAD_PRIORITY,
4178				.cra_ctxsize =	sizeof(struct chcr_context) +
4179						sizeof(struct chcr_aead_ctx) +
4180						sizeof(struct chcr_authenc_ctx),
4181
4182			},
4183			.ivsize = CTR_RFC3686_IV_SIZE,
4184			.maxauthsize = SHA512_DIGEST_SIZE,
4185			.setkey = chcr_authenc_setkey,
4186			.setauthsize = chcr_authenc_setauthsize,
4187		}
4188	},
4189	{
4190		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4191		.is_registered = 0,
4192		.alg.aead = {
4193			.base = {
4194				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4195				.cra_driver_name =
4196				"authenc-digest_null-rfc3686-ctr-aes-chcr",
4197				.cra_blocksize	 = 1,
4198				.cra_priority = CHCR_AEAD_PRIORITY,
4199				.cra_ctxsize =	sizeof(struct chcr_context) +
4200						sizeof(struct chcr_aead_ctx) +
4201						sizeof(struct chcr_authenc_ctx),
4202
4203			},
4204			.ivsize  = CTR_RFC3686_IV_SIZE,
4205			.maxauthsize = 0,
4206			.setkey  = chcr_aead_digest_null_setkey,
4207			.setauthsize = chcr_authenc_null_setauthsize,
4208		}
4209	},
4210
4211};
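
/*
 * Usage note (illustrative sketch, not part of the driver): each entry in
 * driver_algs[] is registered under its generic cra_name, so a kernel user
 * asking for e.g. "authenc(hmac(sha256),cbc(aes))" may be handed this
 * hardware implementation whenever CHCR_AEAD_PRIORITY beats the competing
 * software providers.  A minimal caller could look like the following
 * (error handling trimmed; authenc_key/authenc_keylen are assumed to be in
 * the authenc key format that chcr_authenc_setkey() parses):
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
 *	crypto_aead_setkey(tfm, authenc_key, authenc_keylen);
 *	...
 *	crypto_free_aead(tfm);
 */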
4212
4213/*
4214 *	chcr_unregister_alg - Deregister crypto algorithms from the
4215 *	kernel crypto framework.
4216 */
4217static int chcr_unregister_alg(void)
4218{
4219	int i;
4220
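	/*
	 * Walk the template table and undo whatever chcr_register_alg()
	 * registered, keyed off the algorithm type; is_registered is cleared
	 * so that a later start_crypto() can register the entry again.
	 */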
4221	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4222		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4223		case CRYPTO_ALG_TYPE_ABLKCIPHER:
4224			if (driver_algs[i].is_registered)
4225				crypto_unregister_alg(
4226						&driver_algs[i].alg.crypto);
4227			break;
4228		case CRYPTO_ALG_TYPE_AEAD:
4229			if (driver_algs[i].is_registered)
4230				crypto_unregister_aead(
4231						&driver_algs[i].alg.aead);
4232			break;
4233		case CRYPTO_ALG_TYPE_AHASH:
4234			if (driver_algs[i].is_registered)
4235				crypto_unregister_ahash(
4236						&driver_algs[i].alg.hash);
4237			break;
4238		}
4239		driver_algs[i].is_registered = 0;
4240	}
4241	return 0;
4242}
4243
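/*
 * Sizes advertised to the crypto API for the ahash entries below: plain
 * digests need only the base chcr_context, HMAC transforms carry an extra
 * struct hmac_ctx for the keyed-hash state, and SZ_AHASH_REQ_CTX is used
 * as the export/import statesize.
 */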
4244#define SZ_AHASH_CTX sizeof(struct chcr_context)
4245#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4246#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4247#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
4248
4249/*
4250 *	chcr_register_alg - Register crypto algorithms with the kernel crypto framework.
4251 */
4252static int chcr_register_alg(void)
4253{
4254	struct crypto_alg ai;
4255	struct ahash_alg *a_hash;
4256	int err = 0, i;
4257	char *name = NULL;
4258
4259	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4260		if (driver_algs[i].is_registered)
4261			continue;
4262		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4263		case CRYPTO_ALG_TYPE_ABLKCIPHER:
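			/*
			 * Block ciphers: fill in the fields common to every
			 * entry (priority, flags, context size) before
			 * registering.  CRYPTO_ALG_NEED_FALLBACK is set
			 * because the driver falls back to software for
			 * requests it cannot offload.
			 */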
4264			driver_algs[i].alg.crypto.cra_priority =
4265				CHCR_CRA_PRIORITY;
4266			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
4267			driver_algs[i].alg.crypto.cra_flags =
4268				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4269				CRYPTO_ALG_NEED_FALLBACK;
4270			driver_algs[i].alg.crypto.cra_ctxsize =
4271				sizeof(struct chcr_context) +
4272				sizeof(struct ablk_ctx);
4273			driver_algs[i].alg.crypto.cra_alignmask = 0;
4274			driver_algs[i].alg.crypto.cra_type =
4275				&crypto_ablkcipher_type;
4276			err = crypto_register_alg(&driver_algs[i].alg.crypto);
4277			name = driver_algs[i].alg.crypto.cra_driver_name;
4278			break;
4279		case CRYPTO_ALG_TYPE_AEAD:
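			/*
			 * AEAD: the per-mode setkey/setauthsize hooks come
			 * from the table above; the common encrypt/decrypt
			 * and init/exit callbacks are wired up here before
			 * registration.
			 */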
4280			driver_algs[i].alg.aead.base.cra_flags =
4281				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
4282				CRYPTO_ALG_NEED_FALLBACK;
4283			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4284			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4285			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4286			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4287			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4288			err = crypto_register_aead(&driver_algs[i].alg.aead);
4289			name = driver_algs[i].alg.aead.base.cra_driver_name;
4290			break;
4291		case CRYPTO_ALG_TYPE_AHASH:
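			/*
			 * ahash: fill in the common ahash_alg callbacks,
			 * then pick the HMAC or plain-digest init/ctxsize
			 * variant depending on the entry type.
			 */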
4292			a_hash = &driver_algs[i].alg.hash;
4293			a_hash->update = chcr_ahash_update;
4294			a_hash->final = chcr_ahash_final;
4295			a_hash->finup = chcr_ahash_finup;
4296			a_hash->digest = chcr_ahash_digest;
4297			a_hash->export = chcr_ahash_export;
4298			a_hash->import = chcr_ahash_import;
4299			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4300			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4301			a_hash->halg.base.cra_module = THIS_MODULE;
4302			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
4303			a_hash->halg.base.cra_alignmask = 0;
4304			a_hash->halg.base.cra_exit = NULL;
4305			a_hash->halg.base.cra_type = &crypto_ahash_type;
4306
4307			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4308				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4309				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4310				a_hash->init = chcr_hmac_init;
4311				a_hash->setkey = chcr_ahash_setkey;
4312				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4313			} else {
4314				a_hash->init = chcr_sha_init;
4315				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4316				a_hash->halg.base.cra_init = chcr_sha_cra_init;
4317			}
4318			err = crypto_register_ahash(&driver_algs[i].alg.hash);
4319			ai = driver_algs[i].alg.hash.halg.base;
4320			name = ai.cra_driver_name;
4321			break;
4322		}
4323		if (err) {
4324			pr_err("%s : Algorithm registration failed\n",
4325			       name);
4326			goto register_err;
4327		} else {
4328			driver_algs[i].is_registered = 1;
4329		}
4330	}
4331	return 0;
4332
4333register_err:
4334	chcr_unregister_alg();
4335	return err;
4336}
4337
4338/*
4339 *	start_crypto - Register the crypto algorithms.
4340 *	This should be called once when the first device comes up. After this
4341 *	the kernel will start calling driver APIs for crypto operations.
4342 */
4343int start_crypto(void)
4344{
4345	return chcr_register_alg();
4346}
4347
4348/*
4349 *	stop_crypto - Deregister all the crypto algorithms from the kernel.
4350 *	This should be called once when the last device goes down. After this
4351 *	the kernel will not call the driver API for crypto operations.
4352 */
4353int stop_crypto(void)
4354{
4355	chcr_unregister_alg();
4356	return 0;
4357}
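
/*
 * Illustrative pairing (sketch only; the real call sites live in the
 * driver core, outside this file): start_crypto() is expected to run when
 * the first adapter appears and stop_crypto() when the last one goes away,
 * e.g. around a hypothetical device counter:
 *
 *	if (atomic_inc_return(&dev_count) == 1)
 *		start_crypto();
 *	...
 *	if (atomic_dec_and_test(&dev_count))
 *		stop_crypto();
 */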