v3.1
 
   1/*
   2 * Support for Intel AES-NI instructions. This file contains glue
   3 * code, the real AES implementation is in intel-aes_asm.S.
   4 *
   5 * Copyright (C) 2008, Intel Corp.
   6 *    Author: Huang Ying <ying.huang@intel.com>
   7 *
   8 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
   9 * interface for 64-bit kernels.
  10 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
  11 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
  12 *             Tadeusz Struk (tadeusz.struk@intel.com)
  13 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
  14 *    Copyright (c) 2010, Intel Corporation.
  15 *
  16 * This program is free software; you can redistribute it and/or modify
  17 * it under the terms of the GNU General Public License as published by
  18 * the Free Software Foundation; either version 2 of the License, or
  19 * (at your option) any later version.
  20 */
  21
  22#include <linux/hardirq.h>
  23#include <linux/types.h>
  24#include <linux/crypto.h>
  25#include <linux/err.h>
  26#include <crypto/algapi.h>
  27#include <crypto/aes.h>
  28#include <crypto/cryptd.h>
  29#include <crypto/ctr.h>
  30#include <asm/i387.h>
  31#include <asm/aes.h>
  32#include <crypto/scatterwalk.h>
  33#include <crypto/internal/aead.h>
  34#include <linux/workqueue.h>
  35#include <linux/spinlock.h>
  36
  37#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
  38#define HAS_CTR
  39#endif
  40
  41#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
  42#define HAS_LRW
  43#endif
  44
  45#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
  46#define HAS_PCBC
  47#endif
  48
  49#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
  50#define HAS_XTS
  51#endif
  52
  53struct async_aes_ctx {
  54	struct cryptd_ablkcipher *cryptd_tfm;
  55};
  56
  57/* This data is stored at the end of the crypto_tfm struct.
  58 * It's a type of per "session" data storage location.
  59 * This needs to be 16 byte aligned.
  60 */
  61struct aesni_rfc4106_gcm_ctx {
  62	u8 hash_subkey[16];
  63	struct crypto_aes_ctx aes_key_expanded;
  64	u8 nonce[4];
  65	struct cryptd_aead *cryptd_tfm;
  66};
  67
  68struct aesni_gcm_set_hash_subkey_result {
  69	int err;
  70	struct completion completion;
  71};
  72
  73struct aesni_hash_subkey_req_data {
  74	u8 iv[16];
  75	struct aesni_gcm_set_hash_subkey_result result;
  76	struct scatterlist sg;
  77};
  78
  79#define AESNI_ALIGN	(16)
  80#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
  81#define RFC4106_HASH_SUBKEY_SIZE 16
  82
  83asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
  84			     unsigned int key_len);
  85asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
  86			  const u8 *in);
  87asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
  88			  const u8 *in);
  89asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
  90			      const u8 *in, unsigned int len);
  91asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
  92			      const u8 *in, unsigned int len);
  93asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
  94			      const u8 *in, unsigned int len, u8 *iv);
  95asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
  96			      const u8 *in, unsigned int len, u8 *iv);
  97
  98int crypto_fpu_init(void);
  99void crypto_fpu_exit(void);
 100
 101#ifdef CONFIG_X86_64
 102asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 103			      const u8 *in, unsigned int len, u8 *iv);
 104
 105/* asmlinkage void aesni_gcm_enc()
 106 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 107 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 108 * const u8 *in, Plaintext input
 109 * unsigned long plaintext_len, Length of data in bytes for encryption.
 110 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 111 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 112 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 113 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 114 * const u8 *aad, Additional Authentication Data (AAD)
 115 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 116 *          is going to be 8 or 12 bytes
 117 * u8 *auth_tag, Authenticated Tag output.
 118 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 119 *          Valid values are 16 (most likely), 12 or 8.
 120 */
 121asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
 122			const u8 *in, unsigned long plaintext_len, u8 *iv,
 123			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 124			u8 *auth_tag, unsigned long auth_tag_len);
 125
 126/* asmlinkage void aesni_gcm_dec()
 127 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 128 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 129 * const u8 *in, Ciphertext input
 130 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 131 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 132 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 133 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 134 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 135 * const u8 *aad, Additional Authentication Data (AAD)
 136 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 137 * to be 8 or 12 bytes
 138 * u8 *auth_tag, Authenticated Tag output.
 139 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 140 * Valid values are 16 (most likely), 12 or 8.
 141 */
 142asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
 143			const u8 *in, unsigned long ciphertext_len, u8 *iv,
 144			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 145			u8 *auth_tag, unsigned long auth_tag_len);
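/*
 * Illustrative sketch, not part of the original file: one way a caller
 * could assemble the 16-byte pre-counter block j0 described in the
 * comments above (4-byte salt from the Security Association, 8-byte IV
 * from the ESP payload, then 0x00000001).  The helper name is
 * hypothetical; the driver itself builds this block inline further down.
 */
static inline void rfc4106_build_j0_sketch(u8 *j0, const u8 *salt,
					   const u8 *esp_iv)
{
	memcpy(j0, salt, 4);			/* 4-byte salt         */
	memcpy(j0 + 4, esp_iv, 8);		/* 8-byte explicit IV  */
	*(__be32 *)(j0 + 12) = cpu_to_be32(1);	/* initial counter     */
}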
 146
 147static inline struct
 148aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
 149{
 150	return
 151		(struct aesni_rfc4106_gcm_ctx *)
 152		PTR_ALIGN((u8 *)
 153		crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
 154}
 155#endif
 156
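/*
 * aes_ctx() returns a 16-byte aligned view of the raw tfm context: if
 * the crypto API already guarantees at least AESNI_ALIGN alignment the
 * pointer is used as-is, otherwise it is rounded up, which is why the
 * algorithms below reserve AESNI_ALIGN - 1 extra bytes in cra_ctxsize.
 */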
 157static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 158{
 159	unsigned long addr = (unsigned long)raw_ctx;
 160	unsigned long align = AESNI_ALIGN;
 161
 162	if (align <= crypto_tfm_ctx_alignment())
 163		align = 1;
 164	return (struct crypto_aes_ctx *)ALIGN(addr, align);
 165}
 166
 167static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 168			      const u8 *in_key, unsigned int key_len)
 169{
 170	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
 171	u32 *flags = &tfm->crt_flags;
 172	int err;
 173
 174	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
 175	    key_len != AES_KEYSIZE_256) {
 176		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 177		return -EINVAL;
 178	}
 179
 180	if (!irq_fpu_usable())
 181		err = crypto_aes_expand_key(ctx, in_key, key_len);
 182	else {
 183		kernel_fpu_begin();
 184		err = aesni_set_key(ctx, in_key, key_len);
 185		kernel_fpu_end();
 186	}
 187
 188	return err;
 189}
 190
 191static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 192		       unsigned int key_len)
 193{
 194	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
 195}
 196
 197static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 198{
 199	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 200
 201	if (!irq_fpu_usable())
 202		crypto_aes_encrypt_x86(ctx, dst, src);
 203	else {
 204		kernel_fpu_begin();
 205		aesni_enc(ctx, dst, src);
 206		kernel_fpu_end();
 207	}
 208}
 209
 210static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 211{
 212	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 213
 214	if (!irq_fpu_usable())
 215		crypto_aes_decrypt_x86(ctx, dst, src);
 216	else {
 217		kernel_fpu_begin();
 218		aesni_dec(ctx, dst, src);
 219		kernel_fpu_end();
 220	}
 221}
 222
 223static struct crypto_alg aesni_alg = {
 224	.cra_name		= "aes",
 225	.cra_driver_name	= "aes-aesni",
 226	.cra_priority		= 300,
 227	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 228	.cra_blocksize		= AES_BLOCK_SIZE,
 229	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
 230	.cra_alignmask		= 0,
 231	.cra_module		= THIS_MODULE,
 232	.cra_list		= LIST_HEAD_INIT(aesni_alg.cra_list),
 233	.cra_u	= {
 234		.cipher	= {
 235			.cia_min_keysize	= AES_MIN_KEY_SIZE,
 236			.cia_max_keysize	= AES_MAX_KEY_SIZE,
 237			.cia_setkey		= aes_set_key,
 238			.cia_encrypt		= aes_encrypt,
 239			.cia_decrypt		= aes_decrypt
 240		}
 241	}
 242};
 243
 244static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 245{
 246	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 247
 248	aesni_enc(ctx, dst, src);
 249}
 250
 251static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 252{
 253	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 254
 255	aesni_dec(ctx, dst, src);
 256}
 257
 258static struct crypto_alg __aesni_alg = {
 259	.cra_name		= "__aes-aesni",
 260	.cra_driver_name	= "__driver-aes-aesni",
 261	.cra_priority		= 0,
 262	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 263	.cra_blocksize		= AES_BLOCK_SIZE,
 264	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
 265	.cra_alignmask		= 0,
 266	.cra_module		= THIS_MODULE,
 267	.cra_list		= LIST_HEAD_INIT(__aesni_alg.cra_list),
 268	.cra_u	= {
 269		.cipher	= {
 270			.cia_min_keysize	= AES_MIN_KEY_SIZE,
 271			.cia_max_keysize	= AES_MAX_KEY_SIZE,
 272			.cia_setkey		= aes_set_key,
 273			.cia_encrypt		= __aes_encrypt,
 274			.cia_decrypt		= __aes_decrypt
 275		}
 276	}
 277};
 278
 279static int ecb_encrypt(struct blkcipher_desc *desc,
 280		       struct scatterlist *dst, struct scatterlist *src,
 281		       unsigned int nbytes)
 282{
 283	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 284	struct blkcipher_walk walk;
 285	int err;
 286
 287	blkcipher_walk_init(&walk, dst, src, nbytes);
 288	err = blkcipher_walk_virt(desc, &walk);
 289	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 290
 291	kernel_fpu_begin();
 292	while ((nbytes = walk.nbytes)) {
 293		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 294			      nbytes & AES_BLOCK_MASK);
 295		nbytes &= AES_BLOCK_SIZE - 1;
 296		err = blkcipher_walk_done(desc, &walk, nbytes);
 297	}
 298	kernel_fpu_end();
 299
 300	return err;
 301}
 302
 303static int ecb_decrypt(struct blkcipher_desc *desc,
 304		       struct scatterlist *dst, struct scatterlist *src,
 305		       unsigned int nbytes)
 306{
 307	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 308	struct blkcipher_walk walk;
 309	int err;
 310
 311	blkcipher_walk_init(&walk, dst, src, nbytes);
 312	err = blkcipher_walk_virt(desc, &walk);
 313	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 314
 315	kernel_fpu_begin();
 316	while ((nbytes = walk.nbytes)) {
 317		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 318			      nbytes & AES_BLOCK_MASK);
 319		nbytes &= AES_BLOCK_SIZE - 1;
 320		err = blkcipher_walk_done(desc, &walk, nbytes);
 321	}
 322	kernel_fpu_end();
 323
 324	return err;
 325}
 326
 327static struct crypto_alg blk_ecb_alg = {
 328	.cra_name		= "__ecb-aes-aesni",
 329	.cra_driver_name	= "__driver-ecb-aes-aesni",
 330	.cra_priority		= 0,
 331	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
 332	.cra_blocksize		= AES_BLOCK_SIZE,
 333	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
 334	.cra_alignmask		= 0,
 335	.cra_type		= &crypto_blkcipher_type,
 336	.cra_module		= THIS_MODULE,
 337	.cra_list		= LIST_HEAD_INIT(blk_ecb_alg.cra_list),
 338	.cra_u = {
 339		.blkcipher = {
 340			.min_keysize	= AES_MIN_KEY_SIZE,
 341			.max_keysize	= AES_MAX_KEY_SIZE,
 342			.setkey		= aes_set_key,
 343			.encrypt	= ecb_encrypt,
 344			.decrypt	= ecb_decrypt,
 345		},
 346	},
 347};
 348
 349static int cbc_encrypt(struct blkcipher_desc *desc,
 350		       struct scatterlist *dst, struct scatterlist *src,
 351		       unsigned int nbytes)
 352{
 353	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 354	struct blkcipher_walk walk;
 355	int err;
 356
 357	blkcipher_walk_init(&walk, dst, src, nbytes);
 358	err = blkcipher_walk_virt(desc, &walk);
 359	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 360
 361	kernel_fpu_begin();
 362	while ((nbytes = walk.nbytes)) {
 363		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 364			      nbytes & AES_BLOCK_MASK, walk.iv);
 365		nbytes &= AES_BLOCK_SIZE - 1;
 366		err = blkcipher_walk_done(desc, &walk, nbytes);
 367	}
 368	kernel_fpu_end();
 369
 370	return err;
 371}
 372
 373static int cbc_decrypt(struct blkcipher_desc *desc,
 374		       struct scatterlist *dst, struct scatterlist *src,
 375		       unsigned int nbytes)
 376{
 377	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 378	struct blkcipher_walk walk;
 379	int err;
 380
 381	blkcipher_walk_init(&walk, dst, src, nbytes);
 382	err = blkcipher_walk_virt(desc, &walk);
 383	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 384
 385	kernel_fpu_begin();
 386	while ((nbytes = walk.nbytes)) {
 387		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 388			      nbytes & AES_BLOCK_MASK, walk.iv);
 389		nbytes &= AES_BLOCK_SIZE - 1;
 390		err = blkcipher_walk_done(desc, &walk, nbytes);
 391	}
 392	kernel_fpu_end();
 393
 394	return err;
 395}
 396
 397static struct crypto_alg blk_cbc_alg = {
 398	.cra_name		= "__cbc-aes-aesni",
 399	.cra_driver_name	= "__driver-cbc-aes-aesni",
 400	.cra_priority		= 0,
 401	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
 402	.cra_blocksize		= AES_BLOCK_SIZE,
 403	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
 404	.cra_alignmask		= 0,
 405	.cra_type		= &crypto_blkcipher_type,
 406	.cra_module		= THIS_MODULE,
 407	.cra_list		= LIST_HEAD_INIT(blk_cbc_alg.cra_list),
 408	.cra_u = {
 409		.blkcipher = {
 410			.min_keysize	= AES_MIN_KEY_SIZE,
 411			.max_keysize	= AES_MAX_KEY_SIZE,
 412			.setkey		= aes_set_key,
 413			.encrypt	= cbc_encrypt,
 414			.decrypt	= cbc_decrypt,
 415		},
 416	},
 417};
 418
 419#ifdef CONFIG_X86_64
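/*
 * Handle the final partial block in CTR mode: encrypt the current
 * counter block to get one keystream block, XOR in the remaining
 * bytes and advance the counter.
 */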
 420static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
 421			    struct blkcipher_walk *walk)
 422{
 423	u8 *ctrblk = walk->iv;
 424	u8 keystream[AES_BLOCK_SIZE];
 425	u8 *src = walk->src.virt.addr;
 426	u8 *dst = walk->dst.virt.addr;
 427	unsigned int nbytes = walk->nbytes;
 428
 429	aesni_enc(ctx, keystream, ctrblk);
 430	crypto_xor(keystream, src, nbytes);
 431	memcpy(dst, keystream, nbytes);
 432	crypto_inc(ctrblk, AES_BLOCK_SIZE);
 433}
 434
 435static int ctr_crypt(struct blkcipher_desc *desc,
 436		     struct scatterlist *dst, struct scatterlist *src,
 437		     unsigned int nbytes)
 438{
 439	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 440	struct blkcipher_walk walk;
 441	int err;
 442
 443	blkcipher_walk_init(&walk, dst, src, nbytes);
 444	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
 445	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 446
 447	kernel_fpu_begin();
 448	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 449		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 450			      nbytes & AES_BLOCK_MASK, walk.iv);
 451		nbytes &= AES_BLOCK_SIZE - 1;
 452		err = blkcipher_walk_done(desc, &walk, nbytes);
 453	}
 454	if (walk.nbytes) {
 455		ctr_crypt_final(ctx, &walk);
 456		err = blkcipher_walk_done(desc, &walk, 0);
 457	}
 458	kernel_fpu_end();
 459
 460	return err;
 461}
 462
 463static struct crypto_alg blk_ctr_alg = {
 464	.cra_name		= "__ctr-aes-aesni",
 465	.cra_driver_name	= "__driver-ctr-aes-aesni",
 466	.cra_priority		= 0,
 467	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
 468	.cra_blocksize		= 1,
 469	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
 470	.cra_alignmask		= 0,
 471	.cra_type		= &crypto_blkcipher_type,
 472	.cra_module		= THIS_MODULE,
 473	.cra_list		= LIST_HEAD_INIT(blk_ctr_alg.cra_list),
 474	.cra_u = {
 475		.blkcipher = {
 476			.min_keysize	= AES_MIN_KEY_SIZE,
 477			.max_keysize	= AES_MAX_KEY_SIZE,
 478			.ivsize		= AES_BLOCK_SIZE,
 479			.setkey		= aes_set_key,
 480			.encrypt	= ctr_crypt,
 481			.decrypt	= ctr_crypt,
 482		},
 483	},
 484};
 485#endif
 486
 487static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
 488			unsigned int key_len)
 489{
 490	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 491	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
 492	int err;
 493
 494	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 495	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
 496				    & CRYPTO_TFM_REQ_MASK);
 497	err = crypto_ablkcipher_setkey(child, key, key_len);
 498	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
 499				    & CRYPTO_TFM_RES_MASK);
 500	return err;
 501}
 502
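/*
 * The ablk_* wrappers implement the async interface: when the FPU is
 * not usable in the current context the request is deferred to the
 * cryptd workqueue, otherwise it is handled synchronously through the
 * underlying __driver-* blkcipher.
 */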
 503static int ablk_encrypt(struct ablkcipher_request *req)
 504{
 505	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 506	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 507
 508	if (!irq_fpu_usable()) {
 509		struct ablkcipher_request *cryptd_req =
 510			ablkcipher_request_ctx(req);
 511		memcpy(cryptd_req, req, sizeof(*req));
 512		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
 513		return crypto_ablkcipher_encrypt(cryptd_req);
 514	} else {
 515		struct blkcipher_desc desc;
 516		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
 517		desc.info = req->info;
 518		desc.flags = 0;
 519		return crypto_blkcipher_crt(desc.tfm)->encrypt(
 520			&desc, req->dst, req->src, req->nbytes);
 521	}
 522}
 523
 524static int ablk_decrypt(struct ablkcipher_request *req)
 525{
 526	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 527	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 528
 529	if (!irq_fpu_usable()) {
 530		struct ablkcipher_request *cryptd_req =
 531			ablkcipher_request_ctx(req);
 532		memcpy(cryptd_req, req, sizeof(*req));
 533		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
 534		return crypto_ablkcipher_decrypt(cryptd_req);
 535	} else {
 536		struct blkcipher_desc desc;
 537		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
 538		desc.info = req->info;
 539		desc.flags = 0;
 540		return crypto_blkcipher_crt(desc.tfm)->decrypt(
 541			&desc, req->dst, req->src, req->nbytes);
 542	}
 543}
 544
 545static void ablk_exit(struct crypto_tfm *tfm)
 546{
 547	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 548
 549	cryptd_free_ablkcipher(ctx->cryptd_tfm);
 550}
 551
 552static void ablk_init_common(struct crypto_tfm *tfm,
 553			     struct cryptd_ablkcipher *cryptd_tfm)
 554{
 555	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 556
 557	ctx->cryptd_tfm = cryptd_tfm;
 558	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
 559		crypto_ablkcipher_reqsize(&cryptd_tfm->base);
 560}
 561
 562static int ablk_ecb_init(struct crypto_tfm *tfm)
 563{
 564	struct cryptd_ablkcipher *cryptd_tfm;
 565
 566	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
 567	if (IS_ERR(cryptd_tfm))
 568		return PTR_ERR(cryptd_tfm);
 569	ablk_init_common(tfm, cryptd_tfm);
 570	return 0;
 571}
 572
 573static struct crypto_alg ablk_ecb_alg = {
 574	.cra_name		= "ecb(aes)",
 575	.cra_driver_name	= "ecb-aes-aesni",
 576	.cra_priority		= 400,
 577	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 578	.cra_blocksize		= AES_BLOCK_SIZE,
 579	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 580	.cra_alignmask		= 0,
 581	.cra_type		= &crypto_ablkcipher_type,
 582	.cra_module		= THIS_MODULE,
 583	.cra_list		= LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
 584	.cra_init		= ablk_ecb_init,
 585	.cra_exit		= ablk_exit,
 586	.cra_u = {
 587		.ablkcipher = {
 588			.min_keysize	= AES_MIN_KEY_SIZE,
 589			.max_keysize	= AES_MAX_KEY_SIZE,
 590			.setkey		= ablk_set_key,
 591			.encrypt	= ablk_encrypt,
 592			.decrypt	= ablk_decrypt,
 593		},
 594	},
 595};
 596
 597static int ablk_cbc_init(struct crypto_tfm *tfm)
 598{
 599	struct cryptd_ablkcipher *cryptd_tfm;
 600
 601	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
 602	if (IS_ERR(cryptd_tfm))
 603		return PTR_ERR(cryptd_tfm);
 604	ablk_init_common(tfm, cryptd_tfm);
 605	return 0;
 606}
 607
 608static struct crypto_alg ablk_cbc_alg = {
 609	.cra_name		= "cbc(aes)",
 610	.cra_driver_name	= "cbc-aes-aesni",
 611	.cra_priority		= 400,
 612	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 613	.cra_blocksize		= AES_BLOCK_SIZE,
 614	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 615	.cra_alignmask		= 0,
 616	.cra_type		= &crypto_ablkcipher_type,
 617	.cra_module		= THIS_MODULE,
 618	.cra_list		= LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
 619	.cra_init		= ablk_cbc_init,
 620	.cra_exit		= ablk_exit,
 621	.cra_u = {
 622		.ablkcipher = {
 623			.min_keysize	= AES_MIN_KEY_SIZE,
 624			.max_keysize	= AES_MAX_KEY_SIZE,
 625			.ivsize		= AES_BLOCK_SIZE,
 626			.setkey		= ablk_set_key,
 627			.encrypt	= ablk_encrypt,
 628			.decrypt	= ablk_decrypt,
 629		},
 630	},
 631};
 632
 633#ifdef CONFIG_X86_64
 634static int ablk_ctr_init(struct crypto_tfm *tfm)
 635{
 636	struct cryptd_ablkcipher *cryptd_tfm;
 637
 638	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
 639	if (IS_ERR(cryptd_tfm))
 640		return PTR_ERR(cryptd_tfm);
 641	ablk_init_common(tfm, cryptd_tfm);
 642	return 0;
 643}
 644
 645static struct crypto_alg ablk_ctr_alg = {
 646	.cra_name		= "ctr(aes)",
 647	.cra_driver_name	= "ctr-aes-aesni",
 648	.cra_priority		= 400,
 649	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 650	.cra_blocksize		= 1,
 651	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 652	.cra_alignmask		= 0,
 653	.cra_type		= &crypto_ablkcipher_type,
 654	.cra_module		= THIS_MODULE,
 655	.cra_list		= LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
 656	.cra_init		= ablk_ctr_init,
 657	.cra_exit		= ablk_exit,
 658	.cra_u = {
 659		.ablkcipher = {
 660			.min_keysize	= AES_MIN_KEY_SIZE,
 661			.max_keysize	= AES_MAX_KEY_SIZE,
 662			.ivsize		= AES_BLOCK_SIZE,
 663			.setkey		= ablk_set_key,
 664			.encrypt	= ablk_encrypt,
 665			.decrypt	= ablk_encrypt,
 666			.geniv		= "chainiv",
 667		},
 668	},
 669};
 670
 671#ifdef HAS_CTR
 672static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
 673{
 674	struct cryptd_ablkcipher *cryptd_tfm;
 675
 676	cryptd_tfm = cryptd_alloc_ablkcipher(
 677		"rfc3686(__driver-ctr-aes-aesni)", 0, 0);
 678	if (IS_ERR(cryptd_tfm))
 679		return PTR_ERR(cryptd_tfm);
 680	ablk_init_common(tfm, cryptd_tfm);
 681	return 0;
 682}
 683
 684static struct crypto_alg ablk_rfc3686_ctr_alg = {
 685	.cra_name		= "rfc3686(ctr(aes))",
 686	.cra_driver_name	= "rfc3686-ctr-aes-aesni",
 687	.cra_priority		= 400,
 688	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 689	.cra_blocksize		= 1,
 690	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 691	.cra_alignmask		= 0,
 692	.cra_type		= &crypto_ablkcipher_type,
 693	.cra_module		= THIS_MODULE,
 694	.cra_list		= LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
 695	.cra_init		= ablk_rfc3686_ctr_init,
 696	.cra_exit		= ablk_exit,
 697	.cra_u = {
 698		.ablkcipher = {
 699			.min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
 700			.max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
 701			.ivsize	     = CTR_RFC3686_IV_SIZE,
 702			.setkey	     = ablk_set_key,
 703			.encrypt     = ablk_encrypt,
 704			.decrypt     = ablk_decrypt,
 705			.geniv	     = "seqiv",
 706		},
 707	},
 708};
 709#endif
 710#endif
 711
 712#ifdef HAS_LRW
 713static int ablk_lrw_init(struct crypto_tfm *tfm)
 714{
 715	struct cryptd_ablkcipher *cryptd_tfm;
 716
 717	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
 718					     0, 0);
 719	if (IS_ERR(cryptd_tfm))
 720		return PTR_ERR(cryptd_tfm);
 721	ablk_init_common(tfm, cryptd_tfm);
 722	return 0;
 723}
 724
 725static struct crypto_alg ablk_lrw_alg = {
 726	.cra_name		= "lrw(aes)",
 727	.cra_driver_name	= "lrw-aes-aesni",
 728	.cra_priority		= 400,
 729	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 730	.cra_blocksize		= AES_BLOCK_SIZE,
 731	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 732	.cra_alignmask		= 0,
 733	.cra_type		= &crypto_ablkcipher_type,
 734	.cra_module		= THIS_MODULE,
 735	.cra_list		= LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
 736	.cra_init		= ablk_lrw_init,
 737	.cra_exit		= ablk_exit,
 738	.cra_u = {
 739		.ablkcipher = {
 740			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
 741			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
 742			.ivsize		= AES_BLOCK_SIZE,
 743			.setkey		= ablk_set_key,
 744			.encrypt	= ablk_encrypt,
 745			.decrypt	= ablk_decrypt,
 746		},
 747	},
 748};
 749#endif
 750
 751#ifdef HAS_PCBC
 752static int ablk_pcbc_init(struct crypto_tfm *tfm)
 753{
 754	struct cryptd_ablkcipher *cryptd_tfm;
 755
 756	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
 757					     0, 0);
 758	if (IS_ERR(cryptd_tfm))
 759		return PTR_ERR(cryptd_tfm);
 760	ablk_init_common(tfm, cryptd_tfm);
 761	return 0;
 762}
 763
 764static struct crypto_alg ablk_pcbc_alg = {
 765	.cra_name		= "pcbc(aes)",
 766	.cra_driver_name	= "pcbc-aes-aesni",
 767	.cra_priority		= 400,
 768	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 769	.cra_blocksize		= AES_BLOCK_SIZE,
 770	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 771	.cra_alignmask		= 0,
 772	.cra_type		= &crypto_ablkcipher_type,
 773	.cra_module		= THIS_MODULE,
 774	.cra_list		= LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
 775	.cra_init		= ablk_pcbc_init,
 776	.cra_exit		= ablk_exit,
 777	.cra_u = {
 778		.ablkcipher = {
 779			.min_keysize	= AES_MIN_KEY_SIZE,
 780			.max_keysize	= AES_MAX_KEY_SIZE,
 781			.ivsize		= AES_BLOCK_SIZE,
 782			.setkey		= ablk_set_key,
 783			.encrypt	= ablk_encrypt,
 784			.decrypt	= ablk_decrypt,
 785		},
 786	},
 787};
 788#endif
 789
 790#ifdef HAS_XTS
 791static int ablk_xts_init(struct crypto_tfm *tfm)
 792{
 793	struct cryptd_ablkcipher *cryptd_tfm;
 794
 795	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
 796					     0, 0);
 797	if (IS_ERR(cryptd_tfm))
 798		return PTR_ERR(cryptd_tfm);
 799	ablk_init_common(tfm, cryptd_tfm);
 800	return 0;
 801}
 802
 803static struct crypto_alg ablk_xts_alg = {
 804	.cra_name		= "xts(aes)",
 805	.cra_driver_name	= "xts-aes-aesni",
 806	.cra_priority		= 400,
 807	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 808	.cra_blocksize		= AES_BLOCK_SIZE,
 809	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 810	.cra_alignmask		= 0,
 811	.cra_type		= &crypto_ablkcipher_type,
 812	.cra_module		= THIS_MODULE,
 813	.cra_list		= LIST_HEAD_INIT(ablk_xts_alg.cra_list),
 814	.cra_init		= ablk_xts_init,
 815	.cra_exit		= ablk_exit,
 816	.cra_u = {
 817		.ablkcipher = {
 818			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
 819			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
 820			.ivsize		= AES_BLOCK_SIZE,
 821			.setkey		= ablk_set_key,
 822			.encrypt	= ablk_encrypt,
 823			.decrypt	= ablk_decrypt,
 824		},
 825	},
 826};
 827#endif
 828
 829#ifdef CONFIG_X86_64
 830static int rfc4106_init(struct crypto_tfm *tfm)
 831{
 832	struct cryptd_aead *cryptd_tfm;
 833	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
 834		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
 835	struct crypto_aead *cryptd_child;
 836	struct aesni_rfc4106_gcm_ctx *child_ctx;
 837	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
 838	if (IS_ERR(cryptd_tfm))
 839		return PTR_ERR(cryptd_tfm);
 840
 841	cryptd_child = cryptd_aead_child(cryptd_tfm);
 842	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
 843	memcpy(child_ctx, ctx, sizeof(*ctx));
 844	ctx->cryptd_tfm = cryptd_tfm;
 845	tfm->crt_aead.reqsize = sizeof(struct aead_request)
 846		+ crypto_aead_reqsize(&cryptd_tfm->base);
 847	return 0;
 848}
 849
 850static void rfc4106_exit(struct crypto_tfm *tfm)
 851{
 852	struct aesni_rfc4106_gcm_ctx *ctx =
 853		(struct aesni_rfc4106_gcm_ctx *)
 854		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
 855	if (!IS_ERR(ctx->cryptd_tfm))
 856		cryptd_free_aead(ctx->cryptd_tfm);
 857	return;
 858}
 859
 860static void
 861rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
 862{
 863	struct aesni_gcm_set_hash_subkey_result *result = req->data;
 864
 865	if (err == -EINPROGRESS)
 866		return;
 867	result->err = err;
 868	complete(&result->completion);
 869}
 870
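/*
 * Derive the GHASH subkey H = AES-K(0^128): the zeroed hash_subkey
 * buffer is encrypted in place with ctr(aes) and an all-zero IV, which
 * is equivalent to a single block encryption of all zeros.
 */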
 871static int
 872rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
 873{
 874	struct crypto_ablkcipher *ctr_tfm;
 875	struct ablkcipher_request *req;
 876	int ret = -EINVAL;
 877	struct aesni_hash_subkey_req_data *req_data;
 878
 879	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
 880	if (IS_ERR(ctr_tfm))
 881		return PTR_ERR(ctr_tfm);
 882
 883	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
 884
 885	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
 886	if (ret)
 887		goto out_free_ablkcipher;
 888
 889	ret = -ENOMEM;
 890	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
 891	if (!req)
 892		goto out_free_ablkcipher;
 893
 894	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
 895	if (!req_data)
 896		goto out_free_request;
 897
 898	memset(req_data->iv, 0, sizeof(req_data->iv));
 899
 900	/* Clear the data in the hash sub key container to zero.*/
 901	/* We want to cipher all zeros to create the hash sub key. */
 902	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
 903
 904	init_completion(&req_data->result.completion);
 905	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
 906	ablkcipher_request_set_tfm(req, ctr_tfm);
 907	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
 908					CRYPTO_TFM_REQ_MAY_BACKLOG,
 909					rfc4106_set_hash_subkey_done,
 910					&req_data->result);
 911
 912	ablkcipher_request_set_crypt(req, &req_data->sg,
 913		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
 914
 915	ret = crypto_ablkcipher_encrypt(req);
 916	if (ret == -EINPROGRESS || ret == -EBUSY) {
 917		ret = wait_for_completion_interruptible
 918			(&req_data->result.completion);
 919		if (!ret)
 920			ret = req_data->result.err;
 921	}
 922	kfree(req_data);
 923out_free_request:
 924	ablkcipher_request_free(req);
 925out_free_ablkcipher:
 926	crypto_free_ablkcipher(ctr_tfm);
 927	return ret;
 928}
 929
 930static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
 931						   unsigned int key_len)
 932{
 933	int ret = 0;
 934	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
 935	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
 936	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
 937	struct aesni_rfc4106_gcm_ctx *child_ctx =
 938                                 aesni_rfc4106_gcm_ctx_get(cryptd_child);
 939	u8 *new_key_mem = NULL;
 940
 941	if (key_len < 4) {
 942		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 943		return -EINVAL;
 944	}
 945	/*Account for 4 byte nonce at the end.*/
 946	key_len -= 4;
 947	if (key_len != AES_KEYSIZE_128) {
 948		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 949		return -EINVAL;
 950	}
 951
 952	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
 953	/*This must be on a 16 byte boundary!*/
 954	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
 955		return -EINVAL;
 956
 957	if ((unsigned long)key % AESNI_ALIGN) {
 958		/* key is not aligned: use an auxiliary aligned buffer */
 959		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
 960		if (!new_key_mem)
 961			return -ENOMEM;
 962
 963		new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
 964		memcpy(new_key_mem, key, key_len);
 965		key = new_key_mem;
 966	}
 967
 968	if (!irq_fpu_usable())
 969		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
 970		key, key_len);
 971	else {
 972		kernel_fpu_begin();
 973		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
 974		kernel_fpu_end();
 975	}
 976	/*This must be on a 16 byte boundary!*/
 977	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
 978		ret = -EINVAL;
 979		goto exit;
 980	}
 981	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
 982	memcpy(child_ctx, ctx, sizeof(*ctx));
 983exit:
 984	kfree(new_key_mem);
 985	return ret;
 986}
 987
 988/* This is the Integrity Check Value (aka the authentication tag) length and can
 989 * be 8, 12 or 16 bytes long. */
 990static int rfc4106_set_authsize(struct crypto_aead *parent,
 991				unsigned int authsize)
 992{
 993	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
 994	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
 995
 996	switch (authsize) {
 997	case 8:
 998	case 12:
 999	case 16:
1000		break;
1001	default:
1002		return -EINVAL;
1003	}
1004	crypto_aead_crt(parent)->authsize = authsize;
1005	crypto_aead_crt(cryptd_child)->authsize = authsize;
1006	return 0;
1007}
1008
1009static int rfc4106_encrypt(struct aead_request *req)
1010{
1011	int ret;
1012	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1013	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1014
1015	if (!irq_fpu_usable()) {
1016		struct aead_request *cryptd_req =
1017			(struct aead_request *) aead_request_ctx(req);
1018		memcpy(cryptd_req, req, sizeof(*req));
1019		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1020		return crypto_aead_encrypt(cryptd_req);
1021	} else {
1022		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1023		kernel_fpu_begin();
1024		ret = cryptd_child->base.crt_aead.encrypt(req);
1025		kernel_fpu_end();
1026		return ret;
1027	}
1028}
1029
1030static int rfc4106_decrypt(struct aead_request *req)
1031{
1032	int ret;
1033	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1034	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1035
1036	if (!irq_fpu_usable()) {
1037		struct aead_request *cryptd_req =
1038			(struct aead_request *) aead_request_ctx(req);
1039		memcpy(cryptd_req, req, sizeof(*req));
1040		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1041		return crypto_aead_decrypt(cryptd_req);
1042	} else {
1043		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1044		kernel_fpu_begin();
1045		ret = cryptd_child->base.crt_aead.decrypt(req);
1046		kernel_fpu_end();
1047		return ret;
1048	}
1049}
1050
1051static struct crypto_alg rfc4106_alg = {
1052	.cra_name = "rfc4106(gcm(aes))",
1053	.cra_driver_name = "rfc4106-gcm-aesni",
1054	.cra_priority = 400,
1055	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1056	.cra_blocksize = 1,
1057	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
1058	.cra_alignmask = 0,
1059	.cra_type = &crypto_nivaead_type,
1060	.cra_module = THIS_MODULE,
1061	.cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
1062	.cra_init = rfc4106_init,
1063	.cra_exit = rfc4106_exit,
1064	.cra_u = {
1065		.aead = {
1066			.setkey = rfc4106_set_key,
1067			.setauthsize = rfc4106_set_authsize,
1068			.encrypt = rfc4106_encrypt,
1069			.decrypt = rfc4106_decrypt,
1070			.geniv = "seqiv",
1071			.ivsize = 8,
1072			.maxauthsize = 16,
1073		},
1074	},
1075};
1076
1077static int __driver_rfc4106_encrypt(struct aead_request *req)
1078{
1079	u8 one_entry_in_sg = 0;
1080	u8 *src, *dst, *assoc;
1081	__be32 counter = cpu_to_be32(1);
1082	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1083	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1084	void *aes_ctx = &(ctx->aes_key_expanded);
1085	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1086	u8 iv_tab[16+AESNI_ALIGN];
1087	u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
1088	struct scatter_walk src_sg_walk;
1089	struct scatter_walk assoc_sg_walk;
1090	struct scatter_walk dst_sg_walk;
1091	unsigned int i;
1092
1093	/* Assuming we are supporting rfc4106 64-bit extended */
1094	/* sequence numbers, the AAD length needs to be */
1095	/* equal to 8 or 12 bytes. */
1096	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
1097		return -EINVAL;
1098	/* Build the IV: 4-byte salt | 8-byte explicit IV | counter = 1 */
1099	for (i = 0; i < 4; i++)
1100		*(iv+i) = ctx->nonce[i];
1101	for (i = 0; i < 8; i++)
1102		*(iv+4+i) = req->iv[i];
1103	*((__be32 *)(iv+12)) = counter;
1104
1105	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1106		one_entry_in_sg = 1;
1107		scatterwalk_start(&src_sg_walk, req->src);
1108		scatterwalk_start(&assoc_sg_walk, req->assoc);
1109		src = scatterwalk_map(&src_sg_walk, 0);
1110		assoc = scatterwalk_map(&assoc_sg_walk, 0);
1111		dst = src;
1112		if (unlikely(req->src != req->dst)) {
1113			scatterwalk_start(&dst_sg_walk, req->dst);
1114			dst = scatterwalk_map(&dst_sg_walk, 0);
1115		}
1116
1117	} else {
1118		/* Allocate memory for src, dst, assoc */
1119		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
1120			GFP_ATOMIC);
1121		if (unlikely(!src))
1122			return -ENOMEM;
1123		assoc = (src + req->cryptlen + auth_tag_len);
1124		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1125		scatterwalk_map_and_copy(assoc, req->assoc, 0,
1126					req->assoclen, 0);
1127		dst = src;
1128	}
1129
1130	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
1131		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
1132		+ ((unsigned long)req->cryptlen), auth_tag_len);
1133
1134	/* The authTag (aka the Integrity Check Value) needs to be written
1135	 * back to the packet. */
1136	if (one_entry_in_sg) {
1137		if (unlikely(req->src != req->dst)) {
1138			scatterwalk_unmap(dst, 0);
1139			scatterwalk_done(&dst_sg_walk, 0, 0);
1140		}
1141		scatterwalk_unmap(src, 0);
1142		scatterwalk_unmap(assoc, 0);
1143		scatterwalk_done(&src_sg_walk, 0, 0);
1144		scatterwalk_done(&assoc_sg_walk, 0, 0);
1145	} else {
1146		scatterwalk_map_and_copy(dst, req->dst, 0,
1147			req->cryptlen + auth_tag_len, 1);
1148		kfree(src);
1149	}
1150	return 0;
1151}
1152
1153static int __driver_rfc4106_decrypt(struct aead_request *req)
1154{
1155	u8 one_entry_in_sg = 0;
1156	u8 *src, *dst, *assoc;
1157	unsigned long tempCipherLen = 0;
1158	__be32 counter = cpu_to_be32(1);
1159	int retval = 0;
1160	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1161	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1162	void *aes_ctx = &(ctx->aes_key_expanded);
1163	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1164	u8 iv_and_authTag[32+AESNI_ALIGN];
1165	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
1166	u8 *authTag = iv + 16;
1167	struct scatter_walk src_sg_walk;
1168	struct scatter_walk assoc_sg_walk;
1169	struct scatter_walk dst_sg_walk;
1170	unsigned int i;
1171
1172	if (unlikely((req->cryptlen < auth_tag_len) ||
1173		(req->assoclen != 8 && req->assoclen != 12)))
1174		return -EINVAL;
1175	/* Assuming we are supporting rfc4106 64-bit extended */
1176	/* sequence numbers, the AAD length needs to be */
1177	/* equal to 8 or 12 bytes. */
1178
1179	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
1180	/* Build the IV: 4-byte salt | 8-byte explicit IV | counter = 1 */
1181	for (i = 0; i < 4; i++)
1182		*(iv+i) = ctx->nonce[i];
1183	for (i = 0; i < 8; i++)
1184		*(iv+4+i) = req->iv[i];
1185	*((__be32 *)(iv+12)) = counter;
1186
1187	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1188		one_entry_in_sg = 1;
1189		scatterwalk_start(&src_sg_walk, req->src);
1190		scatterwalk_start(&assoc_sg_walk, req->assoc);
1191		src = scatterwalk_map(&src_sg_walk, 0);
1192		assoc = scatterwalk_map(&assoc_sg_walk, 0);
1193		dst = src;
1194		if (unlikely(req->src != req->dst)) {
1195			scatterwalk_start(&dst_sg_walk, req->dst);
1196			dst = scatterwalk_map(&dst_sg_walk, 0);
1197		}
1198
1199	} else {
1200		/* Allocate memory for src, dst, assoc */
1201		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1202		if (!src)
1203			return -ENOMEM;
1204		assoc = (src + req->cryptlen + auth_tag_len);
1205		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1206		scatterwalk_map_and_copy(assoc, req->assoc, 0,
1207			req->assoclen, 0);
1208		dst = src;
1209	}
1210
1211	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
1212		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
1213		authTag, auth_tag_len);
1214
1215	/* Compare generated tag with passed in tag. */
1216	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
1217		-EBADMSG : 0;
1218
1219	if (one_entry_in_sg) {
1220		if (unlikely(req->src != req->dst)) {
1221			scatterwalk_unmap(dst, 0);
1222			scatterwalk_done(&dst_sg_walk, 0, 0);
1223		}
1224		scatterwalk_unmap(src, 0);
1225		scatterwalk_unmap(assoc, 0);
1226		scatterwalk_done(&src_sg_walk, 0, 0);
1227		scatterwalk_done(&assoc_sg_walk, 0, 0);
1228	} else {
1229		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
1230		kfree(src);
1231	}
1232	return retval;
1233}
1234
1235static struct crypto_alg __rfc4106_alg = {
1236	.cra_name		= "__gcm-aes-aesni",
1237	.cra_driver_name	= "__driver-gcm-aes-aesni",
1238	.cra_priority		= 0,
1239	.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
1240	.cra_blocksize		= 1,
1241	.cra_ctxsize	= sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
1242	.cra_alignmask		= 0,
1243	.cra_type		= &crypto_aead_type,
1244	.cra_module		= THIS_MODULE,
1245	.cra_list		= LIST_HEAD_INIT(__rfc4106_alg.cra_list),
1246	.cra_u = {
1247		.aead = {
1248			.encrypt	= __driver_rfc4106_encrypt,
1249			.decrypt	= __driver_rfc4106_decrypt,
1250		},
1251	},
1252};
1253#endif
1254
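/*
 * Register the internal helper algorithms first, then the user-visible
 * async ones; on failure everything already registered is torn down in
 * reverse order through the goto ladder below.
 */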
1255static int __init aesni_init(void)
1256{
1257	int err;
1258
1259	if (!cpu_has_aes) {
1260		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
1261		return -ENODEV;
1262	}
1263
1264	if ((err = crypto_fpu_init()))
1265		goto fpu_err;
1266	if ((err = crypto_register_alg(&aesni_alg)))
1267		goto aes_err;
1268	if ((err = crypto_register_alg(&__aesni_alg)))
1269		goto __aes_err;
1270	if ((err = crypto_register_alg(&blk_ecb_alg)))
1271		goto blk_ecb_err;
1272	if ((err = crypto_register_alg(&blk_cbc_alg)))
1273		goto blk_cbc_err;
1274	if ((err = crypto_register_alg(&ablk_ecb_alg)))
1275		goto ablk_ecb_err;
1276	if ((err = crypto_register_alg(&ablk_cbc_alg)))
1277		goto ablk_cbc_err;
1278#ifdef CONFIG_X86_64
1279	if ((err = crypto_register_alg(&blk_ctr_alg)))
1280		goto blk_ctr_err;
1281	if ((err = crypto_register_alg(&ablk_ctr_alg)))
1282		goto ablk_ctr_err;
1283	if ((err = crypto_register_alg(&__rfc4106_alg)))
1284		goto __aead_gcm_err;
1285	if ((err = crypto_register_alg(&rfc4106_alg)))
1286		goto aead_gcm_err;
1287#ifdef HAS_CTR
1288	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
1289		goto ablk_rfc3686_ctr_err;
1290#endif
1291#endif
1292#ifdef HAS_LRW
1293	if ((err = crypto_register_alg(&ablk_lrw_alg)))
1294		goto ablk_lrw_err;
1295#endif
1296#ifdef HAS_PCBC
1297	if ((err = crypto_register_alg(&ablk_pcbc_alg)))
1298		goto ablk_pcbc_err;
1299#endif
1300#ifdef HAS_XTS
1301	if ((err = crypto_register_alg(&ablk_xts_alg)))
1302		goto ablk_xts_err;
1303#endif
1304	return err;
1305
1306#ifdef HAS_XTS
1307ablk_xts_err:
1308#endif
1309#ifdef HAS_PCBC
1310	crypto_unregister_alg(&ablk_pcbc_alg);
1311ablk_pcbc_err:
1312#endif
1313#ifdef HAS_LRW
1314	crypto_unregister_alg(&ablk_lrw_alg);
1315ablk_lrw_err:
1316#endif
1317#ifdef CONFIG_X86_64
1318#ifdef HAS_CTR
1319	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
1320ablk_rfc3686_ctr_err:
1321#endif
1322	crypto_unregister_alg(&rfc4106_alg);
1323aead_gcm_err:
1324	crypto_unregister_alg(&__rfc4106_alg);
1325__aead_gcm_err:
1326	crypto_unregister_alg(&ablk_ctr_alg);
1327ablk_ctr_err:
1328	crypto_unregister_alg(&blk_ctr_alg);
1329blk_ctr_err:
1330#endif
1331	crypto_unregister_alg(&ablk_cbc_alg);
1332ablk_cbc_err:
1333	crypto_unregister_alg(&ablk_ecb_alg);
1334ablk_ecb_err:
1335	crypto_unregister_alg(&blk_cbc_alg);
1336blk_cbc_err:
1337	crypto_unregister_alg(&blk_ecb_alg);
1338blk_ecb_err:
1339	crypto_unregister_alg(&__aesni_alg);
1340__aes_err:
1341	crypto_unregister_alg(&aesni_alg);
1342aes_err:
1343fpu_err:
1344	return err;
1345}
1346
1347static void __exit aesni_exit(void)
1348{
1349#ifdef HAS_XTS
1350	crypto_unregister_alg(&ablk_xts_alg);
1351#endif
1352#ifdef HAS_PCBC
1353	crypto_unregister_alg(&ablk_pcbc_alg);
1354#endif
1355#ifdef HAS_LRW
1356	crypto_unregister_alg(&ablk_lrw_alg);
1357#endif
1358#ifdef CONFIG_X86_64
1359#ifdef HAS_CTR
1360	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
1361#endif
1362	crypto_unregister_alg(&rfc4106_alg);
1363	crypto_unregister_alg(&__rfc4106_alg);
1364	crypto_unregister_alg(&ablk_ctr_alg);
1365	crypto_unregister_alg(&blk_ctr_alg);
1366#endif
1367	crypto_unregister_alg(&ablk_cbc_alg);
1368	crypto_unregister_alg(&ablk_ecb_alg);
1369	crypto_unregister_alg(&blk_cbc_alg);
1370	crypto_unregister_alg(&blk_ecb_alg);
1371	crypto_unregister_alg(&__aesni_alg);
1372	crypto_unregister_alg(&aesni_alg);
1373
1374	crypto_fpu_exit();
1375}
1376
1377module_init(aesni_init);
1378module_exit(aesni_exit);
1379
1380MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1381MODULE_LICENSE("GPL");
1382MODULE_ALIAS("aes");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Support for Intel AES-NI instructions. This file contains glue
   4 * code, the real AES implementation is in intel-aes_asm.S.
   5 *
   6 * Copyright (C) 2008, Intel Corp.
   7 *    Author: Huang Ying <ying.huang@intel.com>
   8 *
   9 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
  10 * interface for 64-bit kernels.
  11 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
  12 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
  13 *             Tadeusz Struk (tadeusz.struk@intel.com)
  14 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
  15 *    Copyright (c) 2010, Intel Corporation.
  16 */
  17
  18#include <linux/hardirq.h>
  19#include <linux/types.h>
  20#include <linux/module.h>
  21#include <linux/err.h>
  22#include <crypto/algapi.h>
  23#include <crypto/aes.h>
  24#include <crypto/ctr.h>
  25#include <crypto/b128ops.h>
  26#include <crypto/gcm.h>
  27#include <crypto/xts.h>
  28#include <asm/cpu_device_id.h>
  29#include <asm/simd.h>
  30#include <crypto/scatterwalk.h>
  31#include <crypto/internal/aead.h>
  32#include <crypto/internal/simd.h>
  33#include <crypto/internal/skcipher.h>
  34#include <linux/jump_label.h>
  35#include <linux/workqueue.h>
  36#include <linux/spinlock.h>
  37#include <linux/static_call.h>
  38
  39
  40#define AESNI_ALIGN	16
  41#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
  42#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
  43#define RFC4106_HASH_SUBKEY_SIZE 16
  44#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
  45#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
  46#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
  47
  48/* This data is stored at the end of the crypto_tfm struct.
  49 * It's a type of per "session" data storage location.
  50 * This needs to be 16 byte aligned.
  51 */
  52struct aesni_rfc4106_gcm_ctx {
  53	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
  54	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
  55	u8 nonce[4];
  56};
  57
  58struct generic_gcmaes_ctx {
  59	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
  60	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
  61};
  62
  63struct aesni_xts_ctx {
  64	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
  65	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
  66};
  67
  68#define GCM_BLOCK_LEN 16
  69
  70struct gcm_context_data {
  71	/* init, update and finalize context data */
  72	u8 aad_hash[GCM_BLOCK_LEN];
  73	u64 aad_length;
  74	u64 in_length;
  75	u8 partial_block_enc_key[GCM_BLOCK_LEN];
  76	u8 orig_IV[GCM_BLOCK_LEN];
  77	u8 current_counter[GCM_BLOCK_LEN];
  78	u64 partial_block_len;
  79	u64 unused;
  80	u8 hash_keys[GCM_BLOCK_LEN * 16];
  81};
  82
  83asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
  84			     unsigned int key_len);
  85asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
  86asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
  87asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
  88			      const u8 *in, unsigned int len);
  89asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
  90			      const u8 *in, unsigned int len);
  91asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
  92			      const u8 *in, unsigned int len, u8 *iv);
  93asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
  94			      const u8 *in, unsigned int len, u8 *iv);
  95asmlinkage void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
  96				  const u8 *in, unsigned int len, u8 *iv);
  97asmlinkage void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
  98				  const u8 *in, unsigned int len, u8 *iv);
  99
 100#define AVX_GEN2_OPTSIZE 640
 101#define AVX_GEN4_OPTSIZE 4096
 102
 103asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
 104				  const u8 *in, unsigned int len, u8 *iv);
 105
 106asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
 107				  const u8 *in, unsigned int len, u8 *iv);
 108
 109#ifdef CONFIG_X86_64
 110
 111asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 112			      const u8 *in, unsigned int len, u8 *iv);
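/*
 * aesni_ctr_enc_tfm is a static call so that the CTR implementation can
 * be switched at init time (e.g. to an AVX by8 variant) without paying
 * for an indirect branch on every request.
 */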
 113DEFINE_STATIC_CALL(aesni_ctr_enc_tfm, aesni_ctr_enc);
 114
 115/* Scatter / Gather routines, with args similar to above */
 116asmlinkage void aesni_gcm_init(void *ctx,
 117			       struct gcm_context_data *gdata,
 118			       u8 *iv,
 119			       u8 *hash_subkey, const u8 *aad,
 120			       unsigned long aad_len);
 121asmlinkage void aesni_gcm_enc_update(void *ctx,
 122				     struct gcm_context_data *gdata, u8 *out,
 123				     const u8 *in, unsigned long plaintext_len);
 124asmlinkage void aesni_gcm_dec_update(void *ctx,
 125				     struct gcm_context_data *gdata, u8 *out,
 126				     const u8 *in,
 127				     unsigned long ciphertext_len);
 128asmlinkage void aesni_gcm_finalize(void *ctx,
 129				   struct gcm_context_data *gdata,
 130				   u8 *auth_tag, unsigned long auth_tag_len);
 131
 132asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
 133		void *keys, u8 *out, unsigned int num_bytes);
 134asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
 135		void *keys, u8 *out, unsigned int num_bytes);
 136asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
 137		void *keys, u8 *out, unsigned int num_bytes);
 138/*
 139 * asmlinkage void aesni_gcm_init_avx_gen2()
 140 * gcm_data *my_ctx_data, context data
 141 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 142 */
 143asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
 144					struct gcm_context_data *gdata,
 145					u8 *iv,
 146					u8 *hash_subkey,
 147					const u8 *aad,
 148					unsigned long aad_len);
 149
 150asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
 151				     struct gcm_context_data *gdata, u8 *out,
 152				     const u8 *in, unsigned long plaintext_len);
 153asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
 154				     struct gcm_context_data *gdata, u8 *out,
 155				     const u8 *in,
 156				     unsigned long ciphertext_len);
 157asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
 158				   struct gcm_context_data *gdata,
 159				   u8 *auth_tag, unsigned long auth_tag_len);
 160
 161/*
 162 * asmlinkage void aesni_gcm_init_avx_gen4()
 163 * gcm_data *my_ctx_data, context data
 164 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 165 */
 166asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
 167					struct gcm_context_data *gdata,
 168					u8 *iv,
 169					u8 *hash_subkey,
 170					const u8 *aad,
 171					unsigned long aad_len);
 172
 173asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
 174				     struct gcm_context_data *gdata, u8 *out,
 175				     const u8 *in, unsigned long plaintext_len);
 176asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
 177				     struct gcm_context_data *gdata, u8 *out,
 178				     const u8 *in,
 179				     unsigned long ciphertext_len);
 180asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
 181				   struct gcm_context_data *gdata,
 182				   u8 *auth_tag, unsigned long auth_tag_len);
 183
 184static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx);
 185static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx2);
 186
 187static inline struct
 188aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
 189{
 190	unsigned long align = AESNI_ALIGN;
 191
 192	if (align <= crypto_tfm_ctx_alignment())
 193		align = 1;
 194	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
 195}
 196
 197static inline struct
 198generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
 199{
 200	unsigned long align = AESNI_ALIGN;
 201
 202	if (align <= crypto_tfm_ctx_alignment())
 203		align = 1;
 204	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
 205}
 206#endif
 207
 208static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 209{
 210	unsigned long addr = (unsigned long)raw_ctx;
 211	unsigned long align = AESNI_ALIGN;
 212
 213	if (align <= crypto_tfm_ctx_alignment())
 214		align = 1;
 215	return (struct crypto_aes_ctx *)ALIGN(addr, align);
 216}
 217
 218static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 219			      const u8 *in_key, unsigned int key_len)
 220{
 221	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
 222	int err;
 223
 224	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
 225	    key_len != AES_KEYSIZE_256)
 226		return -EINVAL;
 227
 228	if (!crypto_simd_usable())
 229		err = aes_expandkey(ctx, in_key, key_len);
 230	else {
 231		kernel_fpu_begin();
 232		err = aesni_set_key(ctx, in_key, key_len);
 233		kernel_fpu_end();
 234	}
 235
 236	return err;
 237}
 238
 239static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 240		       unsigned int key_len)
 241{
 242	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
 243}
 244
 245static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 246{
 247	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 248
 249	if (!crypto_simd_usable()) {
 250		aes_encrypt(ctx, dst, src);
 251	} else {
 252		kernel_fpu_begin();
 253		aesni_enc(ctx, dst, src);
 254		kernel_fpu_end();
 255	}
 256}
 257
 258static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 259{
 260	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 261
 262	if (!crypto_simd_usable()) {
 263		aes_decrypt(ctx, dst, src);
 264	} else {
 265		kernel_fpu_begin();
 266		aesni_dec(ctx, dst, src);
 267		kernel_fpu_end();
 268	}
 269}
 270
 271static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
 272			         unsigned int len)
 273{
 274	return aes_set_key_common(crypto_skcipher_tfm(tfm),
 275				  crypto_skcipher_ctx(tfm), key, len);
 276}
 277
 278static int ecb_encrypt(struct skcipher_request *req)
 279{
 280	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 281	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 282	struct skcipher_walk walk;
 283	unsigned int nbytes;
 284	int err;
 285
 286	err = skcipher_walk_virt(&walk, req, false);
 287
 288	while ((nbytes = walk.nbytes)) {
 289		kernel_fpu_begin();
 290		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 291			      nbytes & AES_BLOCK_MASK);
 292		kernel_fpu_end();
 293		nbytes &= AES_BLOCK_SIZE - 1;
 294		err = skcipher_walk_done(&walk, nbytes);
 295	}
 296
 297	return err;
 298}
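/*
 * Walk arithmetic, shown with a hypothetical chunk size (not taken from
 * a real request): AES_BLOCK_MASK keeps only whole 16-byte blocks for the
 * assembly helper and the remainder is handed back to the walk:
 *
 *	unsigned int nbytes = 100;
 *	nbytes & AES_BLOCK_MASK;	// 96 bytes encrypted this pass
 *	nbytes &= AES_BLOCK_SIZE - 1;	// 4 bytes left for skcipher_walk_done()
 *
 * The same pattern repeats in the CBC and CTR handlers below.
 */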
 299
 300static int ecb_decrypt(struct skcipher_request *req)
 301{
 302	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 303	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 304	struct skcipher_walk walk;
 305	unsigned int nbytes;
 306	int err;
 307
 308	err = skcipher_walk_virt(&walk, req, false);
 309
 310	while ((nbytes = walk.nbytes)) {
 311		kernel_fpu_begin();
 312		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 313			      nbytes & AES_BLOCK_MASK);
 314		kernel_fpu_end();
 315		nbytes &= AES_BLOCK_SIZE - 1;
 316		err = skcipher_walk_done(&walk, nbytes);
 317	}
 318
 319	return err;
 320}
 321
 322static int cbc_encrypt(struct skcipher_request *req)
 323{
 324	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 325	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 326	struct skcipher_walk walk;
 327	unsigned int nbytes;
 328	int err;
 329
 330	err = skcipher_walk_virt(&walk, req, false);
 331
 332	while ((nbytes = walk.nbytes)) {
 333		kernel_fpu_begin();
 334		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 335			      nbytes & AES_BLOCK_MASK, walk.iv);
 336		kernel_fpu_end();
 337		nbytes &= AES_BLOCK_SIZE - 1;
 338		err = skcipher_walk_done(&walk, nbytes);
 339	}
 340
 341	return err;
 342}
 343
 344static int cbc_decrypt(struct skcipher_request *req)
 345{
 346	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 347	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 348	struct skcipher_walk walk;
 349	unsigned int nbytes;
 350	int err;
 351
 352	err = skcipher_walk_virt(&walk, req, false);
 353
 354	while ((nbytes = walk.nbytes)) {
 355		kernel_fpu_begin();
 356		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 357			      nbytes & AES_BLOCK_MASK, walk.iv);
 358		kernel_fpu_end();
 359		nbytes &= AES_BLOCK_SIZE - 1;
 360		err = skcipher_walk_done(&walk, nbytes);
 361	}
 362
 363	return err;
 364}
 365
 366static int cts_cbc_encrypt(struct skcipher_request *req)
 367{
 368	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 369	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 370	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
 371	struct scatterlist *src = req->src, *dst = req->dst;
 372	struct scatterlist sg_src[2], sg_dst[2];
 373	struct skcipher_request subreq;
 374	struct skcipher_walk walk;
 375	int err;
 376
 377	skcipher_request_set_tfm(&subreq, tfm);
 378	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
 379				      NULL, NULL);
 380
 381	if (req->cryptlen <= AES_BLOCK_SIZE) {
 382		if (req->cryptlen < AES_BLOCK_SIZE)
 383			return -EINVAL;
 384		cbc_blocks = 1;
 385	}
 386
 387	if (cbc_blocks > 0) {
 388		skcipher_request_set_crypt(&subreq, req->src, req->dst,
 389					   cbc_blocks * AES_BLOCK_SIZE,
 390					   req->iv);
 391
 392		err = cbc_encrypt(&subreq);
 393		if (err)
 394			return err;
 395
 396		if (req->cryptlen == AES_BLOCK_SIZE)
 397			return 0;
 398
 399		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
 400		if (req->dst != req->src)
 401			dst = scatterwalk_ffwd(sg_dst, req->dst,
 402					       subreq.cryptlen);
 403	}
 404
 405	/* handle ciphertext stealing */
 406	skcipher_request_set_crypt(&subreq, src, dst,
 407				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
 408				   req->iv);
 409
 410	err = skcipher_walk_virt(&walk, &subreq, false);
 411	if (err)
 412		return err;
 413
 414	kernel_fpu_begin();
 415	aesni_cts_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 416			  walk.nbytes, walk.iv);
 417	kernel_fpu_end();
 418
 419	return skcipher_walk_done(&walk, 0);
 420}
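/*
 * Rough worked example with a hypothetical request size: for
 * req->cryptlen == 37,
 *
 *	cbc_blocks = DIV_ROUND_UP(37, AES_BLOCK_SIZE) - 2 = 3 - 2 = 1
 *
 * so one 16-byte block goes through plain cbc_encrypt() via the
 * subrequest, and the remaining 21 bytes (one full block plus a 5-byte
 * tail) are handled by aesni_cts_cbc_enc(), which steals ciphertext from
 * the second-to-last block so that no padding is needed.
 */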
 421
 422static int cts_cbc_decrypt(struct skcipher_request *req)
 423{
 424	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 425	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 426	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
 427	struct scatterlist *src = req->src, *dst = req->dst;
 428	struct scatterlist sg_src[2], sg_dst[2];
 429	struct skcipher_request subreq;
 430	struct skcipher_walk walk;
 431	int err;
 432
 433	skcipher_request_set_tfm(&subreq, tfm);
 434	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
 435				      NULL, NULL);
 436
 437	if (req->cryptlen <= AES_BLOCK_SIZE) {
 438		if (req->cryptlen < AES_BLOCK_SIZE)
 439			return -EINVAL;
 440		cbc_blocks = 1;
 441	}
 442
 443	if (cbc_blocks > 0) {
 444		skcipher_request_set_crypt(&subreq, req->src, req->dst,
 445					   cbc_blocks * AES_BLOCK_SIZE,
 446					   req->iv);
 447
 448		err = cbc_decrypt(&subreq);
 449		if (err)
 450			return err;
 451
 452		if (req->cryptlen == AES_BLOCK_SIZE)
 453			return 0;
 454
 455		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
 456		if (req->dst != req->src)
 457			dst = scatterwalk_ffwd(sg_dst, req->dst,
 458					       subreq.cryptlen);
 459	}
 460
 461	/* handle ciphertext stealing */
 462	skcipher_request_set_crypt(&subreq, src, dst,
 463				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
 464				   req->iv);
 465
 466	err = skcipher_walk_virt(&walk, &subreq, false);
 467	if (err)
 468		return err;
 469
 470	kernel_fpu_begin();
 471	aesni_cts_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 472			  walk.nbytes, walk.iv);
 473	kernel_fpu_end();
 474
 475	return skcipher_walk_done(&walk, 0);
 476}
 477
 478#ifdef CONFIG_X86_64
 479static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
 480			      const u8 *in, unsigned int len, u8 *iv)
 481{
 482	/*
 483	 * Based on the key length, override with the by8 version of CTR
 484	 * mode encryption/decryption for improved performance.
 485	 * aes_set_key_common() ensures that the key length is one of
 486	 * {128,192,256} bits.
 487	 */
 488	if (ctx->key_length == AES_KEYSIZE_128)
 489		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
 490	else if (ctx->key_length == AES_KEYSIZE_192)
 491		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
 492	else
 493		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
 494}
 495
 496static int ctr_crypt(struct skcipher_request *req)
 497{
 498	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 499	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 500	u8 keystream[AES_BLOCK_SIZE];
 501	struct skcipher_walk walk;
 502	unsigned int nbytes;
 503	int err;
 504
 505	err = skcipher_walk_virt(&walk, req, false);
 506
 507	while ((nbytes = walk.nbytes) > 0) {
 508		kernel_fpu_begin();
 509		if (nbytes & AES_BLOCK_MASK)
 510			static_call(aesni_ctr_enc_tfm)(ctx, walk.dst.virt.addr,
 511						       walk.src.virt.addr,
 512						       nbytes & AES_BLOCK_MASK,
 513						       walk.iv);
 514		nbytes &= ~AES_BLOCK_MASK;
 515
 516		if (walk.nbytes == walk.total && nbytes > 0) {
 517			aesni_enc(ctx, keystream, walk.iv);
 518			crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes - nbytes,
 519				       walk.src.virt.addr + walk.nbytes - nbytes,
 520				       keystream, nbytes);
 521			crypto_inc(walk.iv, AES_BLOCK_SIZE);
 522			nbytes = 0;
 523		}
 524		kernel_fpu_end();
 525		err = skcipher_walk_done(&walk, nbytes);
 526	}
 527	return err;
 528}
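/*
 * Tail handling sketch (hypothetical 5-byte tail, mirroring the code
 * above): when the last walk chunk ends with a partial block, one extra
 * keystream block is produced and XORed over just the remaining bytes:
 *
 *	u8 keystream[AES_BLOCK_SIZE];
 *
 *	aesni_enc(ctx, keystream, walk.iv);	// keystream = E_K(counter)
 *	crypto_xor_cpy(dst, src, keystream, 5);	// dst = src ^ keystream
 *	crypto_inc(walk.iv, AES_BLOCK_SIZE);	// advance the counter
 *
 * which is why CTR mode needs no padding and has a blocksize of 1.
 */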
 529
 530static int
 531rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
 532{
 533	struct crypto_aes_ctx ctx;
 534	int ret;
 535
 536	ret = aes_expandkey(&ctx, key, key_len);
 537	if (ret)
 538		return ret;
 539
 540	/* Clear the hash sub key container to all zeros, since we encrypt */
 541	/* an all-zero block to create the hash sub key. */
 542	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
 543
 544	aes_encrypt(&ctx, hash_subkey, hash_subkey);
 545
 546	memzero_explicit(&ctx, sizeof(ctx));
 547	return 0;
 548}
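/*
 * Background note: in GCM terms the hash subkey is H = E_K(0^128), i.e.
 * the block cipher applied to an all-zero block:
 *
 *	u8 h[16] = { 0 };
 *	aes_encrypt(&ctx, h, h);	// h now holds H for GHASH
 *
 * memzero_explicit() afterwards keeps the expanded key from lingering on
 * the stack once the subkey has been derived.
 */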
 549
 550static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
 551				  unsigned int key_len)
 552{
 553	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
 554
 555	if (key_len < 4)
 556		return -EINVAL;
 557
 558	/* Account for the 4-byte nonce at the end. */
 559	key_len -= 4;
 560
 561	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
 562
 563	return aes_set_key_common(crypto_aead_tfm(aead),
 564				  &ctx->aes_key_expanded, key, key_len) ?:
 565	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
 566}
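/*
 * RFC 4106 key layout, shown for a hypothetical 20-byte input: the last
 * four bytes are the salt (nonce) and the rest is the AES key proper:
 *
 *	key[0..15]  -> aes_key_expanded	// AES-128 key
 *	key[16..19] -> ctx->nonce	// first 4 bytes of every IV
 *
 * A 28- or 36-byte input selects AES-192 or AES-256 in the same way.
 */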
 567
 568/* This is the Integrity Check Value (aka the authentication tag) length and can
 569 * be 8, 12 or 16 bytes long. */
 570static int common_rfc4106_set_authsize(struct crypto_aead *aead,
 571				       unsigned int authsize)
 572{
 573	switch (authsize) {
 574	case 8:
 575	case 12:
 576	case 16:
 577		break;
 578	default:
 579		return -EINVAL;
 580	}
 581
 582	return 0;
 583}
 584
 585static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
 586				       unsigned int authsize)
 587{
 588	switch (authsize) {
 589	case 4:
 590	case 8:
 591	case 12:
 592	case 13:
 593	case 14:
 594	case 15:
 595	case 16:
 596		break;
 597	default:
 598		return -EINVAL;
 599	}
 600
 601	return 0;
 602}
 603
 604static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 605			      unsigned int assoclen, u8 *hash_subkey,
 606			      u8 *iv, void *aes_ctx, u8 *auth_tag,
 607			      unsigned long auth_tag_len)
 608{
 609	u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
 610	struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
 611	unsigned long left = req->cryptlen;
 612	struct scatter_walk assoc_sg_walk;
 613	struct skcipher_walk walk;
 614	bool do_avx, do_avx2;
 615	u8 *assocmem = NULL;
 616	u8 *assoc;
 617	int err;
 618
 619	if (!enc)
 620		left -= auth_tag_len;
 621
 622	do_avx = (left >= AVX_GEN2_OPTSIZE);
 623	do_avx2 = (left >= AVX_GEN4_OPTSIZE);
 624
 625	/* Linearize assoc, if not already linear */
 626	if (req->src->length >= assoclen && req->src->length) {
 627		scatterwalk_start(&assoc_sg_walk, req->src);
 628		assoc = scatterwalk_map(&assoc_sg_walk);
 629	} else {
 630		gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 631			      GFP_KERNEL : GFP_ATOMIC;
 632
 633		/* assoc can be any length, so must be on heap */
 634		assocmem = kmalloc(assoclen, flags);
 635		if (unlikely(!assocmem))
 636			return -ENOMEM;
 637		assoc = assocmem;
 638
 639		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
 640	}
 641
 642	kernel_fpu_begin();
 643	if (static_branch_likely(&gcm_use_avx2) && do_avx2)
 644		aesni_gcm_init_avx_gen4(aes_ctx, data, iv, hash_subkey, assoc,
 645					assoclen);
 646	else if (static_branch_likely(&gcm_use_avx) && do_avx)
 647		aesni_gcm_init_avx_gen2(aes_ctx, data, iv, hash_subkey, assoc,
 648					assoclen);
 649	else
 650		aesni_gcm_init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
 651	kernel_fpu_end();
 652
 653	if (!assocmem)
 654		scatterwalk_unmap(assoc);
 655	else
 656		kfree(assocmem);
 657
 658	err = enc ? skcipher_walk_aead_encrypt(&walk, req, false)
 659		  : skcipher_walk_aead_decrypt(&walk, req, false);
 660
 661	while (walk.nbytes > 0) {
 662		kernel_fpu_begin();
 663		if (static_branch_likely(&gcm_use_avx2) && do_avx2) {
 664			if (enc)
 665				aesni_gcm_enc_update_avx_gen4(aes_ctx, data,
 666							      walk.dst.virt.addr,
 667							      walk.src.virt.addr,
 668							      walk.nbytes);
 669			else
 670				aesni_gcm_dec_update_avx_gen4(aes_ctx, data,
 671							      walk.dst.virt.addr,
 672							      walk.src.virt.addr,
 673							      walk.nbytes);
 674		} else if (static_branch_likely(&gcm_use_avx) && do_avx) {
 675			if (enc)
 676				aesni_gcm_enc_update_avx_gen2(aes_ctx, data,
 677							      walk.dst.virt.addr,
 678							      walk.src.virt.addr,
 679							      walk.nbytes);
 680			else
 681				aesni_gcm_dec_update_avx_gen2(aes_ctx, data,
 682							      walk.dst.virt.addr,
 683							      walk.src.virt.addr,
 684							      walk.nbytes);
 685		} else if (enc) {
 686			aesni_gcm_enc_update(aes_ctx, data, walk.dst.virt.addr,
 687					     walk.src.virt.addr, walk.nbytes);
 688		} else {
 689			aesni_gcm_dec_update(aes_ctx, data, walk.dst.virt.addr,
 690					     walk.src.virt.addr, walk.nbytes);
 691		}
 692		kernel_fpu_end();
 693
 694		err = skcipher_walk_done(&walk, 0);
 695	}
 696
 697	if (err)
 698		return err;
 699
 700	kernel_fpu_begin();
 701	if (static_branch_likely(&gcm_use_avx2) && do_avx2)
 702		aesni_gcm_finalize_avx_gen4(aes_ctx, data, auth_tag,
 703					    auth_tag_len);
 704	else if (static_branch_likely(&gcm_use_avx) && do_avx)
 705		aesni_gcm_finalize_avx_gen2(aes_ctx, data, auth_tag,
 706					    auth_tag_len);
 707	else
 708		aesni_gcm_finalize(aes_ctx, data, auth_tag, auth_tag_len);
 709	kernel_fpu_end();
 710
 711	return 0;
 712}
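/*
 * Summary of the flow above: the associated data is linearized and hashed
 * once by aesni_gcm_init*(), the payload is then streamed through
 * aesni_gcm_{enc,dec}_update*() one walk chunk at a time (with the FPU
 * section re-opened around each chunk), and aesni_gcm_finalize*() emits
 * the authentication tag.  do_avx/do_avx2 are decided once up front from
 * the payload length so all three phases take the same code path.
 */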
 713
 714static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
 715			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
 716{
 717	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 718	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
 719	u8 auth_tag[16];
 720	int err;
 721
 722	err = gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, aes_ctx,
 723				 auth_tag, auth_tag_len);
 724	if (err)
 725		return err;
 726
 727	scatterwalk_map_and_copy(auth_tag, req->dst,
 728				 req->assoclen + req->cryptlen,
 729				 auth_tag_len, 1);
 730	return 0;
 731}
 732
 733static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
 734			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
 735{
 736	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 737	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
 738	u8 auth_tag_msg[16];
 739	u8 auth_tag[16];
 740	int err;
 741
 742	err = gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, aes_ctx,
 743				 auth_tag, auth_tag_len);
 744	if (err)
 745		return err;
 746
 747	/* Copy out original auth_tag */
 748	scatterwalk_map_and_copy(auth_tag_msg, req->src,
 749				 req->assoclen + req->cryptlen - auth_tag_len,
 750				 auth_tag_len, 0);
 751
 752	/* Compare generated tag with passed in tag. */
 753	if (crypto_memneq(auth_tag_msg, auth_tag, auth_tag_len)) {
 754		memzero_explicit(auth_tag, sizeof(auth_tag));
 755		return -EBADMSG;
 756	}
 757	return 0;
 758}
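/*
 * Note on the tag check above: crypto_memneq() compares in constant time,
 * so the result does not leak how many leading tag bytes matched.  A
 * naive early-exit comparison such as
 *
 *	if (memcmp(auth_tag_msg, auth_tag, auth_tag_len))	// timing leak
 *		return -EBADMSG;
 *
 * would be observable over the network, hence the dedicated helper and
 * the memzero_explicit() of the computed tag on failure.
 */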
 759
 760static int helper_rfc4106_encrypt(struct aead_request *req)
 761{
 762	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 763	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 764	void *aes_ctx = &(ctx->aes_key_expanded);
 765	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
 766	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 767	unsigned int i;
 768	__be32 counter = cpu_to_be32(1);
 769
 770	/* Assuming we are supporting rfc4106 64-bit extended */
 771	/* sequence numbers, we need the AAD length to be */
 772	/* equal to 16 or 20 bytes. */
 773	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
 774		return -EINVAL;
 775
 776	/* Build the IV: 4-byte nonce || 8-byte explicit IV || counter */
 777	for (i = 0; i < 4; i++)
 778		*(iv+i) = ctx->nonce[i];
 779	for (i = 0; i < 8; i++)
 780		*(iv+4+i) = req->iv[i];
 781	*((__be32 *)(iv+12)) = counter;
 782
 783	return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
 784			      aes_ctx);
 785}
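/*
 * IV layout built above, byte by byte:
 *
 *	iv[0..3]   = ctx->nonce		// salt taken from the key tail
 *	iv[4..11]  = req->iv		// 8-byte explicit IV from the packet
 *	iv[12..15] = cpu_to_be32(1)	// initial counter value
 *
 * req->assoclen is 16 or 20 because the 8 explicit IV bytes travel with
 * the AAD, which is why gcmaes_encrypt() is called with assoclen - 8.
 */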
 786
 787static int helper_rfc4106_decrypt(struct aead_request *req)
 788{
 789	__be32 counter = cpu_to_be32(1);
 790	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 791	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 792	void *aes_ctx = &(ctx->aes_key_expanded);
 793	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
 794	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 795	unsigned int i;
 796
 797	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
 798		return -EINVAL;
 799
 800	/* Assuming we are supporting rfc4106 64-bit extended */
 801	/* sequence numbers, the check above requires the AAD length */
 802	/* to be equal to 16 or 20 bytes. */
 803
 804	/* Build the IV: 4-byte nonce || 8-byte explicit IV || counter */
 805	for (i = 0; i < 4; i++)
 806		*(iv+i) = ctx->nonce[i];
 807	for (i = 0; i < 8; i++)
 808		*(iv+4+i) = req->iv[i];
 809	*((__be32 *)(iv+12)) = counter;
 810
 811	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
 812			      aes_ctx);
 813}
 814#endif
 815
 816static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
 817			    unsigned int keylen)
 818{
 819	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 820	int err;
 821
 822	err = xts_verify_key(tfm, key, keylen);
 823	if (err)
 824		return err;
 825
 826	keylen /= 2;
 827
 828	/* first half of xts-key is for crypt */
 829	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
 830				 key, keylen);
 831	if (err)
 832		return err;
 833
 834	/* second half of xts-key is for tweak */
 835	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
 836				  key + keylen, keylen);
 837}
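/*
 * XTS key split, shown for a hypothetical 64-byte key (AES-256 flavour):
 * after xts_verify_key() has validated the input,
 *
 *	key[0..31]  -> raw_crypt_ctx	// encrypts/decrypts the data blocks
 *	key[32..63] -> raw_tweak_ctx	// encrypts the tweak (the IV)
 *
 * A 32-byte key selects AES-128 for both halves in the same way.
 */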
 838
 839static int xts_crypt(struct skcipher_request *req, bool encrypt)
 840{
 841	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 842	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 843	int tail = req->cryptlen % AES_BLOCK_SIZE;
 844	struct skcipher_request subreq;
 845	struct skcipher_walk walk;
 846	int err;
 847
 848	if (req->cryptlen < AES_BLOCK_SIZE)
 849		return -EINVAL;
 850
 851	err = skcipher_walk_virt(&walk, req, false);
 852	if (!walk.nbytes)
 853		return err;
 854
 855	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
 856		int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
 857
 858		skcipher_walk_abort(&walk);
 859
 860		skcipher_request_set_tfm(&subreq, tfm);
 861		skcipher_request_set_callback(&subreq,
 862					      skcipher_request_flags(req),
 863					      NULL, NULL);
 864		skcipher_request_set_crypt(&subreq, req->src, req->dst,
 865					   blocks * AES_BLOCK_SIZE, req->iv);
 866		req = &subreq;
 867
 868		err = skcipher_walk_virt(&walk, req, false);
 869		if (err)
 870			return err;
 871	} else {
 872		tail = 0;
 873	}
 874
 875	kernel_fpu_begin();
 876
 877	/* calculate first value of T */
 878	aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);
 879
 880	while (walk.nbytes > 0) {
 881		int nbytes = walk.nbytes;
 882
 883		if (nbytes < walk.total)
 884			nbytes &= ~(AES_BLOCK_SIZE - 1);
 885
 886		if (encrypt)
 887			aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
 888					  walk.dst.virt.addr, walk.src.virt.addr,
 889					  nbytes, walk.iv);
 890		else
 891			aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
 892					  walk.dst.virt.addr, walk.src.virt.addr,
 893					  nbytes, walk.iv);
 894		kernel_fpu_end();
 895
 896		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 897
 898		if (walk.nbytes > 0)
 899			kernel_fpu_begin();
 900	}
 901
 902	if (unlikely(tail > 0 && !err)) {
 903		struct scatterlist sg_src[2], sg_dst[2];
 904		struct scatterlist *src, *dst;
 905
 906		dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
 907		if (req->dst != req->src)
 908			dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
 909
 910		skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
 911					   req->iv);
 912
 913		err = skcipher_walk_virt(&walk, &subreq, false);
 914		if (err)
 915			return err;
 916
 917		kernel_fpu_begin();
 918		if (encrypt)
 919			aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
 920					  walk.dst.virt.addr, walk.src.virt.addr,
 921					  walk.nbytes, walk.iv);
 922		else
 923			aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
 924					  walk.dst.virt.addr, walk.src.virt.addr,
 925					  walk.nbytes, walk.iv);
 926		kernel_fpu_end();
 927
 928		err = skcipher_walk_done(&walk, 0);
 929	}
 930	return err;
 931}
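/*
 * Tail handling sketch with hypothetical sizes: for req->cryptlen == 37,
 * tail == 5, so the main loop above is restricted to the first
 * (DIV_ROUND_UP(37, 16) - 2) * 16 == 16 bytes via the subrequest, and the
 * final 16 + 5 bytes are processed together in the second pass so that
 * the assembly can steal ciphertext across the last two blocks.
 */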
 932
 933static int xts_encrypt(struct skcipher_request *req)
 934{
 935	return xts_crypt(req, true);
 936}
 937
 938static int xts_decrypt(struct skcipher_request *req)
 939{
 940	return xts_crypt(req, false);
 941}
 942
 943static struct crypto_alg aesni_cipher_alg = {
 944	.cra_name		= "aes",
 945	.cra_driver_name	= "aes-aesni",
 946	.cra_priority		= 300,
 947	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 948	.cra_blocksize		= AES_BLOCK_SIZE,
 949	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
 950	.cra_module		= THIS_MODULE,
 951	.cra_u	= {
 952		.cipher	= {
 953			.cia_min_keysize	= AES_MIN_KEY_SIZE,
 954			.cia_max_keysize	= AES_MAX_KEY_SIZE,
 955			.cia_setkey		= aes_set_key,
 956			.cia_encrypt		= aesni_encrypt,
 957			.cia_decrypt		= aesni_decrypt
 958		}
 959	}
 960};
 961
 962static struct skcipher_alg aesni_skciphers[] = {
 963	{
 964		.base = {
 965			.cra_name		= "__ecb(aes)",
 966			.cra_driver_name	= "__ecb-aes-aesni",
 967			.cra_priority		= 400,
 968			.cra_flags		= CRYPTO_ALG_INTERNAL,
 969			.cra_blocksize		= AES_BLOCK_SIZE,
 970			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
 971			.cra_module		= THIS_MODULE,
 972		},
 973		.min_keysize	= AES_MIN_KEY_SIZE,
 974		.max_keysize	= AES_MAX_KEY_SIZE,
 975		.setkey		= aesni_skcipher_setkey,
 976		.encrypt	= ecb_encrypt,
 977		.decrypt	= ecb_decrypt,
 978	}, {
 979		.base = {
 980			.cra_name		= "__cbc(aes)",
 981			.cra_driver_name	= "__cbc-aes-aesni",
 982			.cra_priority		= 400,
 983			.cra_flags		= CRYPTO_ALG_INTERNAL,
 984			.cra_blocksize		= AES_BLOCK_SIZE,
 985			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
 986			.cra_module		= THIS_MODULE,
 987		},
 988		.min_keysize	= AES_MIN_KEY_SIZE,
 989		.max_keysize	= AES_MAX_KEY_SIZE,
 990		.ivsize		= AES_BLOCK_SIZE,
 991		.setkey		= aesni_skcipher_setkey,
 992		.encrypt	= cbc_encrypt,
 993		.decrypt	= cbc_decrypt,
 994	}, {
 995		.base = {
 996			.cra_name		= "__cts(cbc(aes))",
 997			.cra_driver_name	= "__cts-cbc-aes-aesni",
 998			.cra_priority		= 400,
 999			.cra_flags		= CRYPTO_ALG_INTERNAL,
1000			.cra_blocksize		= AES_BLOCK_SIZE,
1001			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1002			.cra_module		= THIS_MODULE,
1003		},
1004		.min_keysize	= AES_MIN_KEY_SIZE,
1005		.max_keysize	= AES_MAX_KEY_SIZE,
1006		.ivsize		= AES_BLOCK_SIZE,
1007		.walksize	= 2 * AES_BLOCK_SIZE,
1008		.setkey		= aesni_skcipher_setkey,
1009		.encrypt	= cts_cbc_encrypt,
1010		.decrypt	= cts_cbc_decrypt,
1011#ifdef CONFIG_X86_64
1012	}, {
1013		.base = {
1014			.cra_name		= "__ctr(aes)",
1015			.cra_driver_name	= "__ctr-aes-aesni",
1016			.cra_priority		= 400,
1017			.cra_flags		= CRYPTO_ALG_INTERNAL,
1018			.cra_blocksize		= 1,
1019			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1020			.cra_module		= THIS_MODULE,
1021		},
1022		.min_keysize	= AES_MIN_KEY_SIZE,
1023		.max_keysize	= AES_MAX_KEY_SIZE,
1024		.ivsize		= AES_BLOCK_SIZE,
1025		.chunksize	= AES_BLOCK_SIZE,
1026		.setkey		= aesni_skcipher_setkey,
1027		.encrypt	= ctr_crypt,
1028		.decrypt	= ctr_crypt,
1029#endif
1030	}, {
1031		.base = {
1032			.cra_name		= "__xts(aes)",
1033			.cra_driver_name	= "__xts-aes-aesni",
1034			.cra_priority		= 401,
1035			.cra_flags		= CRYPTO_ALG_INTERNAL,
1036			.cra_blocksize		= AES_BLOCK_SIZE,
1037			.cra_ctxsize		= XTS_AES_CTX_SIZE,
1038			.cra_module		= THIS_MODULE,
1039		},
1040		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
1041		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
1042		.ivsize		= AES_BLOCK_SIZE,
1043		.walksize	= 2 * AES_BLOCK_SIZE,
1044		.setkey		= xts_aesni_setkey,
1045		.encrypt	= xts_encrypt,
1046		.decrypt	= xts_decrypt,
1047	}
1048};
1049
1050static
1051struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
1052
1053#ifdef CONFIG_X86_64
1054static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
1055				  unsigned int key_len)
1056{
1057	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
1058
1059	return aes_set_key_common(crypto_aead_tfm(aead),
1060				  &ctx->aes_key_expanded, key, key_len) ?:
1061	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
1062}
1063
1064static int generic_gcmaes_encrypt(struct aead_request *req)
1065{
1066	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1067	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1068	void *aes_ctx = &(ctx->aes_key_expanded);
1069	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
1070	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
1071	__be32 counter = cpu_to_be32(1);
1072
1073	memcpy(iv, req->iv, 12);
1074	*((__be32 *)(iv+12)) = counter;
1075
1076	return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
1077			      aes_ctx);
1078}
1079
1080static int generic_gcmaes_decrypt(struct aead_request *req)
1081{
1082	__be32 counter = cpu_to_be32(1);
1083	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1084	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1085	void *aes_ctx = &(ctx->aes_key_expanded);
1086	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
1087	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
1088
1089	memcpy(iv, req->iv, 12);
1090	*((__be32 *)(iv+12)) = counter;
1091
1092	return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
1093			      aes_ctx);
1094}
1095
1096static struct aead_alg aesni_aeads[] = { {
1097	.setkey			= common_rfc4106_set_key,
1098	.setauthsize		= common_rfc4106_set_authsize,
1099	.encrypt		= helper_rfc4106_encrypt,
1100	.decrypt		= helper_rfc4106_decrypt,
1101	.ivsize			= GCM_RFC4106_IV_SIZE,
1102	.maxauthsize		= 16,
1103	.base = {
1104		.cra_name		= "__rfc4106(gcm(aes))",
1105		.cra_driver_name	= "__rfc4106-gcm-aesni",
1106		.cra_priority		= 400,
1107		.cra_flags		= CRYPTO_ALG_INTERNAL,
1108		.cra_blocksize		= 1,
1109		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
1110		.cra_alignmask		= AESNI_ALIGN - 1,
1111		.cra_module		= THIS_MODULE,
1112	},
1113}, {
1114	.setkey			= generic_gcmaes_set_key,
1115	.setauthsize		= generic_gcmaes_set_authsize,
1116	.encrypt		= generic_gcmaes_encrypt,
1117	.decrypt		= generic_gcmaes_decrypt,
1118	.ivsize			= GCM_AES_IV_SIZE,
1119	.maxauthsize		= 16,
1120	.base = {
1121		.cra_name		= "__gcm(aes)",
1122		.cra_driver_name	= "__generic-gcm-aesni",
1123		.cra_priority		= 400,
1124		.cra_flags		= CRYPTO_ALG_INTERNAL,
1125		.cra_blocksize		= 1,
1126		.cra_ctxsize		= sizeof(struct generic_gcmaes_ctx),
1127		.cra_alignmask		= AESNI_ALIGN - 1,
1128		.cra_module		= THIS_MODULE,
1129	},
1130} };
1131#else
1132static struct aead_alg aesni_aeads[0];
1133#endif
1134
1135static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];
1136
1137static const struct x86_cpu_id aesni_cpu_id[] = {
1138	X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),
1139	{}
1140};
1141MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1142
1143static int __init aesni_init(void)
1144{
1145	int err;
1146
1147	if (!x86_match_cpu(aesni_cpu_id))
1148		return -ENODEV;
1149#ifdef CONFIG_X86_64
1150	if (boot_cpu_has(X86_FEATURE_AVX2)) {
1151		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1152		static_branch_enable(&gcm_use_avx);
1153		static_branch_enable(&gcm_use_avx2);
1154	} else
1155	if (boot_cpu_has(X86_FEATURE_AVX)) {
1156		pr_info("AVX version of gcm_enc/dec engaged.\n");
1157		static_branch_enable(&gcm_use_avx);
1158	} else {
1159		pr_info("SSE version of gcm_enc/dec engaged.\n");
1160	}
1161	if (boot_cpu_has(X86_FEATURE_AVX)) {
1162		/* optimize performance of ctr mode encryption transform */
1163		static_call_update(aesni_ctr_enc_tfm, aesni_ctr_enc_avx_tfm);
1164		pr_info("AES CTR mode by8 optimization enabled\n");
1165	}
1166#endif
1167
1168	err = crypto_register_alg(&aesni_cipher_alg);
1169	if (err)
1170		return err;
1171
1172	err = simd_register_skciphers_compat(aesni_skciphers,
1173					     ARRAY_SIZE(aesni_skciphers),
1174					     aesni_simd_skciphers);
1175	if (err)
1176		goto unregister_cipher;
1177
1178	err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1179					 aesni_simd_aeads);
1180	if (err)
1181		goto unregister_skciphers;
1182
1183	return 0;
1184
1185unregister_skciphers:
1186	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1187				  aesni_simd_skciphers);
1188unregister_cipher:
1189	crypto_unregister_alg(&aesni_cipher_alg);
1190	return err;
1191}
1192
1193static void __exit aesni_exit(void)
1194{
1195	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1196			      aesni_simd_aeads);
1197	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1198				  aesni_simd_skciphers);
1199	crypto_unregister_alg(&aesni_cipher_alg);
1200}
1201
1202late_initcall(aesni_init);
1203module_exit(aesni_exit);
1204
1205MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1206MODULE_LICENSE("GPL");
1207MODULE_ALIAS_CRYPTO("aes");