v3.1
   1/*
   2 * Support for Intel AES-NI instructions. This file contains glue
   3 * code; the real AES implementation is in aesni-intel_asm.S.
   4 *
   5 * Copyright (C) 2008, Intel Corp.
   6 *    Author: Huang Ying <ying.huang@intel.com>
   7 *
   8 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
   9 * interface for 64-bit kernels.
  10 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
  11 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
  12 *             Tadeusz Struk (tadeusz.struk@intel.com)
  13 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
  14 *    Copyright (c) 2010, Intel Corporation.
  15 *
  16 * This program is free software; you can redistribute it and/or modify
  17 * it under the terms of the GNU General Public License as published by
  18 * the Free Software Foundation; either version 2 of the License, or
  19 * (at your option) any later version.
  20 */
  21
  22#include <linux/hardirq.h>
  23#include <linux/types.h>
  24#include <linux/crypto.h>
  25#include <linux/err.h>
  26#include <crypto/algapi.h>
  27#include <crypto/aes.h>
  28#include <crypto/cryptd.h>
  29#include <crypto/ctr.h>
  30#include <asm/i387.h>
  31#include <asm/aes.h>
  32#include <crypto/scatterwalk.h>
  33#include <crypto/internal/aead.h>
  34#include <linux/workqueue.h>
  35#include <linux/spinlock.h>
  36
  37#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
  38#define HAS_CTR
  39#endif
  40
  41#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
  42#define HAS_LRW
  43#endif
  44
  45#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
  46#define HAS_PCBC
  47#endif
  48
  49#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
  50#define HAS_XTS
  51#endif
  52
  53struct async_aes_ctx {
  54	struct cryptd_ablkcipher *cryptd_tfm;
  55};
  56
  57/* This data is stored at the end of the crypto_tfm struct.
  58 * It's a type of per "session" data storage location.
  59 * This needs to be 16 byte aligned.
  60 */
  61struct aesni_rfc4106_gcm_ctx {
  62	u8 hash_subkey[16];
  63	struct crypto_aes_ctx aes_key_expanded;
  64	u8 nonce[4];
  65	struct cryptd_aead *cryptd_tfm;
  66};
  67
  68struct aesni_gcm_set_hash_subkey_result {
  69	int err;
  70	struct completion completion;
  71};
  72
  73struct aesni_hash_subkey_req_data {
  74	u8 iv[16];
  75	struct aesni_gcm_set_hash_subkey_result result;
  76	struct scatterlist sg;
  77};
  78
  79#define AESNI_ALIGN	(16)
  80#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
  81#define RFC4106_HASH_SUBKEY_SIZE 16
  82
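
A note on the layout above: the comment on struct aesni_rfc4106_gcm_ctx requires the per-"session" data to start on a 16-byte (AESNI_ALIGN) boundary, which this glue code achieves by over-allocating the tfm context and rounding the raw pointer up (see aesni_rfc4106_gcm_ctx_get() and aes_ctx() further down). The following is a minimal userspace sketch of that rounding, not part of the kernel file; round_up_16() is an illustrative stand-in for the kernel's PTR_ALIGN().

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for PTR_ALIGN(ptr, AESNI_ALIGN): round an
 * address up to the next 16-byte boundary. */
static void *round_up_16(void *p)
{
	uintptr_t addr = (uintptr_t)p;

	return (void *)((addr + 15) & ~(uintptr_t)15);
}

int main(void)
{
	/* Reserve AESNI_ALIGN - 1 spare bytes, as cra_ctxsize does below,
	 * so a 16-byte-aligned region always fits inside the buffer. */
	unsigned char raw_ctx[64 + 16 - 1];
	void *aligned_ctx = round_up_16(raw_ctx);

	printf("raw=%p aligned=%p\n", (void *)raw_ctx, aligned_ctx);
	return 0;
}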
  83asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
  84			     unsigned int key_len);
  85asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
  86			  const u8 *in);
  87asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
  88			  const u8 *in);
  89asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
  90			      const u8 *in, unsigned int len);
  91asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
  92			      const u8 *in, unsigned int len);
  93asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
  94			      const u8 *in, unsigned int len, u8 *iv);
  95asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
  96			      const u8 *in, unsigned int len, u8 *iv);
  97
  98int crypto_fpu_init(void);
  99void crypto_fpu_exit(void);
 100
 101#ifdef CONFIG_X86_64
 102asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 103			      const u8 *in, unsigned int len, u8 *iv);
 104
 105/* asmlinkage void aesni_gcm_enc()
 106 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 107 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 108 * const u8 *in, Plaintext input
 109 * unsigned long plaintext_len, Length of data in bytes for encryption.
 110 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 111 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 112 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 113 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 114 * const u8 *aad, Additional Authentication Data (AAD)
 115 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 116 *          is going to be 8 or 12 bytes
 117 * u8 *auth_tag, Authenticated Tag output.
 118 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 119 *          Valid values are 16 (most likely), 12 or 8.
 120 */
 121asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
 122			const u8 *in, unsigned long plaintext_len, u8 *iv,
 123			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 124			u8 *auth_tag, unsigned long auth_tag_len);
 125
 126/* asmlinkage void aesni_gcm_dec()
 127 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 128 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 129 * const u8 *in, Ciphertext input
 130 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 131 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 132 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 133 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 134 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 135 * const u8 *aad, Additional Authentication Data (AAD)
 136 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 137 * to be 8 or 12 bytes
 138 * u8 *auth_tag, Authenticated Tag output.
 139 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 140 * Valid values are 16 (most likely), 12 or 8.
 141 */
 142asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
 143			const u8 *in, unsigned long ciphertext_len, u8 *iv,
 144			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 145			u8 *auth_tag, unsigned long auth_tag_len);
 146
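
Both prototypes above take iv as the 16-byte pre-counter block j0 described in their comments: a 4-byte salt, the 8-byte per-packet IV from the ESP payload, then the big-endian constant 1. The sketch below is not part of this file; it shows one way such a block could be assembled, and __driver_rfc4106_encrypt() further down does the same thing byte by byte from ctx->nonce and req->iv.

#include <string.h>

typedef unsigned char u8;

/* Hypothetical helper: assemble the RFC 4106 pre-counter block j0 as
 * documented above: salt (4 bytes) || ESP IV (8 bytes) || 0x00000001. */
static void build_rfc4106_j0(u8 j0[16], const u8 salt[4], const u8 esp_iv[8])
{
	memcpy(j0, salt, 4);		/* salt from the Security Association */
	memcpy(j0 + 4, esp_iv, 8);	/* explicit IV from the ESP payload */
	j0[12] = 0;			/* 32-bit block counter, big endian, */
	j0[13] = 0;			/* initialised to 1 */
	j0[14] = 0;
	j0[15] = 1;
}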
 147static inline struct
 148aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
 149{
 150	return
 151		(struct aesni_rfc4106_gcm_ctx *)
 152		PTR_ALIGN((u8 *)
 153		crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
 154}
 155#endif
 156
 157static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 158{
 159	unsigned long addr = (unsigned long)raw_ctx;
 160	unsigned long align = AESNI_ALIGN;
 161
 162	if (align <= crypto_tfm_ctx_alignment())
 163		align = 1;
 164	return (struct crypto_aes_ctx *)ALIGN(addr, align);
 165}
 166
 167static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 168			      const u8 *in_key, unsigned int key_len)
 169{
 170	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
 171	u32 *flags = &tfm->crt_flags;
 172	int err;
 173
 174	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
 175	    key_len != AES_KEYSIZE_256) {
 176		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 177		return -EINVAL;
 178	}
 179
 180	if (!irq_fpu_usable())
 181		err = crypto_aes_expand_key(ctx, in_key, key_len);
 182	else {
 183		kernel_fpu_begin();
 184		err = aesni_set_key(ctx, in_key, key_len);
 185		kernel_fpu_end();
 186	}
 187
 188	return err;
 189}
 190
 191static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 192		       unsigned int key_len)
 193{
 194	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
 195}
 196
 197static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 198{
 199	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 200
 201	if (!irq_fpu_usable())
 202		crypto_aes_encrypt_x86(ctx, dst, src);
 203	else {
 204		kernel_fpu_begin();
 205		aesni_enc(ctx, dst, src);
 206		kernel_fpu_end();
 207	}
 208}
 209
 210static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 211{
 212	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 213
 214	if (!irq_fpu_usable())
 215		crypto_aes_decrypt_x86(ctx, dst, src);
 216	else {
 217		kernel_fpu_begin();
 218		aesni_dec(ctx, dst, src);
 219		kernel_fpu_end();
 220	}
 221}
 222
 223static struct crypto_alg aesni_alg = {
 224	.cra_name		= "aes",
 225	.cra_driver_name	= "aes-aesni",
 226	.cra_priority		= 300,
 227	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 228	.cra_blocksize		= AES_BLOCK_SIZE,
 229	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
 230	.cra_alignmask		= 0,
 231	.cra_module		= THIS_MODULE,
 232	.cra_list		= LIST_HEAD_INIT(aesni_alg.cra_list),
 233	.cra_u	= {
 234		.cipher	= {
 235			.cia_min_keysize	= AES_MIN_KEY_SIZE,
 236			.cia_max_keysize	= AES_MAX_KEY_SIZE,
 237			.cia_setkey		= aes_set_key,
 238			.cia_encrypt		= aes_encrypt,
 239			.cia_decrypt		= aes_decrypt
 240		}
 241	}
 242};
 243
 244static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 245{
 246	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 247
 248	aesni_enc(ctx, dst, src);
 249}
 250
 251static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 252{
 253	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 254
 255	aesni_dec(ctx, dst, src);
 256}
 257
 258static struct crypto_alg __aesni_alg = {
 259	.cra_name		= "__aes-aesni",
 260	.cra_driver_name	= "__driver-aes-aesni",
 261	.cra_priority		= 0,
 262	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 263	.cra_blocksize		= AES_BLOCK_SIZE,
 264	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
 265	.cra_alignmask		= 0,
 266	.cra_module		= THIS_MODULE,
 267	.cra_list		= LIST_HEAD_INIT(__aesni_alg.cra_list),
 268	.cra_u	= {
 269		.cipher	= {
 270			.cia_min_keysize	= AES_MIN_KEY_SIZE,
 271			.cia_max_keysize	= AES_MAX_KEY_SIZE,
 272			.cia_setkey		= aes_set_key,
 273			.cia_encrypt		= __aes_encrypt,
 274			.cia_decrypt		= __aes_decrypt
 275		}
 276	}
 277};
 278
 279static int ecb_encrypt(struct blkcipher_desc *desc,
 280		       struct scatterlist *dst, struct scatterlist *src,
 281		       unsigned int nbytes)
 282{
 283	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 284	struct blkcipher_walk walk;
 285	int err;
 286
 287	blkcipher_walk_init(&walk, dst, src, nbytes);
 288	err = blkcipher_walk_virt(desc, &walk);
 289	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 290
 291	kernel_fpu_begin();
 292	while ((nbytes = walk.nbytes)) {
 293		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 294			      nbytes & AES_BLOCK_MASK);
 295		nbytes &= AES_BLOCK_SIZE - 1;
 296		err = blkcipher_walk_done(desc, &walk, nbytes);
 297	}
 298	kernel_fpu_end();
 299
 300	return err;
 301}
 302
 303static int ecb_decrypt(struct blkcipher_desc *desc,
 304		       struct scatterlist *dst, struct scatterlist *src,
 305		       unsigned int nbytes)
 306{
 307	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 308	struct blkcipher_walk walk;
 309	int err;
 310
 311	blkcipher_walk_init(&walk, dst, src, nbytes);
 312	err = blkcipher_walk_virt(desc, &walk);
 313	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 314
 315	kernel_fpu_begin();
 316	while ((nbytes = walk.nbytes)) {
 317		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 318			      nbytes & AES_BLOCK_MASK);
 319		nbytes &= AES_BLOCK_SIZE - 1;
 320		err = blkcipher_walk_done(desc, &walk, nbytes);
 321	}
 322	kernel_fpu_end();
 323
 324	return err;
 325}
 326
 327static struct crypto_alg blk_ecb_alg = {
 328	.cra_name		= "__ecb-aes-aesni",
 329	.cra_driver_name	= "__driver-ecb-aes-aesni",
 330	.cra_priority		= 0,
 331	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
 332	.cra_blocksize		= AES_BLOCK_SIZE,
 333	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
 334	.cra_alignmask		= 0,
 335	.cra_type		= &crypto_blkcipher_type,
 336	.cra_module		= THIS_MODULE,
 337	.cra_list		= LIST_HEAD_INIT(blk_ecb_alg.cra_list),
 338	.cra_u = {
 339		.blkcipher = {
 340			.min_keysize	= AES_MIN_KEY_SIZE,
 341			.max_keysize	= AES_MAX_KEY_SIZE,
 342			.setkey		= aes_set_key,
 343			.encrypt	= ecb_encrypt,
 344			.decrypt	= ecb_decrypt,
 345		},
 346	},
 347};
 348
 349static int cbc_encrypt(struct blkcipher_desc *desc,
 350		       struct scatterlist *dst, struct scatterlist *src,
 351		       unsigned int nbytes)
 352{
 353	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 354	struct blkcipher_walk walk;
 355	int err;
 356
 357	blkcipher_walk_init(&walk, dst, src, nbytes);
 358	err = blkcipher_walk_virt(desc, &walk);
 359	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 360
 361	kernel_fpu_begin();
 362	while ((nbytes = walk.nbytes)) {
 363		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 364			      nbytes & AES_BLOCK_MASK, walk.iv);
 365		nbytes &= AES_BLOCK_SIZE - 1;
 366		err = blkcipher_walk_done(desc, &walk, nbytes);
 367	}
 368	kernel_fpu_end();
 369
 370	return err;
 371}
 372
 373static int cbc_decrypt(struct blkcipher_desc *desc,
 374		       struct scatterlist *dst, struct scatterlist *src,
 375		       unsigned int nbytes)
 376{
 377	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 378	struct blkcipher_walk walk;
 379	int err;
 380
 381	blkcipher_walk_init(&walk, dst, src, nbytes);
 382	err = blkcipher_walk_virt(desc, &walk);
 383	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 384
 385	kernel_fpu_begin();
 386	while ((nbytes = walk.nbytes)) {
 387		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 388			      nbytes & AES_BLOCK_MASK, walk.iv);
 389		nbytes &= AES_BLOCK_SIZE - 1;
 390		err = blkcipher_walk_done(desc, &walk, nbytes);
 391	}
 392	kernel_fpu_end();
 393
 394	return err;
 395}
 396
 397static struct crypto_alg blk_cbc_alg = {
 398	.cra_name		= "__cbc-aes-aesni",
 399	.cra_driver_name	= "__driver-cbc-aes-aesni",
 400	.cra_priority		= 0,
 401	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
 402	.cra_blocksize		= AES_BLOCK_SIZE,
 403	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
 404	.cra_alignmask		= 0,
 405	.cra_type		= &crypto_blkcipher_type,
 406	.cra_module		= THIS_MODULE,
 407	.cra_list		= LIST_HEAD_INIT(blk_cbc_alg.cra_list),
 408	.cra_u = {
 409		.blkcipher = {
 410			.min_keysize	= AES_MIN_KEY_SIZE,
 411			.max_keysize	= AES_MAX_KEY_SIZE,
 412			.setkey		= aes_set_key,
 413			.encrypt	= cbc_encrypt,
 414			.decrypt	= cbc_decrypt,
 415		},
 416	},
 417};
 418
 419#ifdef CONFIG_X86_64
 420static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
 421			    struct blkcipher_walk *walk)
 422{
 423	u8 *ctrblk = walk->iv;
 424	u8 keystream[AES_BLOCK_SIZE];
 425	u8 *src = walk->src.virt.addr;
 426	u8 *dst = walk->dst.virt.addr;
 427	unsigned int nbytes = walk->nbytes;
 428
 429	aesni_enc(ctx, keystream, ctrblk);
 430	crypto_xor(keystream, src, nbytes);
 431	memcpy(dst, keystream, nbytes);
 432	crypto_inc(ctrblk, AES_BLOCK_SIZE);
 433}
 434
 435static int ctr_crypt(struct blkcipher_desc *desc,
 436		     struct scatterlist *dst, struct scatterlist *src,
 437		     unsigned int nbytes)
 438{
 439	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 440	struct blkcipher_walk walk;
 441	int err;
 442
 443	blkcipher_walk_init(&walk, dst, src, nbytes);
 444	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
 445	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 446
 447	kernel_fpu_begin();
 448	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 449		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 450			      nbytes & AES_BLOCK_MASK, walk.iv);
 451		nbytes &= AES_BLOCK_SIZE - 1;
 452		err = blkcipher_walk_done(desc, &walk, nbytes);
 453	}
 454	if (walk.nbytes) {
 455		ctr_crypt_final(ctx, &walk);
 456		err = blkcipher_walk_done(desc, &walk, 0);
 457	}
 458	kernel_fpu_end();
 459
 460	return err;
 461}
 462
 463static struct crypto_alg blk_ctr_alg = {
 464	.cra_name		= "__ctr-aes-aesni",
 465	.cra_driver_name	= "__driver-ctr-aes-aesni",
 466	.cra_priority		= 0,
 467	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
 468	.cra_blocksize		= 1,
 469	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
 470	.cra_alignmask		= 0,
 471	.cra_type		= &crypto_blkcipher_type,
 472	.cra_module		= THIS_MODULE,
 473	.cra_list		= LIST_HEAD_INIT(blk_ctr_alg.cra_list),
 474	.cra_u = {
 475		.blkcipher = {
 476			.min_keysize	= AES_MIN_KEY_SIZE,
 477			.max_keysize	= AES_MAX_KEY_SIZE,
 478			.ivsize		= AES_BLOCK_SIZE,
 479			.setkey		= aes_set_key,
 480			.encrypt	= ctr_crypt,
 481			.decrypt	= ctr_crypt,
 482		},
 483	},
 484};
 485#endif
 486
 487static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
 488			unsigned int key_len)
 489{
 490	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 491	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
 492	int err;
 493
 494	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 495	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
 496				    & CRYPTO_TFM_REQ_MASK);
 497	err = crypto_ablkcipher_setkey(child, key, key_len);
 498	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
 499				    & CRYPTO_TFM_RES_MASK);
 500	return err;
 501}
 502
 503static int ablk_encrypt(struct ablkcipher_request *req)
 504{
 505	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 506	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 507
 508	if (!irq_fpu_usable()) {
 509		struct ablkcipher_request *cryptd_req =
 510			ablkcipher_request_ctx(req);
 511		memcpy(cryptd_req, req, sizeof(*req));
 512		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
 513		return crypto_ablkcipher_encrypt(cryptd_req);
 514	} else {
 515		struct blkcipher_desc desc;
 516		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
 517		desc.info = req->info;
 518		desc.flags = 0;
 519		return crypto_blkcipher_crt(desc.tfm)->encrypt(
 520			&desc, req->dst, req->src, req->nbytes);
 521	}
 522}
 523
 524static int ablk_decrypt(struct ablkcipher_request *req)
 525{
 526	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 527	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 528
 529	if (!irq_fpu_usable()) {
 530		struct ablkcipher_request *cryptd_req =
 531			ablkcipher_request_ctx(req);
 532		memcpy(cryptd_req, req, sizeof(*req));
 533		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
 534		return crypto_ablkcipher_decrypt(cryptd_req);
 535	} else {
 536		struct blkcipher_desc desc;
 537		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
 538		desc.info = req->info;
 539		desc.flags = 0;
 540		return crypto_blkcipher_crt(desc.tfm)->decrypt(
 541			&desc, req->dst, req->src, req->nbytes);
 542	}
 543}
 544
 545static void ablk_exit(struct crypto_tfm *tfm)
 546{
 547	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 548
 549	cryptd_free_ablkcipher(ctx->cryptd_tfm);
 550}
 551
 552static void ablk_init_common(struct crypto_tfm *tfm,
 553			     struct cryptd_ablkcipher *cryptd_tfm)
 554{
 555	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 556
 557	ctx->cryptd_tfm = cryptd_tfm;
 558	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
 559		crypto_ablkcipher_reqsize(&cryptd_tfm->base);
 560}
 561
 562static int ablk_ecb_init(struct crypto_tfm *tfm)
 563{
 564	struct cryptd_ablkcipher *cryptd_tfm;
 565
 566	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
 567	if (IS_ERR(cryptd_tfm))
 568		return PTR_ERR(cryptd_tfm);
 569	ablk_init_common(tfm, cryptd_tfm);
 570	return 0;
 571}
 572
 573static struct crypto_alg ablk_ecb_alg = {
 574	.cra_name		= "ecb(aes)",
 575	.cra_driver_name	= "ecb-aes-aesni",
 576	.cra_priority		= 400,
 577	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 578	.cra_blocksize		= AES_BLOCK_SIZE,
 579	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 580	.cra_alignmask		= 0,
 581	.cra_type		= &crypto_ablkcipher_type,
 582	.cra_module		= THIS_MODULE,
 583	.cra_list		= LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
 584	.cra_init		= ablk_ecb_init,
 585	.cra_exit		= ablk_exit,
 586	.cra_u = {
 587		.ablkcipher = {
 588			.min_keysize	= AES_MIN_KEY_SIZE,
 589			.max_keysize	= AES_MAX_KEY_SIZE,
 590			.setkey		= ablk_set_key,
 591			.encrypt	= ablk_encrypt,
 592			.decrypt	= ablk_decrypt,
 593		},
 594	},
 595};
 596
 597static int ablk_cbc_init(struct crypto_tfm *tfm)
 598{
 599	struct cryptd_ablkcipher *cryptd_tfm;
 600
 601	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
 602	if (IS_ERR(cryptd_tfm))
 603		return PTR_ERR(cryptd_tfm);
 604	ablk_init_common(tfm, cryptd_tfm);
 605	return 0;
 606}
 607
 608static struct crypto_alg ablk_cbc_alg = {
 609	.cra_name		= "cbc(aes)",
 610	.cra_driver_name	= "cbc-aes-aesni",
 611	.cra_priority		= 400,
 612	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 613	.cra_blocksize		= AES_BLOCK_SIZE,
 614	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 615	.cra_alignmask		= 0,
 616	.cra_type		= &crypto_ablkcipher_type,
 617	.cra_module		= THIS_MODULE,
 618	.cra_list		= LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
 619	.cra_init		= ablk_cbc_init,
 620	.cra_exit		= ablk_exit,
 621	.cra_u = {
 622		.ablkcipher = {
 623			.min_keysize	= AES_MIN_KEY_SIZE,
 624			.max_keysize	= AES_MAX_KEY_SIZE,
 625			.ivsize		= AES_BLOCK_SIZE,
 626			.setkey		= ablk_set_key,
 627			.encrypt	= ablk_encrypt,
 628			.decrypt	= ablk_decrypt,
 629		},
 630	},
 631};
 632
 633#ifdef CONFIG_X86_64
 634static int ablk_ctr_init(struct crypto_tfm *tfm)
 635{
 636	struct cryptd_ablkcipher *cryptd_tfm;
 637
 638	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
 639	if (IS_ERR(cryptd_tfm))
 640		return PTR_ERR(cryptd_tfm);
 641	ablk_init_common(tfm, cryptd_tfm);
 642	return 0;
 643}
 644
 645static struct crypto_alg ablk_ctr_alg = {
 646	.cra_name		= "ctr(aes)",
 647	.cra_driver_name	= "ctr-aes-aesni",
 648	.cra_priority		= 400,
 649	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 650	.cra_blocksize		= 1,
 651	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 652	.cra_alignmask		= 0,
 653	.cra_type		= &crypto_ablkcipher_type,
 654	.cra_module		= THIS_MODULE,
 655	.cra_list		= LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
 656	.cra_init		= ablk_ctr_init,
 657	.cra_exit		= ablk_exit,
 658	.cra_u = {
 659		.ablkcipher = {
 660			.min_keysize	= AES_MIN_KEY_SIZE,
 661			.max_keysize	= AES_MAX_KEY_SIZE,
 662			.ivsize		= AES_BLOCK_SIZE,
 663			.setkey		= ablk_set_key,
 664			.encrypt	= ablk_encrypt,
 665			.decrypt	= ablk_encrypt,
 666			.geniv		= "chainiv",
 667		},
 668	},
 669};
 670
 671#ifdef HAS_CTR
 672static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
 673{
 674	struct cryptd_ablkcipher *cryptd_tfm;
 675
 676	cryptd_tfm = cryptd_alloc_ablkcipher(
 677		"rfc3686(__driver-ctr-aes-aesni)", 0, 0);
 678	if (IS_ERR(cryptd_tfm))
 679		return PTR_ERR(cryptd_tfm);
 680	ablk_init_common(tfm, cryptd_tfm);
 681	return 0;
 682}
 683
 684static struct crypto_alg ablk_rfc3686_ctr_alg = {
 685	.cra_name		= "rfc3686(ctr(aes))",
 686	.cra_driver_name	= "rfc3686-ctr-aes-aesni",
 687	.cra_priority		= 400,
 688	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 689	.cra_blocksize		= 1,
 690	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 691	.cra_alignmask		= 0,
 692	.cra_type		= &crypto_ablkcipher_type,
 693	.cra_module		= THIS_MODULE,
 694	.cra_list		= LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
 695	.cra_init		= ablk_rfc3686_ctr_init,
 696	.cra_exit		= ablk_exit,
 697	.cra_u = {
 698		.ablkcipher = {
 699			.min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
 700			.max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
 701			.ivsize	     = CTR_RFC3686_IV_SIZE,
 702			.setkey	     = ablk_set_key,
 703			.encrypt     = ablk_encrypt,
 704			.decrypt     = ablk_decrypt,
 705			.geniv	     = "seqiv",
 706		},
 707	},
 708};
 709#endif
 710#endif
 711
 712#ifdef HAS_LRW
 713static int ablk_lrw_init(struct crypto_tfm *tfm)
 714{
 715	struct cryptd_ablkcipher *cryptd_tfm;
 716
 717	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
 718					     0, 0);
 719	if (IS_ERR(cryptd_tfm))
 720		return PTR_ERR(cryptd_tfm);
 721	ablk_init_common(tfm, cryptd_tfm);
 722	return 0;
 723}
 724
 725static struct crypto_alg ablk_lrw_alg = {
 726	.cra_name		= "lrw(aes)",
 727	.cra_driver_name	= "lrw-aes-aesni",
 728	.cra_priority		= 400,
 729	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 730	.cra_blocksize		= AES_BLOCK_SIZE,
 731	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 732	.cra_alignmask		= 0,
 733	.cra_type		= &crypto_ablkcipher_type,
 734	.cra_module		= THIS_MODULE,
 735	.cra_list		= LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
 736	.cra_init		= ablk_lrw_init,
 737	.cra_exit		= ablk_exit,
 738	.cra_u = {
 739		.ablkcipher = {
 740			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
 741			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
 742			.ivsize		= AES_BLOCK_SIZE,
 743			.setkey		= ablk_set_key,
 744			.encrypt	= ablk_encrypt,
 745			.decrypt	= ablk_decrypt,
 746		},
 747	},
 748};
 749#endif
 750
 751#ifdef HAS_PCBC
 752static int ablk_pcbc_init(struct crypto_tfm *tfm)
 753{
 754	struct cryptd_ablkcipher *cryptd_tfm;
 755
 756	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
 757					     0, 0);
 758	if (IS_ERR(cryptd_tfm))
 759		return PTR_ERR(cryptd_tfm);
 760	ablk_init_common(tfm, cryptd_tfm);
 761	return 0;
 762}
 763
 764static struct crypto_alg ablk_pcbc_alg = {
 765	.cra_name		= "pcbc(aes)",
 766	.cra_driver_name	= "pcbc-aes-aesni",
 767	.cra_priority		= 400,
 768	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 769	.cra_blocksize		= AES_BLOCK_SIZE,
 770	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 771	.cra_alignmask		= 0,
 772	.cra_type		= &crypto_ablkcipher_type,
 773	.cra_module		= THIS_MODULE,
 774	.cra_list		= LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
 775	.cra_init		= ablk_pcbc_init,
 776	.cra_exit		= ablk_exit,
 777	.cra_u = {
 778		.ablkcipher = {
 779			.min_keysize	= AES_MIN_KEY_SIZE,
 780			.max_keysize	= AES_MAX_KEY_SIZE,
 781			.ivsize		= AES_BLOCK_SIZE,
 782			.setkey		= ablk_set_key,
 783			.encrypt	= ablk_encrypt,
 784			.decrypt	= ablk_decrypt,
 785		},
 786	},
 787};
 788#endif
 789
 790#ifdef HAS_XTS
 791static int ablk_xts_init(struct crypto_tfm *tfm)
 792{
 793	struct cryptd_ablkcipher *cryptd_tfm;
 794
 795	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
 796					     0, 0);
 797	if (IS_ERR(cryptd_tfm))
 798		return PTR_ERR(cryptd_tfm);
 799	ablk_init_common(tfm, cryptd_tfm);
 800	return 0;
 801}
 802
 803static struct crypto_alg ablk_xts_alg = {
 804	.cra_name		= "xts(aes)",
 805	.cra_driver_name	= "xts-aes-aesni",
 806	.cra_priority		= 400,
 807	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 808	.cra_blocksize		= AES_BLOCK_SIZE,
 809	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 810	.cra_alignmask		= 0,
 811	.cra_type		= &crypto_ablkcipher_type,
 812	.cra_module		= THIS_MODULE,
 813	.cra_list		= LIST_HEAD_INIT(ablk_xts_alg.cra_list),
 814	.cra_init		= ablk_xts_init,
 815	.cra_exit		= ablk_exit,
 816	.cra_u = {
 817		.ablkcipher = {
 818			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
 819			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
 820			.ivsize		= AES_BLOCK_SIZE,
 821			.setkey		= ablk_set_key,
 822			.encrypt	= ablk_encrypt,
 823			.decrypt	= ablk_decrypt,
 824		},
 825	},
 826};
 827#endif
 828
 829#ifdef CONFIG_X86_64
 830static int rfc4106_init(struct crypto_tfm *tfm)
 831{
 832	struct cryptd_aead *cryptd_tfm;
 833	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
 834		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
 835	struct crypto_aead *cryptd_child;
 836	struct aesni_rfc4106_gcm_ctx *child_ctx;
 837	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
 838	if (IS_ERR(cryptd_tfm))
 839		return PTR_ERR(cryptd_tfm);
 840
 841	cryptd_child = cryptd_aead_child(cryptd_tfm);
 842	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
 843	memcpy(child_ctx, ctx, sizeof(*ctx));
 844	ctx->cryptd_tfm = cryptd_tfm;
 845	tfm->crt_aead.reqsize = sizeof(struct aead_request)
 846		+ crypto_aead_reqsize(&cryptd_tfm->base);
 847	return 0;
 848}
 849
 850static void rfc4106_exit(struct crypto_tfm *tfm)
 851{
 852	struct aesni_rfc4106_gcm_ctx *ctx =
 853		(struct aesni_rfc4106_gcm_ctx *)
 854		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
 855	if (!IS_ERR(ctx->cryptd_tfm))
 856		cryptd_free_aead(ctx->cryptd_tfm);
 857	return;
 858}
 859
 860static void
 861rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
 862{
 863	struct aesni_gcm_set_hash_subkey_result *result = req->data;
 864
 865	if (err == -EINPROGRESS)
 866		return;
 867	result->err = err;
 868	complete(&result->completion);
 869}
 870
 871static int
 872rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
 873{
 874	struct crypto_ablkcipher *ctr_tfm;
 875	struct ablkcipher_request *req;
 876	int ret = -EINVAL;
 877	struct aesni_hash_subkey_req_data *req_data;
 878
 879	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
 880	if (IS_ERR(ctr_tfm))
 881		return PTR_ERR(ctr_tfm);
 882
 883	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
 884
 885	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
 886	if (ret)
 887		goto out_free_ablkcipher;
 888
 889	ret = -ENOMEM;
 890	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
 891	if (!req)
 892		goto out_free_ablkcipher;
 893
 894	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
 895	if (!req_data)
 896		goto out_free_request;
 897
 898	memset(req_data->iv, 0, sizeof(req_data->iv));
 899
 900	/* Clear the data in the hash sub key container to zero.*/
 901	/* We want to cipher all zeros to create the hash sub key. */
 902	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
 903
 904	init_completion(&req_data->result.completion);
 905	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
 906	ablkcipher_request_set_tfm(req, ctr_tfm);
 907	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
 908					CRYPTO_TFM_REQ_MAY_BACKLOG,
 909					rfc4106_set_hash_subkey_done,
 910					&req_data->result);
 911
 912	ablkcipher_request_set_crypt(req, &req_data->sg,
 913		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
 914
 915	ret = crypto_ablkcipher_encrypt(req);
 916	if (ret == -EINPROGRESS || ret == -EBUSY) {
 917		ret = wait_for_completion_interruptible
 918			(&req_data->result.completion);
 919		if (!ret)
 920			ret = req_data->result.err;
 921	}
 922	kfree(req_data);
 923out_free_request:
 924	ablkcipher_request_free(req);
 925out_free_ablkcipher:
 926	crypto_free_ablkcipher(ctr_tfm);
 927	return ret;
 928}
 929
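
As the comments in rfc4106_set_hash_subkey() above say, the hash subkey is obtained by enciphering an all-zero block: with a zero IV, the first ctr(aes) keystream block is AES_K(0^128), which is exactly the GHASH subkey H. The following is a minimal sketch of the same computation done synchronously with the kernel's single-block "aes" cipher; it is shown only for illustration and is not used by this driver.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/string.h>

/* Illustrative only: compute the GHASH subkey H = AES_K(0^128) with the
 * synchronous single-block "aes" cipher instead of the asynchronous
 * ctr(aes) request used above. */
static int example_compute_hash_subkey(u8 *hash_subkey, const u8 *key,
				       unsigned int key_len)
{
	struct crypto_cipher *aes;
	int ret;

	aes = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(aes))
		return PTR_ERR(aes);

	ret = crypto_cipher_setkey(aes, key, key_len);
	if (!ret) {
		memset(hash_subkey, 0, 16);
		/* Encrypting the all-zero block yields the subkey H. */
		crypto_cipher_encrypt_one(aes, hash_subkey, hash_subkey);
	}

	crypto_free_cipher(aes);
	return ret;
}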
 930static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
 931						   unsigned int key_len)
 932{
 933	int ret = 0;
 934	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
 935	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
 936	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
 937	struct aesni_rfc4106_gcm_ctx *child_ctx =
 938                                 aesni_rfc4106_gcm_ctx_get(cryptd_child);
 939	u8 *new_key_mem = NULL;
 940
 941	if (key_len < 4) {
 942		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 943		return -EINVAL;
 944	}
 945	/*Account for 4 byte nonce at the end.*/
 946	key_len -= 4;
 947	if (key_len != AES_KEYSIZE_128) {
 948		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 949		return -EINVAL;
 950	}
 951
 952	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
 953	/*This must be on a 16 byte boundary!*/
 954	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
 955		return -EINVAL;
 956
 957	if ((unsigned long)key % AESNI_ALIGN) {
 958		/*key is not aligned: use an auxiliary aligned pointer*/
 959		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
 960		if (!new_key_mem)
 961			return -ENOMEM;
 962
 963		new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
 964		memcpy(new_key_mem, key, key_len);
 965		key = new_key_mem;
 966	}
 967
 968	if (!irq_fpu_usable())
 969		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
 970		key, key_len);
 971	else {
 972		kernel_fpu_begin();
 973		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
 974		kernel_fpu_end();
 975	}
 976	/*This must be on a 16 byte boundary!*/
 977	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
 978		ret = -EINVAL;
 979		goto exit;
 980	}
 981	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
 982	memcpy(child_ctx, ctx, sizeof(*ctx));
 983exit:
 984	kfree(new_key_mem);
 985	return ret;
 986}
 987
 988/* This is the Integrity Check Value (aka the authentication tag) length and
 989 * can be 8, 12 or 16 bytes long. */
 990static int rfc4106_set_authsize(struct crypto_aead *parent,
 991				unsigned int authsize)
 992{
 993	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
 994	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
 995
 996	switch (authsize) {
 997	case 8:
 998	case 12:
 999	case 16:
1000		break;
1001	default:
1002		return -EINVAL;
1003	}
1004	crypto_aead_crt(parent)->authsize = authsize;
1005	crypto_aead_crt(cryptd_child)->authsize = authsize;
1006	return 0;
1007}
1008
1009static int rfc4106_encrypt(struct aead_request *req)
1010{
1011	int ret;
1012	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1013	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1014
1015	if (!irq_fpu_usable()) {
1016		struct aead_request *cryptd_req =
1017			(struct aead_request *) aead_request_ctx(req);
1018		memcpy(cryptd_req, req, sizeof(*req));
1019		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1020		return crypto_aead_encrypt(cryptd_req);
1021	} else {
1022		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1023		kernel_fpu_begin();
1024		ret = cryptd_child->base.crt_aead.encrypt(req);
1025		kernel_fpu_end();
1026		return ret;
1027	}
1028}
1029
1030static int rfc4106_decrypt(struct aead_request *req)
1031{
1032	int ret;
1033	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1034	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1035
1036	if (!irq_fpu_usable()) {
1037		struct aead_request *cryptd_req =
1038			(struct aead_request *) aead_request_ctx(req);
1039		memcpy(cryptd_req, req, sizeof(*req));
1040		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1041		return crypto_aead_decrypt(cryptd_req);
1042	} else {
1043		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1044		kernel_fpu_begin();
1045		ret = cryptd_child->base.crt_aead.decrypt(req);
1046		kernel_fpu_end();
1047		return ret;
1048	}
1049}
1050
1051static struct crypto_alg rfc4106_alg = {
1052	.cra_name = "rfc4106(gcm(aes))",
1053	.cra_driver_name = "rfc4106-gcm-aesni",
1054	.cra_priority = 400,
1055	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1056	.cra_blocksize = 1,
1057	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
1058	.cra_alignmask = 0,
1059	.cra_type = &crypto_nivaead_type,
1060	.cra_module = THIS_MODULE,
1061	.cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
1062	.cra_init = rfc4106_init,
1063	.cra_exit = rfc4106_exit,
1064	.cra_u = {
1065		.aead = {
1066			.setkey = rfc4106_set_key,
1067			.setauthsize = rfc4106_set_authsize,
1068			.encrypt = rfc4106_encrypt,
1069			.decrypt = rfc4106_decrypt,
1070			.geniv = "seqiv",
1071			.ivsize = 8,
1072			.maxauthsize = 16,
1073		},
1074	},
1075};
1076
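
For context, rfc4106_alg above registers the "rfc4106(gcm(aes))" AEAD used by IPsec; its setkey expects the AES key with the 4-byte salt appended (see rfc4106_set_key() below). The following is a minimal, hedged sketch of how another kernel module might allocate and key this transform; it is not taken from this file, and example_setup_rfc4106() is an illustrative name.

#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/aead.h>

/* Illustrative only: allocate the rfc4106(gcm(aes)) AEAD and set a
 * 128-bit AES key followed by the 4-byte salt, as rfc4106_set_key()
 * expects (16 + 4 = 20 key bytes). */
static int example_setup_rfc4106(const u8 key_and_salt[20])
{
	struct crypto_aead *tfm;
	int err;

	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key_and_salt, 20);
	if (!err)
		/* 16-byte tag; 8 and 12 are also accepted by setauthsize. */
		err = crypto_aead_setauthsize(tfm, 16);

	crypto_free_aead(tfm);
	return err;
}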
1077static int __driver_rfc4106_encrypt(struct aead_request *req)
1078{
1079	u8 one_entry_in_sg = 0;
1080	u8 *src, *dst, *assoc;
1081	__be32 counter = cpu_to_be32(1);
1082	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1083	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1084	void *aes_ctx = &(ctx->aes_key_expanded);
1085	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1086	u8 iv_tab[16+AESNI_ALIGN];
1087	u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
1088	struct scatter_walk src_sg_walk;
1089	struct scatter_walk assoc_sg_walk;
1090	struct scatter_walk dst_sg_walk;
1091	unsigned int i;
1092
1093	/* Assuming we are supporting rfc4106 64-bit extended */
1094	/* sequence numbers, we need the AAD length to be */
1095	/* equal to 8 or 12 bytes. */
1096	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
1097		return -EINVAL;
1098	/* The IV (pre-counter block) is built below */
1099	for (i = 0; i < 4; i++)
1100		*(iv+i) = ctx->nonce[i];
1101	for (i = 0; i < 8; i++)
1102		*(iv+4+i) = req->iv[i];
1103	*((__be32 *)(iv+12)) = counter;
1104
1105	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1106		one_entry_in_sg = 1;
1107		scatterwalk_start(&src_sg_walk, req->src);
1108		scatterwalk_start(&assoc_sg_walk, req->assoc);
1109		src = scatterwalk_map(&src_sg_walk, 0);
1110		assoc = scatterwalk_map(&assoc_sg_walk, 0);
1111		dst = src;
1112		if (unlikely(req->src != req->dst)) {
1113			scatterwalk_start(&dst_sg_walk, req->dst);
1114			dst = scatterwalk_map(&dst_sg_walk, 0);
1115		}
1116
1117	} else {
1118		/* Allocate memory for src, dst, assoc */
1119		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
1120			GFP_ATOMIC);
1121		if (unlikely(!src))
1122			return -ENOMEM;
1123		assoc = (src + req->cryptlen + auth_tag_len);
1124		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1125		scatterwalk_map_and_copy(assoc, req->assoc, 0,
1126					req->assoclen, 0);
1127		dst = src;
1128	}
1129
1130	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
1131		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
1132		+ ((unsigned long)req->cryptlen), auth_tag_len);
1133
1134	/* The authTag (aka the Integrity Check Value) needs to be written
1135	 * back to the packet. */
1136	if (one_entry_in_sg) {
1137		if (unlikely(req->src != req->dst)) {
1138			scatterwalk_unmap(dst, 0);
1139			scatterwalk_done(&dst_sg_walk, 0, 0);
1140		}
1141		scatterwalk_unmap(src, 0);
1142		scatterwalk_unmap(assoc, 0);
1143		scatterwalk_done(&src_sg_walk, 0, 0);
1144		scatterwalk_done(&assoc_sg_walk, 0, 0);
1145	} else {
1146		scatterwalk_map_and_copy(dst, req->dst, 0,
1147			req->cryptlen + auth_tag_len, 1);
1148		kfree(src);
1149	}
1150	return 0;
1151}
1152
1153static int __driver_rfc4106_decrypt(struct aead_request *req)
1154{
1155	u8 one_entry_in_sg = 0;
1156	u8 *src, *dst, *assoc;
1157	unsigned long tempCipherLen = 0;
1158	__be32 counter = cpu_to_be32(1);
1159	int retval = 0;
1160	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1161	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1162	void *aes_ctx = &(ctx->aes_key_expanded);
1163	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1164	u8 iv_and_authTag[32+AESNI_ALIGN];
1165	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
1166	u8 *authTag = iv + 16;
1167	struct scatter_walk src_sg_walk;
1168	struct scatter_walk assoc_sg_walk;
1169	struct scatter_walk dst_sg_walk;
1170	unsigned int i;
1171
1172	if (unlikely((req->cryptlen < auth_tag_len) ||
1173		(req->assoclen != 8 && req->assoclen != 12)))
1174		return -EINVAL;
1175	/* Assuming we are supporting rfc4106 64-bit extended */
1176	/* sequence numbers, we need the AAD length to be */
1177	/* equal to 8 or 12 bytes. */
1178
1179	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
1180	/* The IV (pre-counter block) is built below */
1181	for (i = 0; i < 4; i++)
1182		*(iv+i) = ctx->nonce[i];
1183	for (i = 0; i < 8; i++)
1184		*(iv+4+i) = req->iv[i];
1185	*((__be32 *)(iv+12)) = counter;
1186
1187	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1188		one_entry_in_sg = 1;
1189		scatterwalk_start(&src_sg_walk, req->src);
1190		scatterwalk_start(&assoc_sg_walk, req->assoc);
1191		src = scatterwalk_map(&src_sg_walk, 0);
1192		assoc = scatterwalk_map(&assoc_sg_walk, 0);
1193		dst = src;
1194		if (unlikely(req->src != req->dst)) {
1195			scatterwalk_start(&dst_sg_walk, req->dst);
1196			dst = scatterwalk_map(&dst_sg_walk, 0);
1197		}
1198
1199	} else {
1200		/* Allocate memory for src, dst, assoc */
1201		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1202		if (!src)
1203			return -ENOMEM;
1204		assoc = (src + req->cryptlen + auth_tag_len);
1205		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1206		scatterwalk_map_and_copy(assoc, req->assoc, 0,
1207			req->assoclen, 0);
1208		dst = src;
1209	}
1210
1211	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
1212		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
1213		authTag, auth_tag_len);
1214
1215	/* Compare generated tag with passed in tag. */
1216	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
1217		-EBADMSG : 0;
1218
1219	if (one_entry_in_sg) {
1220		if (unlikely(req->src != req->dst)) {
1221			scatterwalk_unmap(dst, 0);
1222			scatterwalk_done(&dst_sg_walk, 0, 0);
1223		}
1224		scatterwalk_unmap(src, 0);
1225		scatterwalk_unmap(assoc, 0);
1226		scatterwalk_done(&src_sg_walk, 0, 0);
1227		scatterwalk_done(&assoc_sg_walk, 0, 0);
1228	} else {
1229		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
1230		kfree(src);
1231	}
1232	return retval;
1233}
1234
1235static struct crypto_alg __rfc4106_alg = {
1236	.cra_name		= "__gcm-aes-aesni",
1237	.cra_driver_name	= "__driver-gcm-aes-aesni",
1238	.cra_priority		= 0,
1239	.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
1240	.cra_blocksize		= 1,
1241	.cra_ctxsize	= sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
1242	.cra_alignmask		= 0,
1243	.cra_type		= &crypto_aead_type,
1244	.cra_module		= THIS_MODULE,
1245	.cra_list		= LIST_HEAD_INIT(__rfc4106_alg.cra_list),
1246	.cra_u = {
1247		.aead = {
1248			.encrypt	= __driver_rfc4106_encrypt,
1249			.decrypt	= __driver_rfc4106_decrypt,
1250		},
1251	},
1252};
1253#endif
1254
1255static int __init aesni_init(void)
1256{
1257	int err;
1258
1259	if (!cpu_has_aes) {
1260		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
1261		return -ENODEV;
1262	}
1263
1264	if ((err = crypto_fpu_init()))
1265		goto fpu_err;
1266	if ((err = crypto_register_alg(&aesni_alg)))
1267		goto aes_err;
1268	if ((err = crypto_register_alg(&__aesni_alg)))
1269		goto __aes_err;
1270	if ((err = crypto_register_alg(&blk_ecb_alg)))
1271		goto blk_ecb_err;
1272	if ((err = crypto_register_alg(&blk_cbc_alg)))
1273		goto blk_cbc_err;
1274	if ((err = crypto_register_alg(&ablk_ecb_alg)))
1275		goto ablk_ecb_err;
1276	if ((err = crypto_register_alg(&ablk_cbc_alg)))
1277		goto ablk_cbc_err;
1278#ifdef CONFIG_X86_64
1279	if ((err = crypto_register_alg(&blk_ctr_alg)))
1280		goto blk_ctr_err;
1281	if ((err = crypto_register_alg(&ablk_ctr_alg)))
1282		goto ablk_ctr_err;
1283	if ((err = crypto_register_alg(&__rfc4106_alg)))
1284		goto __aead_gcm_err;
1285	if ((err = crypto_register_alg(&rfc4106_alg)))
1286		goto aead_gcm_err;
1287#ifdef HAS_CTR
1288	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
1289		goto ablk_rfc3686_ctr_err;
1290#endif
1291#endif
1292#ifdef HAS_LRW
1293	if ((err = crypto_register_alg(&ablk_lrw_alg)))
1294		goto ablk_lrw_err;
1295#endif
1296#ifdef HAS_PCBC
1297	if ((err = crypto_register_alg(&ablk_pcbc_alg)))
1298		goto ablk_pcbc_err;
1299#endif
1300#ifdef HAS_XTS
1301	if ((err = crypto_register_alg(&ablk_xts_alg)))
1302		goto ablk_xts_err;
1303#endif
1304	return err;
1305
1306#ifdef HAS_XTS
1307ablk_xts_err:
1308#endif
1309#ifdef HAS_PCBC
1310	crypto_unregister_alg(&ablk_pcbc_alg);
1311ablk_pcbc_err:
1312#endif
1313#ifdef HAS_LRW
1314	crypto_unregister_alg(&ablk_lrw_alg);
1315ablk_lrw_err:
1316#endif
1317#ifdef CONFIG_X86_64
1318#ifdef HAS_CTR
1319	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
1320ablk_rfc3686_ctr_err:
1321#endif
1322	crypto_unregister_alg(&rfc4106_alg);
1323aead_gcm_err:
1324	crypto_unregister_alg(&__rfc4106_alg);
1325__aead_gcm_err:
1326	crypto_unregister_alg(&ablk_ctr_alg);
1327ablk_ctr_err:
1328	crypto_unregister_alg(&blk_ctr_alg);
1329blk_ctr_err:
1330#endif
1331	crypto_unregister_alg(&ablk_cbc_alg);
1332ablk_cbc_err:
1333	crypto_unregister_alg(&ablk_ecb_alg);
1334ablk_ecb_err:
1335	crypto_unregister_alg(&blk_cbc_alg);
1336blk_cbc_err:
1337	crypto_unregister_alg(&blk_ecb_alg);
1338blk_ecb_err:
1339	crypto_unregister_alg(&__aesni_alg);
1340__aes_err:
1341	crypto_unregister_alg(&aesni_alg);
1342aes_err:
1343fpu_err:
1344	return err;
1345}
1346
1347static void __exit aesni_exit(void)
1348{
1349#ifdef HAS_XTS
1350	crypto_unregister_alg(&ablk_xts_alg);
1351#endif
1352#ifdef HAS_PCBC
1353	crypto_unregister_alg(&ablk_pcbc_alg);
1354#endif
1355#ifdef HAS_LRW
1356	crypto_unregister_alg(&ablk_lrw_alg);
1357#endif
1358#ifdef CONFIG_X86_64
1359#ifdef HAS_CTR
1360	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
1361#endif
1362	crypto_unregister_alg(&rfc4106_alg);
1363	crypto_unregister_alg(&__rfc4106_alg);
1364	crypto_unregister_alg(&ablk_ctr_alg);
1365	crypto_unregister_alg(&blk_ctr_alg);
1366#endif
1367	crypto_unregister_alg(&ablk_cbc_alg);
1368	crypto_unregister_alg(&ablk_ecb_alg);
1369	crypto_unregister_alg(&blk_cbc_alg);
1370	crypto_unregister_alg(&blk_ecb_alg);
1371	crypto_unregister_alg(&__aesni_alg);
1372	crypto_unregister_alg(&aesni_alg);
1373
1374	crypto_fpu_exit();
1375}
1376
1377module_init(aesni_init);
1378module_exit(aesni_exit);
1379
1380MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1381MODULE_LICENSE("GPL");
1382MODULE_ALIAS("aes");
v3.15
   1/*
   2 * Support for Intel AES-NI instructions. This file contains glue
   3 * code; the real AES implementation is in aesni-intel_asm.S.
   4 *
   5 * Copyright (C) 2008, Intel Corp.
   6 *    Author: Huang Ying <ying.huang@intel.com>
   7 *
   8 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
   9 * interface for 64-bit kernels.
  10 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
  11 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
  12 *             Tadeusz Struk (tadeusz.struk@intel.com)
  13 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
  14 *    Copyright (c) 2010, Intel Corporation.
  15 *
  16 * This program is free software; you can redistribute it and/or modify
  17 * it under the terms of the GNU General Public License as published by
  18 * the Free Software Foundation; either version 2 of the License, or
  19 * (at your option) any later version.
  20 */
  21
  22#include <linux/hardirq.h>
  23#include <linux/types.h>
  24#include <linux/crypto.h>
  25#include <linux/module.h>
  26#include <linux/err.h>
  27#include <crypto/algapi.h>
  28#include <crypto/aes.h>
  29#include <crypto/cryptd.h>
  30#include <crypto/ctr.h>
  31#include <crypto/b128ops.h>
  32#include <crypto/lrw.h>
  33#include <crypto/xts.h>
  34#include <asm/cpu_device_id.h>
  35#include <asm/i387.h>
  36#include <asm/crypto/aes.h>
  37#include <crypto/ablk_helper.h>
  38#include <crypto/scatterwalk.h>
  39#include <crypto/internal/aead.h>
  40#include <linux/workqueue.h>
  41#include <linux/spinlock.h>
  42#ifdef CONFIG_X86_64
  43#include <asm/crypto/glue_helper.h>
  44#endif
  45
  46#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
  47#define HAS_PCBC
  48#endif
  49
  50/* This data is stored at the end of the crypto_tfm struct.
  51 * It's a type of per "session" data storage location.
  52 * This needs to be 16 byte aligned.
  53 */
  54struct aesni_rfc4106_gcm_ctx {
  55	u8 hash_subkey[16];
  56	struct crypto_aes_ctx aes_key_expanded;
  57	u8 nonce[4];
  58	struct cryptd_aead *cryptd_tfm;
  59};
  60
  61struct aesni_gcm_set_hash_subkey_result {
  62	int err;
  63	struct completion completion;
  64};
  65
  66struct aesni_hash_subkey_req_data {
  67	u8 iv[16];
  68	struct aesni_gcm_set_hash_subkey_result result;
  69	struct scatterlist sg;
  70};
  71
  72#define AESNI_ALIGN	(16)
  73#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
  74#define RFC4106_HASH_SUBKEY_SIZE 16
  75
  76struct aesni_lrw_ctx {
  77	struct lrw_table_ctx lrw_table;
  78	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
  79};
  80
  81struct aesni_xts_ctx {
  82	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
  83	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
  84};
  85
  86asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
  87			     unsigned int key_len);
  88asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
  89			  const u8 *in);
  90asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
  91			  const u8 *in);
  92asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
  93			      const u8 *in, unsigned int len);
  94asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
  95			      const u8 *in, unsigned int len);
  96asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
  97			      const u8 *in, unsigned int len, u8 *iv);
  98asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
  99			      const u8 *in, unsigned int len, u8 *iv);
 100
 101int crypto_fpu_init(void);
 102void crypto_fpu_exit(void);
 103
 104#define AVX_GEN2_OPTSIZE 640
 105#define AVX_GEN4_OPTSIZE 4096
 106
 107#ifdef CONFIG_X86_64
 108asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 109			      const u8 *in, unsigned int len, u8 *iv);
 110
 111asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
 112				 const u8 *in, bool enc, u8 *iv);
 113
 114/* asmlinkage void aesni_gcm_enc()
 115 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 116 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 117 * const u8 *in, Plaintext input
 118 * unsigned long plaintext_len, Length of data in bytes for encryption.
 119 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 120 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 121 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 122 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 123 * const u8 *aad, Additional Authentication Data (AAD)
 124 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 125 *          is going to be 8 or 12 bytes
 126 * u8 *auth_tag, Authenticated Tag output.
 127 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 128 *          Valid values are 16 (most likely), 12 or 8.
 129 */
 130asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
 131			const u8 *in, unsigned long plaintext_len, u8 *iv,
 132			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 133			u8 *auth_tag, unsigned long auth_tag_len);
 134
 135/* asmlinkage void aesni_gcm_dec()
 136 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 137 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 138 * const u8 *in, Ciphertext input
 139 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 140 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 141 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 142 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 143 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 144 * const u8 *aad, Additional Authentication Data (AAD)
 145 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 146 * to be 8 or 12 bytes
 147 * u8 *auth_tag, Authenticated Tag output.
 148 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 149 * Valid values are 16 (most likely), 12 or 8.
 150 */
 151asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
 152			const u8 *in, unsigned long ciphertext_len, u8 *iv,
 153			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 154			u8 *auth_tag, unsigned long auth_tag_len);
 155
 156
 157#ifdef CONFIG_AS_AVX
 158/*
 159 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 160 * gcm_data *my_ctx_data, context data
 161 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 162 */
 163asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
 164
 165asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
 166			const u8 *in, unsigned long plaintext_len, u8 *iv,
 167			const u8 *aad, unsigned long aad_len,
 168			u8 *auth_tag, unsigned long auth_tag_len);
 169
 170asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
 171			const u8 *in, unsigned long ciphertext_len, u8 *iv,
 172			const u8 *aad, unsigned long aad_len,
 173			u8 *auth_tag, unsigned long auth_tag_len);
 174
 175static void aesni_gcm_enc_avx(void *ctx, u8 *out,
 176			const u8 *in, unsigned long plaintext_len, u8 *iv,
 177			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 178			u8 *auth_tag, unsigned long auth_tag_len)
 179{
 180	if (plaintext_len < AVX_GEN2_OPTSIZE) {
 181		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
 182				aad_len, auth_tag, auth_tag_len);
 183	} else {
 184		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
 185		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
 186					aad_len, auth_tag, auth_tag_len);
 187	}
 188}
 189
 190static void aesni_gcm_dec_avx(void *ctx, u8 *out,
 191			const u8 *in, unsigned long ciphertext_len, u8 *iv,
 192			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 193			u8 *auth_tag, unsigned long auth_tag_len)
 194{
 195	if (ciphertext_len < AVX_GEN2_OPTSIZE) {
 196		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
 197				aad_len, auth_tag, auth_tag_len);
 198	} else {
 199		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
 200		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
 201					aad_len, auth_tag, auth_tag_len);
 202	}
 203}
 204#endif
 205
 206#ifdef CONFIG_AS_AVX2
 207/*
 208 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 209 * gcm_data *my_ctx_data, context data
 210 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 211 */
 212asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
 213
 214asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
 215			const u8 *in, unsigned long plaintext_len, u8 *iv,
 216			const u8 *aad, unsigned long aad_len,
 217			u8 *auth_tag, unsigned long auth_tag_len);
 218
 219asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
 220			const u8 *in, unsigned long ciphertext_len, u8 *iv,
 221			const u8 *aad, unsigned long aad_len,
 222			u8 *auth_tag, unsigned long auth_tag_len);
 223
 224static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
 225			const u8 *in, unsigned long plaintext_len, u8 *iv,
 226			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 227			u8 *auth_tag, unsigned long auth_tag_len)
 228{
 229	if (plaintext_len < AVX_GEN2_OPTSIZE) {
 230		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
 231				aad_len, auth_tag, auth_tag_len);
 232	} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
 233		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
 234		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
 235					aad_len, auth_tag, auth_tag_len);
 236	} else {
 237		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
 238		aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
 239					aad_len, auth_tag, auth_tag_len);
 240	}
 241}
 242
 243static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
 244			const u8 *in, unsigned long ciphertext_len, u8 *iv,
 245			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 246			u8 *auth_tag, unsigned long auth_tag_len)
 247{
 248	if (ciphertext_len < AVX_GEN2_OPTSIZE) {
 249		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
 250				aad, aad_len, auth_tag, auth_tag_len);
 251	} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
 252		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
 253		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
 254					aad_len, auth_tag, auth_tag_len);
 255	} else {
 256		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
 257		aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
 258					aad_len, auth_tag, auth_tag_len);
 259	}
 260}
 261#endif
 262
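/*
 * The *_avx/*_avx2 wrappers above pick an implementation by input size:
 * buffers shorter than AVX_GEN2_OPTSIZE use the SSE aesni_gcm_enc/dec
 * routines, larger buffers use the AVX (gen2) code, and, when built with
 * CONFIG_AS_AVX2, buffers of at least AVX_GEN4_OPTSIZE use the AVX2
 * (gen4) code.  The threshold values are defined earlier in this file.
 */
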
 263static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
 264			const u8 *in, unsigned long plaintext_len, u8 *iv,
 265			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 266			u8 *auth_tag, unsigned long auth_tag_len);
 267
 268static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
 269			const u8 *in, unsigned long ciphertext_len, u8 *iv,
 270			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 271			u8 *auth_tag, unsigned long auth_tag_len);
 272
 273static inline struct aesni_rfc4106_gcm_ctx *
 274aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
 275{
 276	return (struct aesni_rfc4106_gcm_ctx *)
 277		PTR_ALIGN((u8 *)
 278			  crypto_tfm_ctx(crypto_aead_tfm(tfm)),
 279			  AESNI_ALIGN);
 280}
 281#endif
 282
 283static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 284{
 285	unsigned long addr = (unsigned long)raw_ctx;
 286	unsigned long align = AESNI_ALIGN;
 287
 288	if (align <= crypto_tfm_ctx_alignment())
 289		align = 1;
 290	return (struct crypto_aes_ctx *)ALIGN(addr, align);
 291}
 292
 293static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 294			      const u8 *in_key, unsigned int key_len)
 295{
 296	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
 297	u32 *flags = &tfm->crt_flags;
 298	int err;
 299
 300	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
 301	    key_len != AES_KEYSIZE_256) {
 302		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 303		return -EINVAL;
 304	}
 305
 306	if (!irq_fpu_usable())
 307		err = crypto_aes_expand_key(ctx, in_key, key_len);
 308	else {
 309		kernel_fpu_begin();
 310		err = aesni_set_key(ctx, in_key, key_len);
 311		kernel_fpu_end();
 312	}
 313
 314	return err;
 315}
 316
 317static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 318		       unsigned int key_len)
 319{
 320	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
 321}
 322
 323static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 324{
 325	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 326
 327	if (!irq_fpu_usable())
 328		crypto_aes_encrypt_x86(ctx, dst, src);
 329	else {
 330		kernel_fpu_begin();
 331		aesni_enc(ctx, dst, src);
 332		kernel_fpu_end();
 333	}
 334}
 335
 336static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 337{
 338	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 339
 340	if (!irq_fpu_usable())
 341		crypto_aes_decrypt_x86(ctx, dst, src);
 342	else {
 343		kernel_fpu_begin();
 344		aesni_dec(ctx, dst, src);
 345		kernel_fpu_end();
 346	}
 347}
 348
 349static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 350{
 351	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 352
 353	aesni_enc(ctx, dst, src);
 354}
 355
 356static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 357{
 358	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 359
 360	aesni_dec(ctx, dst, src);
 361}
 362
 363static int ecb_encrypt(struct blkcipher_desc *desc,
 364		       struct scatterlist *dst, struct scatterlist *src,
 365		       unsigned int nbytes)
 366{
 367	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 368	struct blkcipher_walk walk;
 369	int err;
 370
 371	blkcipher_walk_init(&walk, dst, src, nbytes);
 372	err = blkcipher_walk_virt(desc, &walk);
 373	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 374
 375	kernel_fpu_begin();
 376	while ((nbytes = walk.nbytes)) {
 377		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 378			      nbytes & AES_BLOCK_MASK);
 379		nbytes &= AES_BLOCK_SIZE - 1;
 380		err = blkcipher_walk_done(desc, &walk, nbytes);
 381	}
 382	kernel_fpu_end();
 383
 384	return err;
 385}
 386
 387static int ecb_decrypt(struct blkcipher_desc *desc,
 388		       struct scatterlist *dst, struct scatterlist *src,
 389		       unsigned int nbytes)
 390{
 391	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 392	struct blkcipher_walk walk;
 393	int err;
 394
 395	blkcipher_walk_init(&walk, dst, src, nbytes);
 396	err = blkcipher_walk_virt(desc, &walk);
 397	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 398
 399	kernel_fpu_begin();
 400	while ((nbytes = walk.nbytes)) {
 401		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 402			      nbytes & AES_BLOCK_MASK);
 403		nbytes &= AES_BLOCK_SIZE - 1;
 404		err = blkcipher_walk_done(desc, &walk, nbytes);
 405	}
 406	kernel_fpu_end();
 407
 408	return err;
 409}
 410
 411static int cbc_encrypt(struct blkcipher_desc *desc,
 412		       struct scatterlist *dst, struct scatterlist *src,
 413		       unsigned int nbytes)
 414{
 415	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 416	struct blkcipher_walk walk;
 417	int err;
 418
 419	blkcipher_walk_init(&walk, dst, src, nbytes);
 420	err = blkcipher_walk_virt(desc, &walk);
 421	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 422
 423	kernel_fpu_begin();
 424	while ((nbytes = walk.nbytes)) {
 425		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 426			      nbytes & AES_BLOCK_MASK, walk.iv);
 427		nbytes &= AES_BLOCK_SIZE - 1;
 428		err = blkcipher_walk_done(desc, &walk, nbytes);
 429	}
 430	kernel_fpu_end();
 431
 432	return err;
 433}
 434
 435static int cbc_decrypt(struct blkcipher_desc *desc,
 436		       struct scatterlist *dst, struct scatterlist *src,
 437		       unsigned int nbytes)
 438{
 439	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 440	struct blkcipher_walk walk;
 441	int err;
 442
 443	blkcipher_walk_init(&walk, dst, src, nbytes);
 444	err = blkcipher_walk_virt(desc, &walk);
 445	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 446
 447	kernel_fpu_begin();
 448	while ((nbytes = walk.nbytes)) {
 449		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 450			      nbytes & AES_BLOCK_MASK, walk.iv);
 451		nbytes &= AES_BLOCK_SIZE - 1;
 452		err = blkcipher_walk_done(desc, &walk, nbytes);
 453	}
 454	kernel_fpu_end();
 455
 456	return err;
 457}
 458
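/*
 * A minimal usage sketch (not part of this driver) of the synchronous
 * blkcipher API for "cbc(aes)".  The function and variable names below
 * are illustrative only; the crypto core picks whichever matching
 * synchronous implementation it finds, so this does not necessarily
 * resolve to the AES-NI code in this file.
 */
static int __maybe_unused example_cbc_aes_encrypt(const u8 *key,
						  unsigned int keylen, u8 *iv,
						  u8 *buf, unsigned int len)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc bdesc;
	struct scatterlist sg;
	int ret;

	/* Request a synchronous cbc(aes) transform. */
	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_blkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out;

	crypto_blkcipher_set_iv(tfm, iv, AES_BLOCK_SIZE);

	/* len must be a multiple of AES_BLOCK_SIZE for CBC. */
	sg_init_one(&sg, buf, len);
	bdesc.tfm = tfm;
	bdesc.flags = 0;
	ret = crypto_blkcipher_encrypt(&bdesc, &sg, &sg, len);
out:
	crypto_free_blkcipher(tfm);
	return ret;
}
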
 459#ifdef CONFIG_X86_64
 460static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
 461			    struct blkcipher_walk *walk)
 462{
 463	u8 *ctrblk = walk->iv;
 464	u8 keystream[AES_BLOCK_SIZE];
 465	u8 *src = walk->src.virt.addr;
 466	u8 *dst = walk->dst.virt.addr;
 467	unsigned int nbytes = walk->nbytes;
 468
 469	aesni_enc(ctx, keystream, ctrblk);
 470	crypto_xor(keystream, src, nbytes);
 471	memcpy(dst, keystream, nbytes);
 472	crypto_inc(ctrblk, AES_BLOCK_SIZE);
 473}
 474
 475static int ctr_crypt(struct blkcipher_desc *desc,
 476		     struct scatterlist *dst, struct scatterlist *src,
 477		     unsigned int nbytes)
 478{
 479	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 480	struct blkcipher_walk walk;
 481	int err;
 482
 483	blkcipher_walk_init(&walk, dst, src, nbytes);
 484	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
 485	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 486
 487	kernel_fpu_begin();
 488	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 489		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 490			      nbytes & AES_BLOCK_MASK, walk.iv);
 491		nbytes &= AES_BLOCK_SIZE - 1;
 492		err = blkcipher_walk_done(desc, &walk, nbytes);
 493	}
 494	if (walk.nbytes) {
 495		ctr_crypt_final(ctx, &walk);
 496		err = blkcipher_walk_done(desc, &walk, 0);
 497	}
 498	kernel_fpu_end();
 499
 500	return err;
 501}
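
/*
 * CTR mode XORs a keystream into the data, so encryption and decryption
 * are the same operation: the algorithm entries below wire both
 * .encrypt and .decrypt to ctr_crypt() (and both to ablk_encrypt for
 * the async wrapper).  A trailing partial block is handled byte-wise by
 * ctr_crypt_final() above.
 */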
 502#endif
 503
 504static int ablk_ecb_init(struct crypto_tfm *tfm)
 505{
 506	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
 507}
 508
 509static int ablk_cbc_init(struct crypto_tfm *tfm)
 510{
 511	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
 512}
 513
 514#ifdef CONFIG_X86_64
 515static int ablk_ctr_init(struct crypto_tfm *tfm)
 516{
 517	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
 518}
 519
 520#endif
 521
 522#ifdef HAS_PCBC
 523static int ablk_pcbc_init(struct crypto_tfm *tfm)
 524{
 525	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
 526}
 527#endif
 528
 529static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
 530{
 531	aesni_ecb_enc(ctx, blks, blks, nbytes);
 532}
 533
 534static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
 535{
 536	aesni_ecb_dec(ctx, blks, blks, nbytes);
 537}
 538
 539static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
 540			    unsigned int keylen)
 541{
 542	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
 543	int err;
 544
 545	err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
 546				 keylen - AES_BLOCK_SIZE);
 547	if (err)
 548		return err;
 549
 550	return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
 551}
 552
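/*
 * For lrw(aes) the supplied key is an AES key followed by AES_BLOCK_SIZE
 * bytes of tweak material (e.g. 32 + 16 = 48 bytes with AES-256); the
 * trailing block feeds lrw_init_table() above, the rest goes to the AES
 * key schedule.
 */
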
 553static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
 554{
 555	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
 556
 557	lrw_free_table(&ctx->lrw_table);
 558}
 559
 560static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 561		       struct scatterlist *src, unsigned int nbytes)
 562{
 563	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 564	be128 buf[8];
 565	struct lrw_crypt_req req = {
 566		.tbuf = buf,
 567		.tbuflen = sizeof(buf),
 568
 569		.table_ctx = &ctx->lrw_table,
 570		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
 571		.crypt_fn = lrw_xts_encrypt_callback,
 572	};
 573	int ret;
 574
 575	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 576
 577	kernel_fpu_begin();
 578	ret = lrw_crypt(desc, dst, src, nbytes, &req);
 579	kernel_fpu_end();
 580
 581	return ret;
 582}
 583
 584static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 585		       struct scatterlist *src, unsigned int nbytes)
 586{
 587	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 588	be128 buf[8];
 589	struct lrw_crypt_req req = {
 590		.tbuf = buf,
 591		.tbuflen = sizeof(buf),
 592
 593		.table_ctx = &ctx->lrw_table,
 594		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
 595		.crypt_fn = lrw_xts_decrypt_callback,
 596	};
 597	int ret;
 598
 599	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 600
 601	kernel_fpu_begin();
 602	ret = lrw_crypt(desc, dst, src, nbytes, &req);
 603	kernel_fpu_end();
 604
 605	return ret;
 606}
 607
 608static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
 609			    unsigned int keylen)
 610{
 611	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
 612	u32 *flags = &tfm->crt_flags;
 613	int err;
 614
 615	/* key consists of keys of equal size concatenated, therefore
 616	 * the length must be even
 617	 */
 618	if (keylen % 2) {
 619		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 620		return -EINVAL;
 621	}
 622
 623	/* first half of xts-key is for crypt */
 624	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
 625	if (err)
 626		return err;
 627
 628	/* second half of xts-key is for tweak */
 629	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
 630				  keylen / 2);
 631}
 632
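/*
 * For xts(aes) the key is two equal-sized AES keys back to back: the
 * first half keys the data cipher, the second half keys the tweak
 * cipher, e.g. a 64-byte key yields two AES-256 keys.
 */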
 633
 634static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
 635{
 636	aesni_enc(ctx, out, in);
 637}
 638
 639#ifdef CONFIG_X86_64
 640
 641static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
 642{
 643	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
 644}
 645
 646static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
 647{
 648	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
 649}
 650
 651static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
 652{
 653	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
 654}
 655
 656static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
 657{
 658	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
 659}
 660
 661static const struct common_glue_ctx aesni_enc_xts = {
 662	.num_funcs = 2,
 663	.fpu_blocks_limit = 1,
 664
 665	.funcs = { {
 666		.num_blocks = 8,
 667		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
 668	}, {
 669		.num_blocks = 1,
 670		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
 671	} }
 672};
 673
 674static const struct common_glue_ctx aesni_dec_xts = {
 675	.num_funcs = 2,
 676	.fpu_blocks_limit = 1,
 677
 678	.funcs = { {
 679		.num_blocks = 8,
 680		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
 681	}, {
 682		.num_blocks = 1,
 683		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
 684	} }
 685};
 686
 687static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 688		       struct scatterlist *src, unsigned int nbytes)
 689{
 690	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 691
 692	return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
 693				     XTS_TWEAK_CAST(aesni_xts_tweak),
 694				     aes_ctx(ctx->raw_tweak_ctx),
 695				     aes_ctx(ctx->raw_crypt_ctx));
 696}
 697
 698static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 699		       struct scatterlist *src, unsigned int nbytes)
 700{
 701	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 702
 703	return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
 704				     XTS_TWEAK_CAST(aesni_xts_tweak),
 705				     aes_ctx(ctx->raw_tweak_ctx),
 706				     aes_ctx(ctx->raw_crypt_ctx));
 707}
 708
 709#else
 710
 711static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 712		       struct scatterlist *src, unsigned int nbytes)
 713{
 714	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 715	be128 buf[8];
 716	struct xts_crypt_req req = {
 717		.tbuf = buf,
 718		.tbuflen = sizeof(buf),
 719
 720		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
 721		.tweak_fn = aesni_xts_tweak,
 722		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
 723		.crypt_fn = lrw_xts_encrypt_callback,
 724	};
 725	int ret;
 726
 727	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 728
 729	kernel_fpu_begin();
 730	ret = xts_crypt(desc, dst, src, nbytes, &req);
 731	kernel_fpu_end();
 732
 733	return ret;
 734}
 735
 736static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 737		       struct scatterlist *src, unsigned int nbytes)
 738{
 739	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 740	be128 buf[8];
 741	struct xts_crypt_req req = {
 742		.tbuf = buf,
 743		.tbuflen = sizeof(buf),
 744
 745		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
 746		.tweak_fn = aesni_xts_tweak,
 747		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
 748		.crypt_fn = lrw_xts_decrypt_callback,
 749	};
 750	int ret;
 751
 752	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 753
 754	kernel_fpu_begin();
 755	ret = xts_crypt(desc, dst, src, nbytes, &req);
 756	kernel_fpu_end();
 757
 758	return ret;
 759}
 760
 761#endif
 762
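/*
 * Two XTS code paths are provided above: on x86-64 the glue_helper
 * framework processes up to eight blocks per call through
 * aesni_xts_crypt8(), while the 32-bit #else branch uses the generic
 * xts_crypt() walker with the single-block ECB callbacks.
 */
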
 763#ifdef CONFIG_X86_64
 764static int rfc4106_init(struct crypto_tfm *tfm)
 765{
 766	struct cryptd_aead *cryptd_tfm;
 767	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
 768		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
 769	struct crypto_aead *cryptd_child;
 770	struct aesni_rfc4106_gcm_ctx *child_ctx;
 771	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
 772	if (IS_ERR(cryptd_tfm))
 773		return PTR_ERR(cryptd_tfm);
 774
 775	cryptd_child = cryptd_aead_child(cryptd_tfm);
 776	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
 777	memcpy(child_ctx, ctx, sizeof(*ctx));
 778	ctx->cryptd_tfm = cryptd_tfm;
 779	tfm->crt_aead.reqsize = sizeof(struct aead_request)
 780		+ crypto_aead_reqsize(&cryptd_tfm->base);
 781	return 0;
 782}
 783
 784static void rfc4106_exit(struct crypto_tfm *tfm)
 785{
 786	struct aesni_rfc4106_gcm_ctx *ctx =
 787		(struct aesni_rfc4106_gcm_ctx *)
 788		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
 789	if (!IS_ERR(ctx->cryptd_tfm))
 790		cryptd_free_aead(ctx->cryptd_tfm);
 791	return;
 792}
 793
 794static void
 795rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
 796{
 797	struct aesni_gcm_set_hash_subkey_result *result = req->data;
 798
 799	if (err == -EINPROGRESS)
 800		return;
 801	result->err = err;
 802	complete(&result->completion);
 803}
 804
 805static int
 806rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
 807{
 808	struct crypto_ablkcipher *ctr_tfm;
 809	struct ablkcipher_request *req;
 810	int ret = -EINVAL;
 811	struct aesni_hash_subkey_req_data *req_data;
 812
 813	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
 814	if (IS_ERR(ctr_tfm))
 815		return PTR_ERR(ctr_tfm);
 816
 817	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
 818
 819	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
 820	if (ret)
 821		goto out_free_ablkcipher;
 822
 823	ret = -ENOMEM;
 824	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
 825	if (!req)
 826		goto out_free_ablkcipher;
 827
 828	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
 829	if (!req_data)
 830		goto out_free_request;
 831
 832	memset(req_data->iv, 0, sizeof(req_data->iv));
 833
 834	/* Clear the data in the hash sub key container to zero.*/
 835	/* We want to cipher all zeros to create the hash sub key. */
 836	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
 837
 838	init_completion(&req_data->result.completion);
 839	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
 840	ablkcipher_request_set_tfm(req, ctr_tfm);
 841	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
 842					CRYPTO_TFM_REQ_MAY_BACKLOG,
 843					rfc4106_set_hash_subkey_done,
 844					&req_data->result);
 845
 846	ablkcipher_request_set_crypt(req, &req_data->sg,
 847		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
 848
 849	ret = crypto_ablkcipher_encrypt(req);
 850	if (ret == -EINPROGRESS || ret == -EBUSY) {
 851		ret = wait_for_completion_interruptible
 852			(&req_data->result.completion);
 853		if (!ret)
 854			ret = req_data->result.err;
 855	}
 856	kfree(req_data);
 857out_free_request:
 858	ablkcipher_request_free(req);
 859out_free_ablkcipher:
 860	crypto_free_ablkcipher(ctr_tfm);
 861	return ret;
 862}
 863
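/*
 * The GHASH subkey H is the block cipher applied to the all-zero block.
 * rfc4106_set_hash_subkey() computes it by zeroing the 16-byte subkey
 * buffer and encrypting it with ctr(aes) under an all-zero counter
 * block, which for a single block is equivalent to ECB-encrypting zeros
 * with the session key.
 */
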
 864static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
 865						   unsigned int key_len)
 866{
 867	int ret = 0;
 868	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
 869	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
 870	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
 871	struct aesni_rfc4106_gcm_ctx *child_ctx =
 872                                 aesni_rfc4106_gcm_ctx_get(cryptd_child);
 873	u8 *new_key_align, *new_key_mem = NULL;
 874
 875	if (key_len < 4) {
 876		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 877		return -EINVAL;
 878	}
 879	/*Account for 4 byte nonce at the end.*/
 880	key_len -= 4;
 881	if (key_len != AES_KEYSIZE_128) {
 882		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 883		return -EINVAL;
 884	}
 885
 886	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
 887	/*This must be on a 16 byte boundary!*/
 888	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
 889		return -EINVAL;
 890
 891	if ((unsigned long)key % AESNI_ALIGN) {
 892		/* key is not aligned: copy it into an auxiliary aligned buffer */
 893		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
 894		if (!new_key_mem)
 895			return -ENOMEM;
 896
 897		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
 898		memcpy(new_key_align, key, key_len);
 899		key = new_key_align;
 900	}
 901
 902	if (!irq_fpu_usable())
 903		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
 904		key, key_len);
 905	else {
 906		kernel_fpu_begin();
 907		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
 908		kernel_fpu_end();
 909	}
 910	/*This must be on a 16 byte boundary!*/
 911	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
 912		ret = -EINVAL;
 913		goto exit;
 914	}
 915	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
 916	memcpy(child_ctx, ctx, sizeof(*ctx));
 917exit:
 918	kfree(new_key_mem);
 919	return ret;
 920}
 921
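/*
 * A minimal usage sketch (not part of this driver): the key consumed by
 * rfc4106_set_key() above is the 16-byte AES-128 key followed by the
 * 4-byte salt, 20 bytes in total.  The helper and variable names below
 * are illustrative only.
 */
static int __maybe_unused example_rfc4106_setkey(const u8 *key_and_salt)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* 16-byte AES key + 4-byte salt, as parsed by rfc4106_set_key(). */
	ret = crypto_aead_setkey(tfm, key_and_salt, 16 + 4);
	if (!ret)
		/* The ICV length must be 8, 12 or 16 bytes, see below. */
		ret = crypto_aead_setauthsize(tfm, 16);

	crypto_free_aead(tfm);
	return ret;
}
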
 922/* This is the Integrity Check Value (aka the authentication tag) length and
 923 * can be 8, 12 or 16 bytes long. */
 924static int rfc4106_set_authsize(struct crypto_aead *parent,
 925				unsigned int authsize)
 926{
 927	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
 928	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
 929
 930	switch (authsize) {
 931	case 8:
 932	case 12:
 933	case 16:
 934		break;
 935	default:
 936		return -EINVAL;
 937	}
 938	crypto_aead_crt(parent)->authsize = authsize;
 939	crypto_aead_crt(cryptd_child)->authsize = authsize;
 940	return 0;
 941}
 942
 943static int rfc4106_encrypt(struct aead_request *req)
 944{
 945	int ret;
 946	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 947	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 948
 949	if (!irq_fpu_usable()) {
 950		struct aead_request *cryptd_req =
 951			(struct aead_request *) aead_request_ctx(req);
 952		memcpy(cryptd_req, req, sizeof(*req));
 953		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
 954		return crypto_aead_encrypt(cryptd_req);
 955	} else {
 956		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
 957		kernel_fpu_begin();
 958		ret = cryptd_child->base.crt_aead.encrypt(req);
 959		kernel_fpu_end();
 960		return ret;
 961	}
 962}
 963
 964static int rfc4106_decrypt(struct aead_request *req)
 965{
 966	int ret;
 967	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 968	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 969
 970	if (!irq_fpu_usable()) {
 971		struct aead_request *cryptd_req =
 972			(struct aead_request *) aead_request_ctx(req);
 973		memcpy(cryptd_req, req, sizeof(*req));
 974		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
 975		return crypto_aead_decrypt(cryptd_req);
 976	} else {
 977		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
 978		kernel_fpu_begin();
 979		ret = cryptd_child->base.crt_aead.decrypt(req);
 980		kernel_fpu_end();
 981		return ret;
 982	}
 983}
 984
 985static int __driver_rfc4106_encrypt(struct aead_request *req)
 986{
 987	u8 one_entry_in_sg = 0;
 988	u8 *src, *dst, *assoc;
 989	__be32 counter = cpu_to_be32(1);
 990	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 991	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 992	void *aes_ctx = &(ctx->aes_key_expanded);
 993	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
 994	u8 iv_tab[16+AESNI_ALIGN];
 995	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
 996	struct scatter_walk src_sg_walk;
 997	struct scatter_walk assoc_sg_walk;
 998	struct scatter_walk dst_sg_walk;
 999	unsigned int i;
1000
1001	/* Assuming we are supporting rfc4106 64-bit extended
1002	 * sequence numbers, the AAD length must be equal
1003	 * to 8 or 12 bytes. */
1004	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
1005		return -EINVAL;
1006	/* Build the IV: 4-byte salt, 8-byte explicit IV, big-endian counter = 1 */
1007	for (i = 0; i < 4; i++)
1008		*(iv+i) = ctx->nonce[i];
1009	for (i = 0; i < 8; i++)
1010		*(iv+4+i) = req->iv[i];
1011	*((__be32 *)(iv+12)) = counter;
1012
1013	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1014		one_entry_in_sg = 1;
1015		scatterwalk_start(&src_sg_walk, req->src);
1016		scatterwalk_start(&assoc_sg_walk, req->assoc);
1017		src = scatterwalk_map(&src_sg_walk);
1018		assoc = scatterwalk_map(&assoc_sg_walk);
1019		dst = src;
1020		if (unlikely(req->src != req->dst)) {
1021			scatterwalk_start(&dst_sg_walk, req->dst);
1022			dst = scatterwalk_map(&dst_sg_walk);
1023		}
1024
1025	} else {
1026		/* Allocate memory for src, dst, assoc */
1027		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
1028			GFP_ATOMIC);
1029		if (unlikely(!src))
1030			return -ENOMEM;
1031		assoc = (src + req->cryptlen + auth_tag_len);
1032		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1033		scatterwalk_map_and_copy(assoc, req->assoc, 0,
1034					req->assoclen, 0);
1035		dst = src;
1036	}
1037
1038	aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
1039		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
1040		+ ((unsigned long)req->cryptlen), auth_tag_len);
1041
1042	/* The authTag (aka the Integrity Check Value) needs to be written
1043	 * back to the packet. */
1044	if (one_entry_in_sg) {
1045		if (unlikely(req->src != req->dst)) {
1046			scatterwalk_unmap(dst);
1047			scatterwalk_done(&dst_sg_walk, 0, 0);
1048		}
1049		scatterwalk_unmap(src);
1050		scatterwalk_unmap(assoc);
1051		scatterwalk_done(&src_sg_walk, 0, 0);
1052		scatterwalk_done(&assoc_sg_walk, 0, 0);
1053	} else {
1054		scatterwalk_map_and_copy(dst, req->dst, 0,
1055			req->cryptlen + auth_tag_len, 1);
1056		kfree(src);
1057	}
1058	return 0;
1059}
1060
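/*
 * Layout notes for the encrypt/decrypt helpers: the 16-byte counter
 * block handed to the assembler is salt (4 bytes from the key) ||
 * explicit IV (8 bytes from req->iv) || big-endian 0x00000001, and on
 * encryption the authentication tag is written directly after the
 * ciphertext in the destination buffer.
 */
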
1061static int __driver_rfc4106_decrypt(struct aead_request *req)
1062{
1063	u8 one_entry_in_sg = 0;
1064	u8 *src, *dst, *assoc;
1065	unsigned long tempCipherLen = 0;
1066	__be32 counter = cpu_to_be32(1);
1067	int retval = 0;
1068	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1069	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1070	void *aes_ctx = &(ctx->aes_key_expanded);
1071	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1072	u8 iv_and_authTag[32+AESNI_ALIGN];
1073	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
1074	u8 *authTag = iv + 16;
1075	struct scatter_walk src_sg_walk;
1076	struct scatter_walk assoc_sg_walk;
1077	struct scatter_walk dst_sg_walk;
1078	unsigned int i;
1079
1080	if (unlikely((req->cryptlen < auth_tag_len) ||
1081		(req->assoclen != 8 && req->assoclen != 12)))
1082		return -EINVAL;
1083	/* Assuming we are supporting rfc4106 64-bit extended
1084	 * sequence numbers, the AAD length must be
1085	 * equal to 8 or 12 bytes. */
1086
1087	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
1088	/* Build the IV: 4-byte salt, 8-byte explicit IV, big-endian counter = 1 */
1089	for (i = 0; i < 4; i++)
1090		*(iv+i) = ctx->nonce[i];
1091	for (i = 0; i < 8; i++)
1092		*(iv+4+i) = req->iv[i];
1093	*((__be32 *)(iv+12)) = counter;
1094
1095	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1096		one_entry_in_sg = 1;
1097		scatterwalk_start(&src_sg_walk, req->src);
1098		scatterwalk_start(&assoc_sg_walk, req->assoc);
1099		src = scatterwalk_map(&src_sg_walk);
1100		assoc = scatterwalk_map(&assoc_sg_walk);
1101		dst = src;
1102		if (unlikely(req->src != req->dst)) {
1103			scatterwalk_start(&dst_sg_walk, req->dst);
1104			dst = scatterwalk_map(&dst_sg_walk);
1105		}
1106
1107	} else {
1108		/* Allocate memory for src, dst, assoc */
1109		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1110		if (!src)
1111			return -ENOMEM;
1112		assoc = (src + req->cryptlen);
1113		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1114		scatterwalk_map_and_copy(assoc, req->assoc, 0,
1115			req->assoclen, 0);
1116		dst = src;
1117	}
1118
1119	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
1120		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
1121		authTag, auth_tag_len);
1122
1123	/* Compare generated tag with passed in tag. */
1124	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
1125		-EBADMSG : 0;
1126
1127	if (one_entry_in_sg) {
1128		if (unlikely(req->src != req->dst)) {
1129			scatterwalk_unmap(dst);
1130			scatterwalk_done(&dst_sg_walk, 0, 0);
1131		}
1132		scatterwalk_unmap(src);
1133		scatterwalk_unmap(assoc);
1134		scatterwalk_done(&src_sg_walk, 0, 0);
1135		scatterwalk_done(&assoc_sg_walk, 0, 0);
1136	} else {
1137		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
1138		kfree(src);
1139	}
1140	return retval;
1141}
1142#endif
1143
1144static struct crypto_alg aesni_algs[] = { {
1145	.cra_name		= "aes",
1146	.cra_driver_name	= "aes-aesni",
1147	.cra_priority		= 300,
1148	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
1149	.cra_blocksize		= AES_BLOCK_SIZE,
1150	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
1151				  AESNI_ALIGN - 1,
1152	.cra_alignmask		= 0,
1153	.cra_module		= THIS_MODULE,
1154	.cra_u	= {
1155		.cipher	= {
1156			.cia_min_keysize	= AES_MIN_KEY_SIZE,
1157			.cia_max_keysize	= AES_MAX_KEY_SIZE,
1158			.cia_setkey		= aes_set_key,
1159			.cia_encrypt		= aes_encrypt,
1160			.cia_decrypt		= aes_decrypt
1161		}
1162	}
1163}, {
1164	.cra_name		= "__aes-aesni",
1165	.cra_driver_name	= "__driver-aes-aesni",
1166	.cra_priority		= 0,
1167	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
1168	.cra_blocksize		= AES_BLOCK_SIZE,
1169	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
1170				  AESNI_ALIGN - 1,
1171	.cra_alignmask		= 0,
1172	.cra_module		= THIS_MODULE,
1173	.cra_u	= {
1174		.cipher	= {
1175			.cia_min_keysize	= AES_MIN_KEY_SIZE,
1176			.cia_max_keysize	= AES_MAX_KEY_SIZE,
1177			.cia_setkey		= aes_set_key,
1178			.cia_encrypt		= __aes_encrypt,
1179			.cia_decrypt		= __aes_decrypt
1180		}
1181	}
1182}, {
1183	.cra_name		= "__ecb-aes-aesni",
1184	.cra_driver_name	= "__driver-ecb-aes-aesni",
1185	.cra_priority		= 0,
1186	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
1187	.cra_blocksize		= AES_BLOCK_SIZE,
1188	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
1189				  AESNI_ALIGN - 1,
1190	.cra_alignmask		= 0,
1191	.cra_type		= &crypto_blkcipher_type,
1192	.cra_module		= THIS_MODULE,
1193	.cra_u = {
1194		.blkcipher = {
1195			.min_keysize	= AES_MIN_KEY_SIZE,
1196			.max_keysize	= AES_MAX_KEY_SIZE,
1197			.setkey		= aes_set_key,
1198			.encrypt	= ecb_encrypt,
1199			.decrypt	= ecb_decrypt,
1200		},
1201	},
1202}, {
1203	.cra_name		= "__cbc-aes-aesni",
1204	.cra_driver_name	= "__driver-cbc-aes-aesni",
1205	.cra_priority		= 0,
1206	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
1207	.cra_blocksize		= AES_BLOCK_SIZE,
1208	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
1209				  AESNI_ALIGN - 1,
1210	.cra_alignmask		= 0,
1211	.cra_type		= &crypto_blkcipher_type,
1212	.cra_module		= THIS_MODULE,
1213	.cra_u = {
1214		.blkcipher = {
1215			.min_keysize	= AES_MIN_KEY_SIZE,
1216			.max_keysize	= AES_MAX_KEY_SIZE,
1217			.setkey		= aes_set_key,
1218			.encrypt	= cbc_encrypt,
1219			.decrypt	= cbc_decrypt,
1220		},
1221	},
1222}, {
1223	.cra_name		= "ecb(aes)",
1224	.cra_driver_name	= "ecb-aes-aesni",
1225	.cra_priority		= 400,
1226	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1227	.cra_blocksize		= AES_BLOCK_SIZE,
1228	.cra_ctxsize		= sizeof(struct async_helper_ctx),
1229	.cra_alignmask		= 0,
1230	.cra_type		= &crypto_ablkcipher_type,
1231	.cra_module		= THIS_MODULE,
1232	.cra_init		= ablk_ecb_init,
1233	.cra_exit		= ablk_exit,
1234	.cra_u = {
1235		.ablkcipher = {
1236			.min_keysize	= AES_MIN_KEY_SIZE,
1237			.max_keysize	= AES_MAX_KEY_SIZE,
1238			.setkey		= ablk_set_key,
1239			.encrypt	= ablk_encrypt,
1240			.decrypt	= ablk_decrypt,
1241		},
1242	},
1243}, {
1244	.cra_name		= "cbc(aes)",
1245	.cra_driver_name	= "cbc-aes-aesni",
1246	.cra_priority		= 400,
1247	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1248	.cra_blocksize		= AES_BLOCK_SIZE,
1249	.cra_ctxsize		= sizeof(struct async_helper_ctx),
1250	.cra_alignmask		= 0,
1251	.cra_type		= &crypto_ablkcipher_type,
1252	.cra_module		= THIS_MODULE,
1253	.cra_init		= ablk_cbc_init,
1254	.cra_exit		= ablk_exit,
1255	.cra_u = {
1256		.ablkcipher = {
1257			.min_keysize	= AES_MIN_KEY_SIZE,
1258			.max_keysize	= AES_MAX_KEY_SIZE,
1259			.ivsize		= AES_BLOCK_SIZE,
1260			.setkey		= ablk_set_key,
1261			.encrypt	= ablk_encrypt,
1262			.decrypt	= ablk_decrypt,
1263		},
1264	},
1265#ifdef CONFIG_X86_64
1266}, {
1267	.cra_name		= "__ctr-aes-aesni",
1268	.cra_driver_name	= "__driver-ctr-aes-aesni",
1269	.cra_priority		= 0,
1270	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
1271	.cra_blocksize		= 1,
1272	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
1273				  AESNI_ALIGN - 1,
1274	.cra_alignmask		= 0,
1275	.cra_type		= &crypto_blkcipher_type,
1276	.cra_module		= THIS_MODULE,
1277	.cra_u = {
1278		.blkcipher = {
1279			.min_keysize	= AES_MIN_KEY_SIZE,
1280			.max_keysize	= AES_MAX_KEY_SIZE,
1281			.ivsize		= AES_BLOCK_SIZE,
1282			.setkey		= aes_set_key,
1283			.encrypt	= ctr_crypt,
1284			.decrypt	= ctr_crypt,
1285		},
1286	},
1287}, {
1288	.cra_name		= "ctr(aes)",
1289	.cra_driver_name	= "ctr-aes-aesni",
1290	.cra_priority		= 400,
1291	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1292	.cra_blocksize		= 1,
1293	.cra_ctxsize		= sizeof(struct async_helper_ctx),
1294	.cra_alignmask		= 0,
1295	.cra_type		= &crypto_ablkcipher_type,
1296	.cra_module		= THIS_MODULE,
1297	.cra_init		= ablk_ctr_init,
1298	.cra_exit		= ablk_exit,
1299	.cra_u = {
1300		.ablkcipher = {
1301			.min_keysize	= AES_MIN_KEY_SIZE,
1302			.max_keysize	= AES_MAX_KEY_SIZE,
1303			.ivsize		= AES_BLOCK_SIZE,
1304			.setkey		= ablk_set_key,
1305			.encrypt	= ablk_encrypt,
1306			.decrypt	= ablk_encrypt,
1307			.geniv		= "chainiv",
1308		},
1309	},
1310}, {
1311	.cra_name		= "__gcm-aes-aesni",
1312	.cra_driver_name	= "__driver-gcm-aes-aesni",
1313	.cra_priority		= 0,
1314	.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
1315	.cra_blocksize		= 1,
1316	.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx) +
1317				  AESNI_ALIGN,
1318	.cra_alignmask		= 0,
1319	.cra_type		= &crypto_aead_type,
1320	.cra_module		= THIS_MODULE,
1321	.cra_u = {
1322		.aead = {
1323			.encrypt	= __driver_rfc4106_encrypt,
1324			.decrypt	= __driver_rfc4106_decrypt,
1325		},
1326	},
1327}, {
1328	.cra_name		= "rfc4106(gcm(aes))",
1329	.cra_driver_name	= "rfc4106-gcm-aesni",
1330	.cra_priority		= 400,
1331	.cra_flags		= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1332	.cra_blocksize		= 1,
1333	.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx) +
1334				  AESNI_ALIGN,
1335	.cra_alignmask		= 0,
1336	.cra_type		= &crypto_nivaead_type,
1337	.cra_module		= THIS_MODULE,
1338	.cra_init		= rfc4106_init,
1339	.cra_exit		= rfc4106_exit,
1340	.cra_u = {
1341		.aead = {
1342			.setkey		= rfc4106_set_key,
1343			.setauthsize	= rfc4106_set_authsize,
1344			.encrypt	= rfc4106_encrypt,
1345			.decrypt	= rfc4106_decrypt,
1346			.geniv		= "seqiv",
1347			.ivsize		= 8,
1348			.maxauthsize	= 16,
1349		},
1350	},
1351#endif
1352#ifdef HAS_PCBC
1353}, {
1354	.cra_name		= "pcbc(aes)",
1355	.cra_driver_name	= "pcbc-aes-aesni",
1356	.cra_priority		= 400,
1357	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1358	.cra_blocksize		= AES_BLOCK_SIZE,
1359	.cra_ctxsize		= sizeof(struct async_helper_ctx),
1360	.cra_alignmask		= 0,
1361	.cra_type		= &crypto_ablkcipher_type,
1362	.cra_module		= THIS_MODULE,
1363	.cra_init		= ablk_pcbc_init,
1364	.cra_exit		= ablk_exit,
1365	.cra_u = {
1366		.ablkcipher = {
1367			.min_keysize	= AES_MIN_KEY_SIZE,
1368			.max_keysize	= AES_MAX_KEY_SIZE,
1369			.ivsize		= AES_BLOCK_SIZE,
1370			.setkey		= ablk_set_key,
1371			.encrypt	= ablk_encrypt,
1372			.decrypt	= ablk_decrypt,
1373		},
1374	},
1375#endif
1376}, {
1377	.cra_name		= "__lrw-aes-aesni",
1378	.cra_driver_name	= "__driver-lrw-aes-aesni",
1379	.cra_priority		= 0,
1380	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
1381	.cra_blocksize		= AES_BLOCK_SIZE,
1382	.cra_ctxsize		= sizeof(struct aesni_lrw_ctx),
1383	.cra_alignmask		= 0,
1384	.cra_type		= &crypto_blkcipher_type,
1385	.cra_module		= THIS_MODULE,
1386	.cra_exit		= lrw_aesni_exit_tfm,
1387	.cra_u = {
1388		.blkcipher = {
1389			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1390			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1391			.ivsize		= AES_BLOCK_SIZE,
1392			.setkey		= lrw_aesni_setkey,
1393			.encrypt	= lrw_encrypt,
1394			.decrypt	= lrw_decrypt,
1395		},
1396	},
1397}, {
1398	.cra_name		= "__xts-aes-aesni",
1399	.cra_driver_name	= "__driver-xts-aes-aesni",
1400	.cra_priority		= 0,
1401	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
1402	.cra_blocksize		= AES_BLOCK_SIZE,
1403	.cra_ctxsize		= sizeof(struct aesni_xts_ctx),
1404	.cra_alignmask		= 0,
1405	.cra_type		= &crypto_blkcipher_type,
1406	.cra_module		= THIS_MODULE,
1407	.cra_u = {
1408		.blkcipher = {
1409			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
1410			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
1411			.ivsize		= AES_BLOCK_SIZE,
1412			.setkey		= xts_aesni_setkey,
1413			.encrypt	= xts_encrypt,
1414			.decrypt	= xts_decrypt,
1415		},
1416	},
1417}, {
1418	.cra_name		= "lrw(aes)",
1419	.cra_driver_name	= "lrw-aes-aesni",
1420	.cra_priority		= 400,
1421	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1422	.cra_blocksize		= AES_BLOCK_SIZE,
1423	.cra_ctxsize		= sizeof(struct async_helper_ctx),
1424	.cra_alignmask		= 0,
1425	.cra_type		= &crypto_ablkcipher_type,
1426	.cra_module		= THIS_MODULE,
1427	.cra_init		= ablk_init,
1428	.cra_exit		= ablk_exit,
1429	.cra_u = {
1430		.ablkcipher = {
1431			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1432			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1433			.ivsize		= AES_BLOCK_SIZE,
1434			.setkey		= ablk_set_key,
1435			.encrypt	= ablk_encrypt,
1436			.decrypt	= ablk_decrypt,
1437		},
1438	},
1439}, {
1440	.cra_name		= "xts(aes)",
1441	.cra_driver_name	= "xts-aes-aesni",
1442	.cra_priority		= 400,
1443	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1444	.cra_blocksize		= AES_BLOCK_SIZE,
1445	.cra_ctxsize		= sizeof(struct async_helper_ctx),
1446	.cra_alignmask		= 0,
1447	.cra_type		= &crypto_ablkcipher_type,
1448	.cra_module		= THIS_MODULE,
1449	.cra_init		= ablk_init,
1450	.cra_exit		= ablk_exit,
1451	.cra_u = {
1452		.ablkcipher = {
1453			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
1454			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
1455			.ivsize		= AES_BLOCK_SIZE,
1456			.setkey		= ablk_set_key,
1457			.encrypt	= ablk_encrypt,
1458			.decrypt	= ablk_decrypt,
1459		},
1460	},
1461} };
1462
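/*
 * The "__"-prefixed entries above are internal helpers consumed through
 * cryptd by the async wrappers; the cra_priority 400 entries are what
 * callers normally resolve for e.g. "cbc(aes)" or "rfc4106(gcm(aes))",
 * outranking lower-priority generic implementations.
 */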
1463
1464static const struct x86_cpu_id aesni_cpu_id[] = {
1465	X86_FEATURE_MATCH(X86_FEATURE_AES),
1466	{}
1467};
1468MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1469
1470static int __init aesni_init(void)
1471{
1472	int err;
1473
1474	if (!x86_match_cpu(aesni_cpu_id))
1475		return -ENODEV;
1476#ifdef CONFIG_X86_64
1477#ifdef CONFIG_AS_AVX2
1478	if (boot_cpu_has(X86_FEATURE_AVX2)) {
1479		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1480		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
1481		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
1482	} else
1483#endif
1484#ifdef CONFIG_AS_AVX
1485	if (boot_cpu_has(X86_FEATURE_AVX)) {
1486		pr_info("AVX version of gcm_enc/dec engaged.\n");
1487		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
1488		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
1489	} else
1490#endif
1491	{
1492		pr_info("SSE version of gcm_enc/dec engaged.\n");
1493		aesni_gcm_enc_tfm = aesni_gcm_enc;
1494		aesni_gcm_dec_tfm = aesni_gcm_dec;
1495	}
1496#endif
1497
1498	err = crypto_fpu_init();
1499	if (err)
1500		return err;
1501
1502	return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1503}
1504
1505static void __exit aesni_exit(void)
1506{
1507	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1508
1509	crypto_fpu_exit();
1510}
1511
1512module_init(aesni_init);
1513module_exit(aesni_exit);
1514
1515MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1516MODULE_LICENSE("GPL");
1517MODULE_ALIAS("aes");