v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Support for Intel AES-NI instructions. This file contains glue
   4 * code, the real AES implementation is in intel-aes_asm.S.
   5 *
   6 * Copyright (C) 2008, Intel Corp.
   7 *    Author: Huang Ying <ying.huang@intel.com>
   8 *
   9 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
  10 * interface for 64-bit kernels.
  11 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
  12 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
  13 *             Tadeusz Struk (tadeusz.struk@intel.com)
  14 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
  15 *    Copyright (c) 2010, Intel Corporation.
  16 */
  17
  18#include <linux/hardirq.h>
  19#include <linux/types.h>
  20#include <linux/module.h>
  21#include <linux/err.h>
  22#include <crypto/algapi.h>
  23#include <crypto/aes.h>
  24#include <crypto/ctr.h>
  25#include <crypto/b128ops.h>
  26#include <crypto/gcm.h>
  27#include <crypto/xts.h>
  28#include <asm/cpu_device_id.h>
  29#include <asm/simd.h>
  30#include <crypto/scatterwalk.h>
  31#include <crypto/internal/aead.h>
  32#include <crypto/internal/simd.h>
  33#include <crypto/internal/skcipher.h>
  34#include <linux/jump_label.h>
  35#include <linux/workqueue.h>
  36#include <linux/spinlock.h>
  37#include <linux/static_call.h>
  38
  39
  40#define AESNI_ALIGN	16
  41#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
  42#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
  43#define RFC4106_HASH_SUBKEY_SIZE 16
  44#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
  45#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
  46#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
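/*
 * Illustrative, standalone sketch (not part of the driver): the two
 * *_CTX_SIZE macros above over-allocate by AESNI_ALIGN_EXTRA so that a
 * context buffer handed out at CRYPTO_MINALIGN can still be bumped up to
 * a 16-byte boundary by hand.  The example below walks through the same
 * arithmetic with an assumed CRYPTO_MINALIGN of 8; compile it separately
 * if you want to run it.
 */
#if 0	/* standalone sketch -- not compiled as part of this file */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_AESNI_ALIGN	16
#define SKETCH_CRYPTO_MINALIGN	8	/* assumption for the example */
#define SKETCH_ALIGN_EXTRA \
	((SKETCH_AESNI_ALIGN - 1) & ~(SKETCH_CRYPTO_MINALIGN - 1))

int main(void)
{
	/* (16 - 1) & ~(8 - 1) = 15 & ~7 = 8 extra bytes are reserved. */
	printf("extra bytes reserved: %d\n", SKETCH_ALIGN_EXTRA);

	/* Realigning an 8-byte-aligned pointer never needs more than that. */
	uintptr_t raw = 0x1008;			/* example address */
	uintptr_t aligned = (raw + SKETCH_AESNI_ALIGN - 1) &
			    ~(uintptr_t)(SKETCH_AESNI_ALIGN - 1);
	printf("raw %#lx -> aligned %#lx (slack %lu)\n",
	       (unsigned long)raw, (unsigned long)aligned,
	       (unsigned long)(aligned - raw));
	return 0;
}
#endif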
  47
  48/* This data is stored at the end of the crypto_tfm struct.
  49 * It's a type of per "session" data storage location.
  50 * This needs to be 16 byte aligned.
  51 */
  52struct aesni_rfc4106_gcm_ctx {
  53	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
  54	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
  55	u8 nonce[4];
  56};
  57
  58struct generic_gcmaes_ctx {
  59	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
  60	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
  61};
  62
  63struct aesni_xts_ctx {
  64	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
  65	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
  66};
  67
  68#define GCM_BLOCK_LEN 16
  69
  70struct gcm_context_data {
  71	/* init, update and finalize context data */
  72	u8 aad_hash[GCM_BLOCK_LEN];
  73	u64 aad_length;
  74	u64 in_length;
  75	u8 partial_block_enc_key[GCM_BLOCK_LEN];
  76	u8 orig_IV[GCM_BLOCK_LEN];
  77	u8 current_counter[GCM_BLOCK_LEN];
  78	u64 partial_block_len;
  79	u64 unused;
  80	u8 hash_keys[GCM_BLOCK_LEN * 16];
  81};
  82
  83asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
  84			     unsigned int key_len);
  85asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
  86asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
  87asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
  88			      const u8 *in, unsigned int len);
  89asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
  90			      const u8 *in, unsigned int len);
  91asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
  92			      const u8 *in, unsigned int len, u8 *iv);
  93asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
  94			      const u8 *in, unsigned int len, u8 *iv);
  95asmlinkage void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
  96				  const u8 *in, unsigned int len, u8 *iv);
  97asmlinkage void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
  98				  const u8 *in, unsigned int len, u8 *iv);
  99
 100#define AVX_GEN2_OPTSIZE 640
 101#define AVX_GEN4_OPTSIZE 4096
 102
 103asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
 104				  const u8 *in, unsigned int len, u8 *iv);
 105
 106asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
 107				  const u8 *in, unsigned int len, u8 *iv);
 108
 109#ifdef CONFIG_X86_64
 110
 111asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 112			      const u8 *in, unsigned int len, u8 *iv);
 113DEFINE_STATIC_CALL(aesni_ctr_enc_tfm, aesni_ctr_enc);
 114
 115/* Scatter / Gather routines, with args similar to above */
 116asmlinkage void aesni_gcm_init(void *ctx,
 117			       struct gcm_context_data *gdata,
 118			       u8 *iv,
 119			       u8 *hash_subkey, const u8 *aad,
 120			       unsigned long aad_len);
 121asmlinkage void aesni_gcm_enc_update(void *ctx,
 122				     struct gcm_context_data *gdata, u8 *out,
 123				     const u8 *in, unsigned long plaintext_len);
 124asmlinkage void aesni_gcm_dec_update(void *ctx,
 125				     struct gcm_context_data *gdata, u8 *out,
 126				     const u8 *in,
 127				     unsigned long ciphertext_len);
 128asmlinkage void aesni_gcm_finalize(void *ctx,
 129				   struct gcm_context_data *gdata,
 130				   u8 *auth_tag, unsigned long auth_tag_len);
 131
 132asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
 133		void *keys, u8 *out, unsigned int num_bytes);
 134asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
 135		void *keys, u8 *out, unsigned int num_bytes);
 136asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
 137		void *keys, u8 *out, unsigned int num_bytes);
 138
 139
 140asmlinkage void aes_xctr_enc_128_avx_by8(const u8 *in, const u8 *iv,
 141	const void *keys, u8 *out, unsigned int num_bytes,
 142	unsigned int byte_ctr);
 143
 144asmlinkage void aes_xctr_enc_192_avx_by8(const u8 *in, const u8 *iv,
 145	const void *keys, u8 *out, unsigned int num_bytes,
 146	unsigned int byte_ctr);
 147
 148asmlinkage void aes_xctr_enc_256_avx_by8(const u8 *in, const u8 *iv,
 149	const void *keys, u8 *out, unsigned int num_bytes,
 150	unsigned int byte_ctr);
 151
 152/*
 153 * asmlinkage void aesni_gcm_init_avx_gen2()
 154 * gcm_data *my_ctx_data, context data
 155 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 156 */
 157asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
 158					struct gcm_context_data *gdata,
 159					u8 *iv,
 160					u8 *hash_subkey,
 161					const u8 *aad,
 162					unsigned long aad_len);
 163
 164asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
 165				     struct gcm_context_data *gdata, u8 *out,
 166				     const u8 *in, unsigned long plaintext_len);
 167asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
 168				     struct gcm_context_data *gdata, u8 *out,
 169				     const u8 *in,
 170				     unsigned long ciphertext_len);
 171asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
 172				   struct gcm_context_data *gdata,
 173				   u8 *auth_tag, unsigned long auth_tag_len);
 174
 175/*
 176 * asmlinkage void aesni_gcm_init_avx_gen4()
 177 * gcm_data *my_ctx_data, context data
 178 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 179 */
 180asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
 181					struct gcm_context_data *gdata,
 182					u8 *iv,
 183					u8 *hash_subkey,
 184					const u8 *aad,
 185					unsigned long aad_len);
 186
 187asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
 188				     struct gcm_context_data *gdata, u8 *out,
 189				     const u8 *in, unsigned long plaintext_len);
 190asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
 191				     struct gcm_context_data *gdata, u8 *out,
 192				     const u8 *in,
 193				     unsigned long ciphertext_len);
 194asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
 195				   struct gcm_context_data *gdata,
 196				   u8 *auth_tag, unsigned long auth_tag_len);
 197
 198static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx);
 199static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx2);
 200
 201static inline struct
 202aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
 203{
 204	unsigned long align = AESNI_ALIGN;
 205
 206	if (align <= crypto_tfm_ctx_alignment())
 207		align = 1;
 208	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
 209}
 210
 211static inline struct
 212generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
 213{
 214	unsigned long align = AESNI_ALIGN;
 215
 216	if (align <= crypto_tfm_ctx_alignment())
 217		align = 1;
 218	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
 219}
 220#endif
 221
 222static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 223{
 224	unsigned long addr = (unsigned long)raw_ctx;
 225	unsigned long align = AESNI_ALIGN;
 226
 227	if (align <= crypto_tfm_ctx_alignment())
 228		align = 1;
 229	return (struct crypto_aes_ctx *)ALIGN(addr, align);
 230}
 231
 232static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 233			      const u8 *in_key, unsigned int key_len)
 234{
 235	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
 236	int err;
 237
 238	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
 239	    key_len != AES_KEYSIZE_256)
 240		return -EINVAL;
 241
 242	if (!crypto_simd_usable())
 243		err = aes_expandkey(ctx, in_key, key_len);
 244	else {
 245		kernel_fpu_begin();
 246		err = aesni_set_key(ctx, in_key, key_len);
 247		kernel_fpu_end();
 248	}
 249
 250	return err;
 251}
 252
 253static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 254		       unsigned int key_len)
 255{
 256	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
 257}
 258
 259static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 260{
 261	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 262
 263	if (!crypto_simd_usable()) {
 264		aes_encrypt(ctx, dst, src);
 265	} else {
 266		kernel_fpu_begin();
 267		aesni_enc(ctx, dst, src);
 268		kernel_fpu_end();
 269	}
 270}
 271
 272static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 273{
 274	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 275
 276	if (!crypto_simd_usable()) {
 277		aes_decrypt(ctx, dst, src);
 278	} else {
 279		kernel_fpu_begin();
 280		aesni_dec(ctx, dst, src);
 281		kernel_fpu_end();
 282	}
 283}
 284
 285static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
 286			         unsigned int len)
 287{
 288	return aes_set_key_common(crypto_skcipher_tfm(tfm),
 289				  crypto_skcipher_ctx(tfm), key, len);
 290}
 291
 292static int ecb_encrypt(struct skcipher_request *req)
 293{
 294	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 295	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 296	struct skcipher_walk walk;
 297	unsigned int nbytes;
 298	int err;
 299
 300	err = skcipher_walk_virt(&walk, req, false);
 301
 302	while ((nbytes = walk.nbytes)) {
 303		kernel_fpu_begin();
 304		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 305			      nbytes & AES_BLOCK_MASK);
 306		kernel_fpu_end();
 307		nbytes &= AES_BLOCK_SIZE - 1;
 308		err = skcipher_walk_done(&walk, nbytes);
 309	}
 310
 311	return err;
 312}
 313
 314static int ecb_decrypt(struct skcipher_request *req)
 315{
 316	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 317	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 318	struct skcipher_walk walk;
 319	unsigned int nbytes;
 320	int err;
 321
 322	err = skcipher_walk_virt(&walk, req, false);
 323
 324	while ((nbytes = walk.nbytes)) {
 325		kernel_fpu_begin();
 326		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 327			      nbytes & AES_BLOCK_MASK);
 328		kernel_fpu_end();
 329		nbytes &= AES_BLOCK_SIZE - 1;
 330		err = skcipher_walk_done(&walk, nbytes);
 331	}
 332
 333	return err;
 334}
 335
 336static int cbc_encrypt(struct skcipher_request *req)
 337{
 338	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 339	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 340	struct skcipher_walk walk;
 341	unsigned int nbytes;
 342	int err;
 343
 344	err = skcipher_walk_virt(&walk, req, false);
 345
 346	while ((nbytes = walk.nbytes)) {
 347		kernel_fpu_begin();
 348		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 349			      nbytes & AES_BLOCK_MASK, walk.iv);
 350		kernel_fpu_end();
 351		nbytes &= AES_BLOCK_SIZE - 1;
 352		err = skcipher_walk_done(&walk, nbytes);
 353	}
 354
 355	return err;
 356}
 357
 358static int cbc_decrypt(struct skcipher_request *req)
 359{
 360	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 361	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 362	struct skcipher_walk walk;
 363	unsigned int nbytes;
 364	int err;
 365
 366	err = skcipher_walk_virt(&walk, req, false);
 367
 368	while ((nbytes = walk.nbytes)) {
 369		kernel_fpu_begin();
 370		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 371			      nbytes & AES_BLOCK_MASK, walk.iv);
 372		kernel_fpu_end();
 373		nbytes &= AES_BLOCK_SIZE - 1;
 374		err = skcipher_walk_done(&walk, nbytes);
 375	}
 376
 377	return err;
 378}
 379
 380static int cts_cbc_encrypt(struct skcipher_request *req)
 381{
 382	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 383	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 384	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
 385	struct scatterlist *src = req->src, *dst = req->dst;
 386	struct scatterlist sg_src[2], sg_dst[2];
 387	struct skcipher_request subreq;
 388	struct skcipher_walk walk;
 389	int err;
 390
 391	skcipher_request_set_tfm(&subreq, tfm);
 392	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
 393				      NULL, NULL);
 394
 395	if (req->cryptlen <= AES_BLOCK_SIZE) {
 396		if (req->cryptlen < AES_BLOCK_SIZE)
 397			return -EINVAL;
 398		cbc_blocks = 1;
 399	}
 400
 401	if (cbc_blocks > 0) {
 402		skcipher_request_set_crypt(&subreq, req->src, req->dst,
 403					   cbc_blocks * AES_BLOCK_SIZE,
 404					   req->iv);
 405
 406		err = cbc_encrypt(&subreq);
 407		if (err)
 408			return err;
 409
 410		if (req->cryptlen == AES_BLOCK_SIZE)
 411			return 0;
 412
 413		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
 414		if (req->dst != req->src)
 415			dst = scatterwalk_ffwd(sg_dst, req->dst,
 416					       subreq.cryptlen);
 417	}
 418
 419	/* handle ciphertext stealing */
 420	skcipher_request_set_crypt(&subreq, src, dst,
 421				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
 422				   req->iv);
 423
 424	err = skcipher_walk_virt(&walk, &subreq, false);
 425	if (err)
 426		return err;
 427
 428	kernel_fpu_begin();
 429	aesni_cts_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 430			  walk.nbytes, walk.iv);
 431	kernel_fpu_end();
 432
 433	return skcipher_walk_done(&walk, 0);
 434}
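/*
 * Illustrative, standalone sketch (not part of the driver): how the
 * cts_cbc_* helpers above split a request.  All but the final two
 * (possibly partial) blocks go through plain CBC, and the remainder is
 * handed to the ciphertext-stealing assembly.  Compile separately to run.
 */
#if 0	/* standalone sketch -- not compiled as part of this file */
#include <stdio.h>

#define BLK 16
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int lens[] = { 16, 17, 32, 47, 48, 64 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned int len = lens[i];
		int cbc_blocks = DIV_ROUND_UP(len, BLK) - 2;

		if (len <= BLK)		/* one full block is plain CBC */
			cbc_blocks = 1;
		printf("cryptlen %2u: %d CBC block(s), %u byte(s) for CTS\n",
		       len, cbc_blocks,
		       cbc_blocks > 0 ? len - cbc_blocks * BLK : len);
	}
	return 0;
}
#endif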
 435
 436static int cts_cbc_decrypt(struct skcipher_request *req)
 437{
 438	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 439	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 440	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
 441	struct scatterlist *src = req->src, *dst = req->dst;
 442	struct scatterlist sg_src[2], sg_dst[2];
 443	struct skcipher_request subreq;
 444	struct skcipher_walk walk;
 445	int err;
 446
 447	skcipher_request_set_tfm(&subreq, tfm);
 448	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
 449				      NULL, NULL);
 450
 451	if (req->cryptlen <= AES_BLOCK_SIZE) {
 452		if (req->cryptlen < AES_BLOCK_SIZE)
 453			return -EINVAL;
 454		cbc_blocks = 1;
 455	}
 456
 457	if (cbc_blocks > 0) {
 458		skcipher_request_set_crypt(&subreq, req->src, req->dst,
 459					   cbc_blocks * AES_BLOCK_SIZE,
 460					   req->iv);
 461
 462		err = cbc_decrypt(&subreq);
 463		if (err)
 464			return err;
 465
 466		if (req->cryptlen == AES_BLOCK_SIZE)
 467			return 0;
 468
 469		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
 470		if (req->dst != req->src)
 471			dst = scatterwalk_ffwd(sg_dst, req->dst,
 472					       subreq.cryptlen);
 473	}
 474
 475	/* handle ciphertext stealing */
 476	skcipher_request_set_crypt(&subreq, src, dst,
 477				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
 478				   req->iv);
 479
 480	err = skcipher_walk_virt(&walk, &subreq, false);
 481	if (err)
 482		return err;
 483
 484	kernel_fpu_begin();
 485	aesni_cts_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 486			  walk.nbytes, walk.iv);
 487	kernel_fpu_end();
 488
 489	return skcipher_walk_done(&walk, 0);
 490}
 491
 492#ifdef CONFIG_X86_64
 493static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
 494			      const u8 *in, unsigned int len, u8 *iv)
 495{
 496	/*
 497	 * based on key length, override with the by8 version
 498	 * of ctr mode encryption/decryption for improved performance
 499	 * aes_set_key_common() ensures that key length is one of
 500	 * {128,192,256}
 501	 */
 502	if (ctx->key_length == AES_KEYSIZE_128)
 503		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
 504	else if (ctx->key_length == AES_KEYSIZE_192)
 505		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
 506	else
 507		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
 508}
 509
 510static int ctr_crypt(struct skcipher_request *req)
 511{
 512	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 513	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 514	u8 keystream[AES_BLOCK_SIZE];
 515	struct skcipher_walk walk;
 516	unsigned int nbytes;
 517	int err;
 518
 519	err = skcipher_walk_virt(&walk, req, false);
 520
 521	while ((nbytes = walk.nbytes) > 0) {
 522		kernel_fpu_begin();
 523		if (nbytes & AES_BLOCK_MASK)
 524			static_call(aesni_ctr_enc_tfm)(ctx, walk.dst.virt.addr,
 525						       walk.src.virt.addr,
 526						       nbytes & AES_BLOCK_MASK,
 527						       walk.iv);
 528		nbytes &= ~AES_BLOCK_MASK;
 529
 530		if (walk.nbytes == walk.total && nbytes > 0) {
 531			aesni_enc(ctx, keystream, walk.iv);
 532			crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes - nbytes,
 533				       walk.src.virt.addr + walk.nbytes - nbytes,
 534				       keystream, nbytes);
 535			crypto_inc(walk.iv, AES_BLOCK_SIZE);
 536			nbytes = 0;
 537		}
 538		kernel_fpu_end();
 539		err = skcipher_walk_done(&walk, nbytes);
 540	}
 541	return err;
 542}
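/*
 * Illustrative, standalone sketch (not part of the driver): the partial
 * tail handling at the end of ctr_crypt().  The current counter block is
 * encrypted one more time and only the leading `tail` bytes of that
 * keystream are XORed into the data.  aes_block_encrypt() is an assumed
 * stand-in for aesni_enc(); any correct AES block cipher would do.
 */
#if 0	/* standalone sketch -- not compiled as part of this file */
#include <stddef.h>
#include <stdint.h>

#define BLK 16

void aes_block_encrypt(const void *key, uint8_t out[BLK],
		       const uint8_t in[BLK]);	/* assumed helper */

static void ctr_tail(const void *key, uint8_t *dst, const uint8_t *src,
		     size_t tail, uint8_t counter[BLK])
{
	uint8_t keystream[BLK];
	size_t i;
	int j;

	aes_block_encrypt(key, keystream, counter);
	for (i = 0; i < tail; i++)
		dst[i] = src[i] ^ keystream[i];

	/* big-endian increment of the counter block, as crypto_inc() does */
	for (j = BLK - 1; j >= 0; j--)
		if (++counter[j] != 0)
			break;
}
#endif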
 543
 544static void aesni_xctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
 545				   const u8 *in, unsigned int len, u8 *iv,
 546				   unsigned int byte_ctr)
 547{
 548	if (ctx->key_length == AES_KEYSIZE_128)
 549		aes_xctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len,
 550					 byte_ctr);
 551	else if (ctx->key_length == AES_KEYSIZE_192)
 552		aes_xctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len,
 553					 byte_ctr);
 554	else
 555		aes_xctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len,
 556					 byte_ctr);
 557}
 558
 559static int xctr_crypt(struct skcipher_request *req)
 560{
 561	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 562	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 563	u8 keystream[AES_BLOCK_SIZE];
 564	struct skcipher_walk walk;
 565	unsigned int nbytes;
 566	unsigned int byte_ctr = 0;
 567	int err;
 568	__le32 block[AES_BLOCK_SIZE / sizeof(__le32)];
 569
 570	err = skcipher_walk_virt(&walk, req, false);
 571
 572	while ((nbytes = walk.nbytes) > 0) {
 573		kernel_fpu_begin();
 574		if (nbytes & AES_BLOCK_MASK)
 575			aesni_xctr_enc_avx_tfm(ctx, walk.dst.virt.addr,
 576				walk.src.virt.addr, nbytes & AES_BLOCK_MASK,
 577				walk.iv, byte_ctr);
 578		nbytes &= ~AES_BLOCK_MASK;
 579		byte_ctr += walk.nbytes - nbytes;
 580
 581		if (walk.nbytes == walk.total && nbytes > 0) {
 582			memcpy(block, walk.iv, AES_BLOCK_SIZE);
 583			block[0] ^= cpu_to_le32(1 + byte_ctr / AES_BLOCK_SIZE);
 584			aesni_enc(ctx, keystream, (u8 *)block);
 585			crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes -
 586				       nbytes, walk.src.virt.addr + walk.nbytes
 587				       - nbytes, keystream, nbytes);
 588			byte_ctr += nbytes;
 589			nbytes = 0;
 590		}
 591		kernel_fpu_end();
 592		err = skcipher_walk_done(&walk, nbytes);
 593	}
 594	return err;
 595}
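/*
 * Illustrative, standalone sketch (not part of the driver): how the XCTR
 * tail block above is formed.  Unlike CTR, XCTR does not increment the IV
 * as a big-endian integer; it XORs a 1-based, little-endian 32-bit block
 * counter into the first four IV bytes, which is what the byte-wise XOR
 * below reproduces independently of host byte order.
 */
#if 0	/* standalone sketch -- not compiled as part of this file */
#include <stdint.h>
#include <string.h>

#define BLK 16

static void xctr_block(uint8_t out[BLK], const uint8_t iv[BLK],
		       uint64_t byte_ctr)
{
	uint32_t ctr = (uint32_t)(1 + byte_ctr / BLK);	/* 1-based counter */

	memcpy(out, iv, BLK);
	out[0] ^= (uint8_t)ctr;			/* little-endian layout, */
	out[1] ^= (uint8_t)(ctr >> 8);		/* as cpu_to_le32() gives */
	out[2] ^= (uint8_t)(ctr >> 16);		/* on x86 */
	out[3] ^= (uint8_t)(ctr >> 24);
}
#endif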
 596
 597static int
 598rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
 599{
 600	struct crypto_aes_ctx ctx;
 601	int ret;
 602
 603	ret = aes_expandkey(&ctx, key, key_len);
 604	if (ret)
 605		return ret;
 606
 607	/* Clear the data in the hash sub key container to zero.*/
 608	/* We want to cipher all zeros to create the hash sub key. */
 609	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
 610
 611	aes_encrypt(&ctx, hash_subkey, hash_subkey);
 612
 613	memzero_explicit(&ctx, sizeof(ctx));
 614	return 0;
 615}
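/*
 * Illustrative, standalone sketch (not part of the driver): the GHASH
 * subkey H that rfc4106_set_hash_subkey() derives is simply the AES
 * encryption of the all-zero block under the session key.
 * aes_block_encrypt() is an assumed stand-in for the library cipher.
 */
#if 0	/* standalone sketch -- not compiled as part of this file */
#include <stdint.h>
#include <string.h>

void aes_block_encrypt(const void *key, uint8_t out[16],
		       const uint8_t in[16]);	/* assumed helper */

static void gcm_derive_h(const void *key, uint8_t hash_subkey[16])
{
	memset(hash_subkey, 0, 16);			/* the 0^128 block */
	aes_block_encrypt(key, hash_subkey, hash_subkey);	/* H = E_K(0) */
}
#endif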
 616
 617static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
 618				  unsigned int key_len)
 619{
 620	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
 621
 622	if (key_len < 4)
 623		return -EINVAL;
 624
 625	/*Account for 4 byte nonce at the end.*/
 626	key_len -= 4;
 627
 628	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
 629
 630	return aes_set_key_common(crypto_aead_tfm(aead),
 631				  &ctx->aes_key_expanded, key, key_len) ?:
 632	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
 633}
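/*
 * Illustrative, standalone sketch (not part of the driver): the RFC 4106
 * key layout handled by common_rfc4106_set_key() above.  The last four
 * bytes of the key material are the salt (kept in ctx->nonce) and the
 * rest is the AES key, so 20-, 28- and 36-byte inputs select AES-128,
 * AES-192 and AES-256 respectively.
 */
#if 0	/* standalone sketch -- not compiled as part of this file */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct rfc4106_key {
	uint8_t aes_key[32];
	unsigned int aes_key_len;
	uint8_t salt[4];
};

static int rfc4106_split_key(struct rfc4106_key *out,
			     const uint8_t *key, size_t key_len)
{
	if (key_len < 4)
		return -1;
	key_len -= 4;				/* strip the trailing salt */
	if (key_len != 16 && key_len != 24 && key_len != 32)
		return -1;			/* as aes_set_key_common() */

	memcpy(out->aes_key, key, key_len);
	out->aes_key_len = (unsigned int)key_len;
	memcpy(out->salt, key + key_len, 4);
	return 0;
}
#endif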
 634
 635/* This is the Integrity Check Value (aka the authentication tag) length and can
 636 * be 8, 12 or 16 bytes long. */
 637static int common_rfc4106_set_authsize(struct crypto_aead *aead,
 638				       unsigned int authsize)
 639{
 640	switch (authsize) {
 641	case 8:
 642	case 12:
 643	case 16:
 644		break;
 645	default:
 646		return -EINVAL;
 647	}
 648
 649	return 0;
 650}
 651
 652static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
 653				       unsigned int authsize)
 654{
 655	switch (authsize) {
 656	case 4:
 657	case 8:
 658	case 12:
 659	case 13:
 660	case 14:
 661	case 15:
 662	case 16:
 663		break;
 664	default:
 665		return -EINVAL;
 666	}
 667
 668	return 0;
 669}
 670
 671static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 672			      unsigned int assoclen, u8 *hash_subkey,
 673			      u8 *iv, void *aes_ctx, u8 *auth_tag,
 674			      unsigned long auth_tag_len)
 675{
 676	u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
 677	struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
 678	unsigned long left = req->cryptlen;
 679	struct scatter_walk assoc_sg_walk;
 680	struct skcipher_walk walk;
 681	bool do_avx, do_avx2;
 682	u8 *assocmem = NULL;
 683	u8 *assoc;
 684	int err;
 685
 686	if (!enc)
 687		left -= auth_tag_len;
 688
 689	do_avx = (left >= AVX_GEN2_OPTSIZE);
 690	do_avx2 = (left >= AVX_GEN4_OPTSIZE);
 691
 692	/* Linearize assoc, if not already linear */
 693	if (req->src->length >= assoclen && req->src->length) {
 694		scatterwalk_start(&assoc_sg_walk, req->src);
 695		assoc = scatterwalk_map(&assoc_sg_walk);
 696	} else {
 697		gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 698			      GFP_KERNEL : GFP_ATOMIC;
 699
 700		/* assoc can be any length, so must be on heap */
 701		assocmem = kmalloc(assoclen, flags);
 702		if (unlikely(!assocmem))
 703			return -ENOMEM;
 704		assoc = assocmem;
 705
 706		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
 707	}
 708
 709	kernel_fpu_begin();
 710	if (static_branch_likely(&gcm_use_avx2) && do_avx2)
 711		aesni_gcm_init_avx_gen4(aes_ctx, data, iv, hash_subkey, assoc,
 712					assoclen);
 713	else if (static_branch_likely(&gcm_use_avx) && do_avx)
 714		aesni_gcm_init_avx_gen2(aes_ctx, data, iv, hash_subkey, assoc,
 715					assoclen);
 716	else
 717		aesni_gcm_init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
 718	kernel_fpu_end();
 719
 720	if (!assocmem)
 721		scatterwalk_unmap(assoc);
 722	else
 723		kfree(assocmem);
 724
 725	err = enc ? skcipher_walk_aead_encrypt(&walk, req, false)
 726		  : skcipher_walk_aead_decrypt(&walk, req, false);
 727
 728	while (walk.nbytes > 0) {
 729		kernel_fpu_begin();
 730		if (static_branch_likely(&gcm_use_avx2) && do_avx2) {
 731			if (enc)
 732				aesni_gcm_enc_update_avx_gen4(aes_ctx, data,
 733							      walk.dst.virt.addr,
 734							      walk.src.virt.addr,
 735							      walk.nbytes);
 736			else
 737				aesni_gcm_dec_update_avx_gen4(aes_ctx, data,
 738							      walk.dst.virt.addr,
 739							      walk.src.virt.addr,
 740							      walk.nbytes);
 741		} else if (static_branch_likely(&gcm_use_avx) && do_avx) {
 742			if (enc)
 743				aesni_gcm_enc_update_avx_gen2(aes_ctx, data,
 744							      walk.dst.virt.addr,
 745							      walk.src.virt.addr,
 746							      walk.nbytes);
 747			else
 748				aesni_gcm_dec_update_avx_gen2(aes_ctx, data,
 749							      walk.dst.virt.addr,
 750							      walk.src.virt.addr,
 751							      walk.nbytes);
 752		} else if (enc) {
 753			aesni_gcm_enc_update(aes_ctx, data, walk.dst.virt.addr,
 754					     walk.src.virt.addr, walk.nbytes);
 755		} else {
 756			aesni_gcm_dec_update(aes_ctx, data, walk.dst.virt.addr,
 757					     walk.src.virt.addr, walk.nbytes);
 758		}
 759		kernel_fpu_end();
 760
 761		err = skcipher_walk_done(&walk, 0);
 762	}
 763
 764	if (err)
 765		return err;
 766
 767	kernel_fpu_begin();
 768	if (static_branch_likely(&gcm_use_avx2) && do_avx2)
 769		aesni_gcm_finalize_avx_gen4(aes_ctx, data, auth_tag,
 770					    auth_tag_len);
 771	else if (static_branch_likely(&gcm_use_avx) && do_avx)
 772		aesni_gcm_finalize_avx_gen2(aes_ctx, data, auth_tag,
 773					    auth_tag_len);
 774	else
 775		aesni_gcm_finalize(aes_ctx, data, auth_tag, auth_tag_len);
 776	kernel_fpu_end();
 777
 778	return 0;
 779}
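/*
 * Illustrative, standalone sketch (not part of the driver): the
 * implementation choice made at the top of gcmaes_crypt_by_sg().  The AVX
 * and AVX2 GCM paths only pay off for large enough payloads, so the SSE
 * code is used below AVX_GEN2_OPTSIZE (640 bytes) and the four-way AVX2
 * code only at or above AVX_GEN4_OPTSIZE (4096 bytes), and only when the
 * corresponding static keys were enabled at module init.
 */
#if 0	/* standalone sketch -- not compiled as part of this file */
enum gcm_impl { GCM_SSE, GCM_AVX_GEN2, GCM_AVX_GEN4 };

static enum gcm_impl pick_gcm_impl(unsigned long len,
				   int have_avx, int have_avx2)
{
	if (have_avx2 && len >= 4096)		/* AVX_GEN4_OPTSIZE */
		return GCM_AVX_GEN4;
	if (have_avx && len >= 640)		/* AVX_GEN2_OPTSIZE */
		return GCM_AVX_GEN2;
	return GCM_SSE;
}
#endif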
 780
 781static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
 782			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
 783{
 784	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 785	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
 786	u8 auth_tag[16];
 787	int err;
 788
 789	err = gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, aes_ctx,
 790				 auth_tag, auth_tag_len);
 791	if (err)
 792		return err;
 793
 794	scatterwalk_map_and_copy(auth_tag, req->dst,
 795				 req->assoclen + req->cryptlen,
 796				 auth_tag_len, 1);
 797	return 0;
 798}
 799
 800static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
 801			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
 802{
 803	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 804	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
 805	u8 auth_tag_msg[16];
 806	u8 auth_tag[16];
 807	int err;
 808
 809	err = gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, aes_ctx,
 810				 auth_tag, auth_tag_len);
 811	if (err)
 812		return err;
 813
 814	/* Copy out original auth_tag */
 815	scatterwalk_map_and_copy(auth_tag_msg, req->src,
 816				 req->assoclen + req->cryptlen - auth_tag_len,
 817				 auth_tag_len, 0);
 818
 819	/* Compare generated tag with passed in tag. */
 820	if (crypto_memneq(auth_tag_msg, auth_tag, auth_tag_len)) {
 821		memzero_explicit(auth_tag, sizeof(auth_tag));
 822		return -EBADMSG;
 823	}
 824	return 0;
 825}
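/*
 * Illustrative, standalone sketch (not part of the driver): why
 * gcmaes_decrypt() compares tags with crypto_memneq() rather than
 * memcmp().  The comparison must not exit early, or its timing would
 * leak how many tag bytes matched.  The helper below shows the usual
 * constant-time pattern.
 */
#if 0	/* standalone sketch -- not compiled as part of this file */
#include <stddef.h>
#include <stdint.h>

static int tags_differ(const uint8_t *a, const uint8_t *b, size_t len)
{
	uint8_t diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];		/* no data-dependent branch */
	return diff != 0;
}
#endif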
 826
 827static int helper_rfc4106_encrypt(struct aead_request *req)
 828{
 829	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 830	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 831	void *aes_ctx = &(ctx->aes_key_expanded);
 832	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
 833	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 834	unsigned int i;
 835	__be32 counter = cpu_to_be32(1);
 836
 837	/* Assuming we are supporting rfc4106 64-bit extended */
 838	/* sequence numbers We need to have the AAD length equal */
 839	/* to 16 or 20 bytes */
 840	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
 841		return -EINVAL;
 842
 843	/* IV below built */
 844	for (i = 0; i < 4; i++)
 845		*(iv+i) = ctx->nonce[i];
 846	for (i = 0; i < 8; i++)
 847		*(iv+4+i) = req->iv[i];
 848	*((__be32 *)(iv+12)) = counter;
 849
 850	return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
 851			      aes_ctx);
 852}
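/*
 * Illustrative, standalone sketch (not part of the driver): the IV layout
 * that the loops in helper_rfc4106_encrypt()/decrypt() build.  RFC 4106
 * forms the 16-byte pre-counter block from the 4-byte salt saved at
 * setkey time, the 8-byte explicit IV carried in the request, and a
 * big-endian 32-bit counter initialised to 1.
 */
#if 0	/* standalone sketch -- not compiled as part of this file */
#include <stdint.h>
#include <string.h>

static void rfc4106_build_iv(uint8_t j0[16], const uint8_t salt[4],
			     const uint8_t explicit_iv[8])
{
	memcpy(j0, salt, 4);		/* bytes  0..3:  salt (nonce)     */
	memcpy(j0 + 4, explicit_iv, 8);	/* bytes  4..11: per-request IV   */
	j0[12] = 0;			/* bytes 12..15: counter = 1 (BE) */
	j0[13] = 0;
	j0[14] = 0;
	j0[15] = 1;
}
#endif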
 853
 854static int helper_rfc4106_decrypt(struct aead_request *req)
 855{
 856	__be32 counter = cpu_to_be32(1);
 857	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 858	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 859	void *aes_ctx = &(ctx->aes_key_expanded);
 860	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
 861	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 862	unsigned int i;
 863
 864	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
 865		return -EINVAL;
 866
 867	/* Assuming we are supporting rfc4106 64-bit extended */
 868	/* sequence numbers We need to have the AAD length */
 869	/* equal to 16 or 20 bytes */
 870
 871	/* IV below built */
 872	for (i = 0; i < 4; i++)
 873		*(iv+i) = ctx->nonce[i];
 874	for (i = 0; i < 8; i++)
 875		*(iv+4+i) = req->iv[i];
 876	*((__be32 *)(iv+12)) = counter;
 877
 878	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
 879			      aes_ctx);
 880}
 881#endif
 882
 883static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
 884			    unsigned int keylen)
 885{
 886	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 887	int err;
 888
 889	err = xts_verify_key(tfm, key, keylen);
 890	if (err)
 891		return err;
 892
 893	keylen /= 2;
 894
 895	/* first half of xts-key is for crypt */
 896	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
 897				 key, keylen);
 898	if (err)
 899		return err;
 900
 901	/* second half of xts-key is for tweak */
 902	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
 903				  key + keylen, keylen);
 904}
 905
 906static int xts_crypt(struct skcipher_request *req, bool encrypt)
 907{
 908	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 909	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 910	int tail = req->cryptlen % AES_BLOCK_SIZE;
 911	struct skcipher_request subreq;
 912	struct skcipher_walk walk;
 913	int err;
 914
 915	if (req->cryptlen < AES_BLOCK_SIZE)
 916		return -EINVAL;
 917
 918	err = skcipher_walk_virt(&walk, req, false);
 919	if (!walk.nbytes)
 920		return err;
 921
 922	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
 923		int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
 924
 925		skcipher_walk_abort(&walk);
 926
 927		skcipher_request_set_tfm(&subreq, tfm);
 928		skcipher_request_set_callback(&subreq,
 929					      skcipher_request_flags(req),
 930					      NULL, NULL);
 931		skcipher_request_set_crypt(&subreq, req->src, req->dst,
 932					   blocks * AES_BLOCK_SIZE, req->iv);
 933		req = &subreq;
 934
 935		err = skcipher_walk_virt(&walk, req, false);
 936		if (!walk.nbytes)
 937			return err;
 938	} else {
 939		tail = 0;
 940	}
 941
 942	kernel_fpu_begin();
 943
 944	/* calculate first value of T */
 945	aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);
 946
 947	while (walk.nbytes > 0) {
 948		int nbytes = walk.nbytes;
 949
 950		if (nbytes < walk.total)
 951			nbytes &= ~(AES_BLOCK_SIZE - 1);
 952
 953		if (encrypt)
 954			aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
 955					  walk.dst.virt.addr, walk.src.virt.addr,
 956					  nbytes, walk.iv);
 957		else
 958			aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
 959					  walk.dst.virt.addr, walk.src.virt.addr,
 960					  nbytes, walk.iv);
 961		kernel_fpu_end();
 962
 963		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 964
 965		if (walk.nbytes > 0)
 966			kernel_fpu_begin();
 967	}
 968
 969	if (unlikely(tail > 0 && !err)) {
 970		struct scatterlist sg_src[2], sg_dst[2];
 971		struct scatterlist *src, *dst;
 972
 973		dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
 974		if (req->dst != req->src)
 975			dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
 976
 977		skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
 978					   req->iv);
 979
 980		err = skcipher_walk_virt(&walk, &subreq, false);
 981		if (err)
 982			return err;
 983
 984		kernel_fpu_begin();
 985		if (encrypt)
 986			aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
 987					  walk.dst.virt.addr, walk.src.virt.addr,
 988					  walk.nbytes, walk.iv);
 989		else
 990			aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
 991					  walk.dst.virt.addr, walk.src.virt.addr,
 992					  walk.nbytes, walk.iv);
 993		kernel_fpu_end();
 994
 995		err = skcipher_walk_done(&walk, 0);
 996	}
 997	return err;
 998}
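/*
 * Illustrative, standalone sketch (not part of the driver): the split that
 * xts_crypt() performs when the length is not a block multiple and the
 * walk cannot cover the request in one step.  Everything up to the last
 * full block is processed first; the final full block plus the `tail`
 * bytes are then redone together so the assembly can do ciphertext
 * stealing.  Compile separately to run.
 */
#if 0	/* standalone sketch -- not compiled as part of this file */
#include <stdio.h>

#define BLK 16
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int cryptlen = 45;			/* example length */
	int tail = cryptlen % BLK;			/* 13 */
	int blocks = DIV_ROUND_UP(cryptlen, BLK) - 2;	/* 1  */

	printf("first pass: %d byte(s), CTS pass: %d byte(s)\n",
	       blocks * BLK, BLK + tail);		/* 16 and 29 */
	return 0;
}
#endif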
 999
1000static int xts_encrypt(struct skcipher_request *req)
1001{
1002	return xts_crypt(req, true);
1003}
1004
1005static int xts_decrypt(struct skcipher_request *req)
1006{
1007	return xts_crypt(req, false);
1008}
1009
1010static struct crypto_alg aesni_cipher_alg = {
1011	.cra_name		= "aes",
1012	.cra_driver_name	= "aes-aesni",
1013	.cra_priority		= 300,
1014	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
1015	.cra_blocksize		= AES_BLOCK_SIZE,
1016	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1017	.cra_module		= THIS_MODULE,
1018	.cra_u	= {
1019		.cipher	= {
1020			.cia_min_keysize	= AES_MIN_KEY_SIZE,
1021			.cia_max_keysize	= AES_MAX_KEY_SIZE,
1022			.cia_setkey		= aes_set_key,
1023			.cia_encrypt		= aesni_encrypt,
1024			.cia_decrypt		= aesni_decrypt
1025		}
1026	}
1027};
1028
1029static struct skcipher_alg aesni_skciphers[] = {
1030	{
1031		.base = {
1032			.cra_name		= "__ecb(aes)",
1033			.cra_driver_name	= "__ecb-aes-aesni",
1034			.cra_priority		= 400,
1035			.cra_flags		= CRYPTO_ALG_INTERNAL,
1036			.cra_blocksize		= AES_BLOCK_SIZE,
1037			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1038			.cra_module		= THIS_MODULE,
1039		},
1040		.min_keysize	= AES_MIN_KEY_SIZE,
1041		.max_keysize	= AES_MAX_KEY_SIZE,
1042		.setkey		= aesni_skcipher_setkey,
1043		.encrypt	= ecb_encrypt,
1044		.decrypt	= ecb_decrypt,
1045	}, {
1046		.base = {
1047			.cra_name		= "__cbc(aes)",
1048			.cra_driver_name	= "__cbc-aes-aesni",
1049			.cra_priority		= 400,
1050			.cra_flags		= CRYPTO_ALG_INTERNAL,
1051			.cra_blocksize		= AES_BLOCK_SIZE,
1052			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1053			.cra_module		= THIS_MODULE,
1054		},
1055		.min_keysize	= AES_MIN_KEY_SIZE,
1056		.max_keysize	= AES_MAX_KEY_SIZE,
1057		.ivsize		= AES_BLOCK_SIZE,
1058		.setkey		= aesni_skcipher_setkey,
1059		.encrypt	= cbc_encrypt,
1060		.decrypt	= cbc_decrypt,
1061	}, {
1062		.base = {
1063			.cra_name		= "__cts(cbc(aes))",
1064			.cra_driver_name	= "__cts-cbc-aes-aesni",
1065			.cra_priority		= 400,
1066			.cra_flags		= CRYPTO_ALG_INTERNAL,
1067			.cra_blocksize		= AES_BLOCK_SIZE,
1068			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1069			.cra_module		= THIS_MODULE,
1070		},
1071		.min_keysize	= AES_MIN_KEY_SIZE,
1072		.max_keysize	= AES_MAX_KEY_SIZE,
1073		.ivsize		= AES_BLOCK_SIZE,
1074		.walksize	= 2 * AES_BLOCK_SIZE,
1075		.setkey		= aesni_skcipher_setkey,
1076		.encrypt	= cts_cbc_encrypt,
1077		.decrypt	= cts_cbc_decrypt,
1078#ifdef CONFIG_X86_64
1079	}, {
1080		.base = {
1081			.cra_name		= "__ctr(aes)",
1082			.cra_driver_name	= "__ctr-aes-aesni",
1083			.cra_priority		= 400,
1084			.cra_flags		= CRYPTO_ALG_INTERNAL,
1085			.cra_blocksize		= 1,
1086			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1087			.cra_module		= THIS_MODULE,
1088		},
1089		.min_keysize	= AES_MIN_KEY_SIZE,
1090		.max_keysize	= AES_MAX_KEY_SIZE,
1091		.ivsize		= AES_BLOCK_SIZE,
1092		.chunksize	= AES_BLOCK_SIZE,
1093		.setkey		= aesni_skcipher_setkey,
1094		.encrypt	= ctr_crypt,
1095		.decrypt	= ctr_crypt,
1096#endif
1097	}, {
1098		.base = {
1099			.cra_name		= "__xts(aes)",
1100			.cra_driver_name	= "__xts-aes-aesni",
1101			.cra_priority		= 401,
1102			.cra_flags		= CRYPTO_ALG_INTERNAL,
1103			.cra_blocksize		= AES_BLOCK_SIZE,
1104			.cra_ctxsize		= XTS_AES_CTX_SIZE,
1105			.cra_module		= THIS_MODULE,
1106		},
1107		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
1108		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
1109		.ivsize		= AES_BLOCK_SIZE,
1110		.walksize	= 2 * AES_BLOCK_SIZE,
1111		.setkey		= xts_aesni_setkey,
1112		.encrypt	= xts_encrypt,
1113		.decrypt	= xts_decrypt,
1114	}
1115};
1116
1117static
1118struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
1119
1120#ifdef CONFIG_X86_64
1121/*
1122 * XCTR does not have a non-AVX implementation, so it must be enabled
1123 * conditionally.
1124 */
1125static struct skcipher_alg aesni_xctr = {
1126	.base = {
1127		.cra_name		= "__xctr(aes)",
1128		.cra_driver_name	= "__xctr-aes-aesni",
1129		.cra_priority		= 400,
1130		.cra_flags		= CRYPTO_ALG_INTERNAL,
1131		.cra_blocksize		= 1,
1132		.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1133		.cra_module		= THIS_MODULE,
1134	},
1135	.min_keysize	= AES_MIN_KEY_SIZE,
1136	.max_keysize	= AES_MAX_KEY_SIZE,
1137	.ivsize		= AES_BLOCK_SIZE,
1138	.chunksize	= AES_BLOCK_SIZE,
1139	.setkey		= aesni_skcipher_setkey,
1140	.encrypt	= xctr_crypt,
1141	.decrypt	= xctr_crypt,
1142};
1143
1144static struct simd_skcipher_alg *aesni_simd_xctr;
1145#endif /* CONFIG_X86_64 */
1146
1147#ifdef CONFIG_X86_64
1148static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
1149				  unsigned int key_len)
1150{
1151	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
1152
1153	return aes_set_key_common(crypto_aead_tfm(aead),
1154				  &ctx->aes_key_expanded, key, key_len) ?:
1155	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
1156}
1157
1158static int generic_gcmaes_encrypt(struct aead_request *req)
1159{
1160	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1161	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1162	void *aes_ctx = &(ctx->aes_key_expanded);
1163	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
1164	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
1165	__be32 counter = cpu_to_be32(1);
1166
1167	memcpy(iv, req->iv, 12);
1168	*((__be32 *)(iv+12)) = counter;
1169
1170	return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
1171			      aes_ctx);
1172}
1173
1174static int generic_gcmaes_decrypt(struct aead_request *req)
1175{
1176	__be32 counter = cpu_to_be32(1);
1177	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1178	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1179	void *aes_ctx = &(ctx->aes_key_expanded);
1180	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
1181	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
1182
1183	memcpy(iv, req->iv, 12);
1184	*((__be32 *)(iv+12)) = counter;
1185
1186	return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
1187			      aes_ctx);
1188}
1189
1190static struct aead_alg aesni_aeads[] = { {
1191	.setkey			= common_rfc4106_set_key,
1192	.setauthsize		= common_rfc4106_set_authsize,
1193	.encrypt		= helper_rfc4106_encrypt,
1194	.decrypt		= helper_rfc4106_decrypt,
1195	.ivsize			= GCM_RFC4106_IV_SIZE,
1196	.maxauthsize		= 16,
1197	.base = {
1198		.cra_name		= "__rfc4106(gcm(aes))",
1199		.cra_driver_name	= "__rfc4106-gcm-aesni",
1200		.cra_priority		= 400,
1201		.cra_flags		= CRYPTO_ALG_INTERNAL,
1202		.cra_blocksize		= 1,
1203		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
1204		.cra_alignmask		= 0,
1205		.cra_module		= THIS_MODULE,
1206	},
1207}, {
1208	.setkey			= generic_gcmaes_set_key,
1209	.setauthsize		= generic_gcmaes_set_authsize,
1210	.encrypt		= generic_gcmaes_encrypt,
1211	.decrypt		= generic_gcmaes_decrypt,
1212	.ivsize			= GCM_AES_IV_SIZE,
1213	.maxauthsize		= 16,
1214	.base = {
1215		.cra_name		= "__gcm(aes)",
1216		.cra_driver_name	= "__generic-gcm-aesni",
1217		.cra_priority		= 400,
1218		.cra_flags		= CRYPTO_ALG_INTERNAL,
1219		.cra_blocksize		= 1,
1220		.cra_ctxsize		= sizeof(struct generic_gcmaes_ctx),
1221		.cra_alignmask		= 0,
1222		.cra_module		= THIS_MODULE,
1223	},
1224} };
1225#else
1226static struct aead_alg aesni_aeads[0];
1227#endif
1228
1229static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];
1230
1231static const struct x86_cpu_id aesni_cpu_id[] = {
1232	X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),
1233	{}
1234};
1235MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1236
1237static int __init aesni_init(void)
1238{
1239	int err;
1240
1241	if (!x86_match_cpu(aesni_cpu_id))
1242		return -ENODEV;
1243#ifdef CONFIG_X86_64
1244	if (boot_cpu_has(X86_FEATURE_AVX2)) {
1245		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1246		static_branch_enable(&gcm_use_avx);
1247		static_branch_enable(&gcm_use_avx2);
1248	} else
1249	if (boot_cpu_has(X86_FEATURE_AVX)) {
1250		pr_info("AVX version of gcm_enc/dec engaged.\n");
1251		static_branch_enable(&gcm_use_avx);
1252	} else {
1253		pr_info("SSE version of gcm_enc/dec engaged.\n");
1254	}
1255	if (boot_cpu_has(X86_FEATURE_AVX)) {
1256		/* optimize performance of ctr mode encryption transform */
1257		static_call_update(aesni_ctr_enc_tfm, aesni_ctr_enc_avx_tfm);
1258		pr_info("AES CTR mode by8 optimization enabled\n");
1259	}
1260#endif /* CONFIG_X86_64 */
1261
1262	err = crypto_register_alg(&aesni_cipher_alg);
1263	if (err)
1264		return err;
1265
1266	err = simd_register_skciphers_compat(aesni_skciphers,
1267					     ARRAY_SIZE(aesni_skciphers),
1268					     aesni_simd_skciphers);
1269	if (err)
1270		goto unregister_cipher;
1271
1272	err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1273					 aesni_simd_aeads);
1274	if (err)
1275		goto unregister_skciphers;
1276
1277#ifdef CONFIG_X86_64
1278	if (boot_cpu_has(X86_FEATURE_AVX))
1279		err = simd_register_skciphers_compat(&aesni_xctr, 1,
1280						     &aesni_simd_xctr);
1281	if (err)
1282		goto unregister_aeads;
1283#endif /* CONFIG_X86_64 */
1284
1285	return 0;
1286
1287#ifdef CONFIG_X86_64
1288unregister_aeads:
1289	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1290				aesni_simd_aeads);
1291#endif /* CONFIG_X86_64 */
1292
1293unregister_skciphers:
1294	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1295				  aesni_simd_skciphers);
1296unregister_cipher:
1297	crypto_unregister_alg(&aesni_cipher_alg);
1298	return err;
1299}
1300
1301static void __exit aesni_exit(void)
1302{
1303	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1304			      aesni_simd_aeads);
1305	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1306				  aesni_simd_skciphers);
1307	crypto_unregister_alg(&aesni_cipher_alg);
1308#ifdef CONFIG_X86_64
1309	if (boot_cpu_has(X86_FEATURE_AVX))
1310		simd_unregister_skciphers(&aesni_xctr, 1, &aesni_simd_xctr);
1311#endif /* CONFIG_X86_64 */
1312}
1313
1314late_initcall(aesni_init);
1315module_exit(aesni_exit);
1316
1317MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1318MODULE_LICENSE("GPL");
1319MODULE_ALIAS_CRYPTO("aes");
v3.1 (earlier version of the same file; listing truncated below)
 
   1/*
   2 * Support for Intel AES-NI instructions. This file contains glue
   3 * code, the real AES implementation is in intel-aes_asm.S.
   4 *
   5 * Copyright (C) 2008, Intel Corp.
   6 *    Author: Huang Ying <ying.huang@intel.com>
   7 *
   8 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
   9 * interface for 64-bit kernels.
  10 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
  11 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
  12 *             Tadeusz Struk (tadeusz.struk@intel.com)
  13 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
  14 *    Copyright (c) 2010, Intel Corporation.
  15 *
  16 * This program is free software; you can redistribute it and/or modify
  17 * it under the terms of the GNU General Public License as published by
  18 * the Free Software Foundation; either version 2 of the License, or
  19 * (at your option) any later version.
  20 */
  21
  22#include <linux/hardirq.h>
  23#include <linux/types.h>
  24#include <linux/crypto.h>
  25#include <linux/err.h>
  26#include <crypto/algapi.h>
  27#include <crypto/aes.h>
  28#include <crypto/cryptd.h>
  29#include <crypto/ctr.h>
  30#include <asm/i387.h>
  31#include <asm/aes.h>
  32#include <crypto/scatterwalk.h>
  33#include <crypto/internal/aead.h>
  34#include <linux/workqueue.h>
  35#include <linux/spinlock.h>
  36
  37#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
  38#define HAS_CTR
  39#endif
  40
  41#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
  42#define HAS_LRW
  43#endif
  44
  45#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
  46#define HAS_PCBC
  47#endif
  48
  49#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
  50#define HAS_XTS
  51#endif
  52
  53struct async_aes_ctx {
  54	struct cryptd_ablkcipher *cryptd_tfm;
  55};
  56
  57/* This data is stored at the end of the crypto_tfm struct.
  58 * It's a type of per "session" data storage location.
  59 * This needs to be 16 byte aligned.
  60 */
  61struct aesni_rfc4106_gcm_ctx {
  62	u8 hash_subkey[16];
  63	struct crypto_aes_ctx aes_key_expanded;
  64	u8 nonce[4];
  65	struct cryptd_aead *cryptd_tfm;
  66};
  67
  68struct aesni_gcm_set_hash_subkey_result {
  69	int err;
  70	struct completion completion;
  71};
  72
  73struct aesni_hash_subkey_req_data {
  74	u8 iv[16];
  75	struct aesni_gcm_set_hash_subkey_result result;
  76	struct scatterlist sg;
  77};
  78
  79#define AESNI_ALIGN	(16)
  80#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
  81#define RFC4106_HASH_SUBKEY_SIZE 16
  82
  83asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
  84			     unsigned int key_len);
  85asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
  86			  const u8 *in);
  87asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
  88			  const u8 *in);
  89asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
  90			      const u8 *in, unsigned int len);
  91asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
  92			      const u8 *in, unsigned int len);
  93asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
  94			      const u8 *in, unsigned int len, u8 *iv);
  95asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
  96			      const u8 *in, unsigned int len, u8 *iv);
  97
  98int crypto_fpu_init(void);
  99void crypto_fpu_exit(void);
 100
 101#ifdef CONFIG_X86_64
 102asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 103			      const u8 *in, unsigned int len, u8 *iv);
 104
 105/* asmlinkage void aesni_gcm_enc()
 106 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 107 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 108 * const u8 *in, Plaintext input
 109 * unsigned long plaintext_len, Length of data in bytes for encryption.
 110 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 111 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 112 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 113 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 114 * const u8 *aad, Additional Authentication Data (AAD)
 115 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 116 *          is going to be 8 or 12 bytes
 117 * u8 *auth_tag, Authenticated Tag output.
 118 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 119 *          Valid values are 16 (most likely), 12 or 8.
 120 */
 121asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
 122			const u8 *in, unsigned long plaintext_len, u8 *iv,
 123			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 124			u8 *auth_tag, unsigned long auth_tag_len);
 125
 126/* asmlinkage void aesni_gcm_dec()
 127 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 128 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 129 * const u8 *in, Ciphertext input
 130 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 131 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 132 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 133 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 134 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 135 * const u8 *aad, Additional Authentication Data (AAD)
 136 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 137 * to be 8 or 12 bytes
 138 * u8 *auth_tag, Authenticated Tag output.
 139 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 140 * Valid values are 16 (most likely), 12 or 8.
 141 */
 142asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
 143			const u8 *in, unsigned long ciphertext_len, u8 *iv,
 144			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 145			u8 *auth_tag, unsigned long auth_tag_len);
 146
 147static inline struct
 148aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
 149{
 150	return
 151		(struct aesni_rfc4106_gcm_ctx *)
 152		PTR_ALIGN((u8 *)
 153		crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
 154}
 155#endif
 156
 157static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 158{
 159	unsigned long addr = (unsigned long)raw_ctx;
 160	unsigned long align = AESNI_ALIGN;
 161
 162	if (align <= crypto_tfm_ctx_alignment())
 163		align = 1;
 164	return (struct crypto_aes_ctx *)ALIGN(addr, align);
 165}
 166
 167static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 168			      const u8 *in_key, unsigned int key_len)
 169{
 170	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
 171	u32 *flags = &tfm->crt_flags;
 172	int err;
 173
 174	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
 175	    key_len != AES_KEYSIZE_256) {
 176		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 177		return -EINVAL;
 178	}
 179
 180	if (!irq_fpu_usable())
 181		err = crypto_aes_expand_key(ctx, in_key, key_len);
 182	else {
 183		kernel_fpu_begin();
 184		err = aesni_set_key(ctx, in_key, key_len);
 185		kernel_fpu_end();
 186	}
 187
 188	return err;
 189}
 190
 191static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 192		       unsigned int key_len)
 193{
 194	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
 195}
 196
 197static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 198{
 199	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 200
 201	if (!irq_fpu_usable())
 202		crypto_aes_encrypt_x86(ctx, dst, src);
 203	else {
 204		kernel_fpu_begin();
 205		aesni_enc(ctx, dst, src);
 206		kernel_fpu_end();
 207	}
 208}
 209
 210static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 211{
 212	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 213
 214	if (!irq_fpu_usable())
 215		crypto_aes_decrypt_x86(ctx, dst, src);
 216	else {
 217		kernel_fpu_begin();
 218		aesni_dec(ctx, dst, src);
 219		kernel_fpu_end();
 220	}
 221}
 222
 223static struct crypto_alg aesni_alg = {
 224	.cra_name		= "aes",
 225	.cra_driver_name	= "aes-aesni",
 226	.cra_priority		= 300,
 227	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 228	.cra_blocksize		= AES_BLOCK_SIZE,
 229	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
 230	.cra_alignmask		= 0,
 231	.cra_module		= THIS_MODULE,
 232	.cra_list		= LIST_HEAD_INIT(aesni_alg.cra_list),
 233	.cra_u	= {
 234		.cipher	= {
 235			.cia_min_keysize	= AES_MIN_KEY_SIZE,
 236			.cia_max_keysize	= AES_MAX_KEY_SIZE,
 237			.cia_setkey		= aes_set_key,
 238			.cia_encrypt		= aes_encrypt,
 239			.cia_decrypt		= aes_decrypt
 240		}
 241	}
 242};
 243
 244static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 245{
 246	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 247
 248	aesni_enc(ctx, dst, src);
 249}
 250
 251static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 252{
 253	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 254
 255	aesni_dec(ctx, dst, src);
 256}
 257
 258static struct crypto_alg __aesni_alg = {
 259	.cra_name		= "__aes-aesni",
 260	.cra_driver_name	= "__driver-aes-aesni",
 261	.cra_priority		= 0,
 262	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 263	.cra_blocksize		= AES_BLOCK_SIZE,
 264	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
 265	.cra_alignmask		= 0,
 266	.cra_module		= THIS_MODULE,
 267	.cra_list		= LIST_HEAD_INIT(__aesni_alg.cra_list),
 268	.cra_u	= {
 269		.cipher	= {
 270			.cia_min_keysize	= AES_MIN_KEY_SIZE,
 271			.cia_max_keysize	= AES_MAX_KEY_SIZE,
 272			.cia_setkey		= aes_set_key,
 273			.cia_encrypt		= __aes_encrypt,
 274			.cia_decrypt		= __aes_decrypt
 275		}
 276	}
 277};
 278
 279static int ecb_encrypt(struct blkcipher_desc *desc,
 280		       struct scatterlist *dst, struct scatterlist *src,
 281		       unsigned int nbytes)
 282{
 283	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 284	struct blkcipher_walk walk;
 285	int err;
 286
 287	blkcipher_walk_init(&walk, dst, src, nbytes);
 288	err = blkcipher_walk_virt(desc, &walk);
 289	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 290
 291	kernel_fpu_begin();
 292	while ((nbytes = walk.nbytes)) {
 293		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 294			      nbytes & AES_BLOCK_MASK);
 295		nbytes &= AES_BLOCK_SIZE - 1;
 296		err = blkcipher_walk_done(desc, &walk, nbytes);
 297	}
 298	kernel_fpu_end();
 299
 300	return err;
 301}
 302
 303static int ecb_decrypt(struct blkcipher_desc *desc,
 304		       struct scatterlist *dst, struct scatterlist *src,
 305		       unsigned int nbytes)
 306{
 307	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 308	struct blkcipher_walk walk;
 309	int err;
 310
 311	blkcipher_walk_init(&walk, dst, src, nbytes);
 312	err = blkcipher_walk_virt(desc, &walk);
 313	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 314
 315	kernel_fpu_begin();
 316	while ((nbytes = walk.nbytes)) {
 317		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 318			      nbytes & AES_BLOCK_MASK);
 319		nbytes &= AES_BLOCK_SIZE - 1;
 320		err = blkcipher_walk_done(desc, &walk, nbytes);
 321	}
 322	kernel_fpu_end();
 323
 324	return err;
 325}
 326
 327static struct crypto_alg blk_ecb_alg = {
 328	.cra_name		= "__ecb-aes-aesni",
 329	.cra_driver_name	= "__driver-ecb-aes-aesni",
 330	.cra_priority		= 0,
 331	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
 332	.cra_blocksize		= AES_BLOCK_SIZE,
 333	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
 334	.cra_alignmask		= 0,
 335	.cra_type		= &crypto_blkcipher_type,
 336	.cra_module		= THIS_MODULE,
 337	.cra_list		= LIST_HEAD_INIT(blk_ecb_alg.cra_list),
 338	.cra_u = {
 339		.blkcipher = {
 340			.min_keysize	= AES_MIN_KEY_SIZE,
 341			.max_keysize	= AES_MAX_KEY_SIZE,
 342			.setkey		= aes_set_key,
 343			.encrypt	= ecb_encrypt,
 344			.decrypt	= ecb_decrypt,
 345		},
 346	},
 347};
 348
 349static int cbc_encrypt(struct blkcipher_desc *desc,
 350		       struct scatterlist *dst, struct scatterlist *src,
 351		       unsigned int nbytes)
 352{
 353	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 354	struct blkcipher_walk walk;
 355	int err;
 356
 357	blkcipher_walk_init(&walk, dst, src, nbytes);
 358	err = blkcipher_walk_virt(desc, &walk);
 359	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 360
 361	kernel_fpu_begin();
 362	while ((nbytes = walk.nbytes)) {
 363		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 364			      nbytes & AES_BLOCK_MASK, walk.iv);
 365		nbytes &= AES_BLOCK_SIZE - 1;
 366		err = blkcipher_walk_done(desc, &walk, nbytes);
 367	}
 368	kernel_fpu_end();
 369
 370	return err;
 371}
 372
 373static int cbc_decrypt(struct blkcipher_desc *desc,
 374		       struct scatterlist *dst, struct scatterlist *src,
 375		       unsigned int nbytes)
 376{
 377	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 378	struct blkcipher_walk walk;
 379	int err;
 380
 381	blkcipher_walk_init(&walk, dst, src, nbytes);
 382	err = blkcipher_walk_virt(desc, &walk);
 383	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 384
 385	kernel_fpu_begin();
 386	while ((nbytes = walk.nbytes)) {
 387		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 388			      nbytes & AES_BLOCK_MASK, walk.iv);
 389		nbytes &= AES_BLOCK_SIZE - 1;
 390		err = blkcipher_walk_done(desc, &walk, nbytes);
 391	}
 392	kernel_fpu_end();
 393
 394	return err;
 395}
 396
 397static struct crypto_alg blk_cbc_alg = {
 398	.cra_name		= "__cbc-aes-aesni",
 399	.cra_driver_name	= "__driver-cbc-aes-aesni",
 400	.cra_priority		= 0,
 401	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
 402	.cra_blocksize		= AES_BLOCK_SIZE,
 403	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
 404	.cra_alignmask		= 0,
 405	.cra_type		= &crypto_blkcipher_type,
 406	.cra_module		= THIS_MODULE,
 407	.cra_list		= LIST_HEAD_INIT(blk_cbc_alg.cra_list),
 408	.cra_u = {
 409		.blkcipher = {
 410			.min_keysize	= AES_MIN_KEY_SIZE,
 411			.max_keysize	= AES_MAX_KEY_SIZE,
 412			.setkey		= aes_set_key,
 413			.encrypt	= cbc_encrypt,
 414			.decrypt	= cbc_decrypt,
 415		},
 416	},
 417};
 418
 419#ifdef CONFIG_X86_64
 420static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
 421			    struct blkcipher_walk *walk)
 422{
 423	u8 *ctrblk = walk->iv;
 424	u8 keystream[AES_BLOCK_SIZE];
 425	u8 *src = walk->src.virt.addr;
 426	u8 *dst = walk->dst.virt.addr;
 427	unsigned int nbytes = walk->nbytes;
 428
 429	aesni_enc(ctx, keystream, ctrblk);
 430	crypto_xor(keystream, src, nbytes);
 431	memcpy(dst, keystream, nbytes);
 432	crypto_inc(ctrblk, AES_BLOCK_SIZE);
 433}
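/*
 * Worked example for the tail handling above: for a 20-byte CTR request the
 * bulk loop in ctr_crypt() consumes the first 16-byte block, and the 4 bytes
 * left over reach ctr_crypt_final(), which generates one keystream block from
 * the current counter and uses only its first 4 bytes:
 *
 *	dst[16..19] = src[16..19] XOR E_K(counter)[0..3]
 *
 * before bumping the counter with crypto_inc().
 */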
 434
 435static int ctr_crypt(struct blkcipher_desc *desc,
 436		     struct scatterlist *dst, struct scatterlist *src,
 437		     unsigned int nbytes)
 438{
 439	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
 440	struct blkcipher_walk walk;
 441	int err;
 442
 443	blkcipher_walk_init(&walk, dst, src, nbytes);
 444	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
 445	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 446
 447	kernel_fpu_begin();
 448	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 449		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 450			      nbytes & AES_BLOCK_MASK, walk.iv);
 451		nbytes &= AES_BLOCK_SIZE - 1;
 452		err = blkcipher_walk_done(desc, &walk, nbytes);
 453	}
 454	if (walk.nbytes) {
 455		ctr_crypt_final(ctx, &walk);
 456		err = blkcipher_walk_done(desc, &walk, 0);
 457	}
 458	kernel_fpu_end();
 459
 460	return err;
 461}
 462
 463static struct crypto_alg blk_ctr_alg = {
 464	.cra_name		= "__ctr-aes-aesni",
 465	.cra_driver_name	= "__driver-ctr-aes-aesni",
 466	.cra_priority		= 0,
 467	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
 468	.cra_blocksize		= 1,
 469	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
 470	.cra_alignmask		= 0,
 471	.cra_type		= &crypto_blkcipher_type,
 472	.cra_module		= THIS_MODULE,
 473	.cra_list		= LIST_HEAD_INIT(blk_ctr_alg.cra_list),
 474	.cra_u = {
 475		.blkcipher = {
 476			.min_keysize	= AES_MIN_KEY_SIZE,
 477			.max_keysize	= AES_MAX_KEY_SIZE,
 478			.ivsize		= AES_BLOCK_SIZE,
 479			.setkey		= aes_set_key,
 480			.encrypt	= ctr_crypt,
 481			.decrypt	= ctr_crypt,
 482		},
 483	},
 484};
 485#endif
 486
 487static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
 488			unsigned int key_len)
 489{
 490	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 491	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
 492	int err;
 493
 494	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 495	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
 496				    & CRYPTO_TFM_REQ_MASK);
 497	err = crypto_ablkcipher_setkey(child, key, key_len);
 498	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
 499				    & CRYPTO_TFM_RES_MASK);
 500	return err;
 501}
 502
 503static int ablk_encrypt(struct ablkcipher_request *req)
 504{
 505	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 506	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 507
 508	if (!irq_fpu_usable()) {
 509		struct ablkcipher_request *cryptd_req =
 510			ablkcipher_request_ctx(req);
 511		memcpy(cryptd_req, req, sizeof(*req));
 512		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
 513		return crypto_ablkcipher_encrypt(cryptd_req);
 514	} else {
 515		struct blkcipher_desc desc;
 516		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
 517		desc.info = req->info;
 518		desc.flags = 0;
 519		return crypto_blkcipher_crt(desc.tfm)->encrypt(
 520			&desc, req->dst, req->src, req->nbytes);
 521	}
 522}
 523
 524static int ablk_decrypt(struct ablkcipher_request *req)
 525{
 526	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 527	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 528
 529	if (!irq_fpu_usable()) {
 530		struct ablkcipher_request *cryptd_req =
 531			ablkcipher_request_ctx(req);
 532		memcpy(cryptd_req, req, sizeof(*req));
 533		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
 534		return crypto_ablkcipher_decrypt(cryptd_req);
 535	} else {
 536		struct blkcipher_desc desc;
 537		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
 538		desc.info = req->info;
 539		desc.flags = 0;
 540		return crypto_blkcipher_crt(desc.tfm)->decrypt(
 541			&desc, req->dst, req->src, req->nbytes);
 542	}
 543}
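/*
 * ablk_encrypt()/ablk_decrypt() above pick one of two paths: when the FPU
 * cannot be used in the current context (irq_fpu_usable() is false, e.g. in
 * interrupt context that preempted FPU-using code), the request is queued to
 * the cryptd workqueue transform and completes asynchronously in process
 * context; otherwise the synchronous "__driver-*-aes-aesni" child is invoked
 * directly on the caller's CPU.
 */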
 544
 545static void ablk_exit(struct crypto_tfm *tfm)
 546{
 547	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 548
 549	cryptd_free_ablkcipher(ctx->cryptd_tfm);
 550}
 551
 552static void ablk_init_common(struct crypto_tfm *tfm,
 553			     struct cryptd_ablkcipher *cryptd_tfm)
 554{
 555	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 556
 557	ctx->cryptd_tfm = cryptd_tfm;
 558	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
 559		crypto_ablkcipher_reqsize(&cryptd_tfm->base);
 560}
 561
 562static int ablk_ecb_init(struct crypto_tfm *tfm)
 563{
 564	struct cryptd_ablkcipher *cryptd_tfm;
 565
 566	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
 567	if (IS_ERR(cryptd_tfm))
 568		return PTR_ERR(cryptd_tfm);
 569	ablk_init_common(tfm, cryptd_tfm);
 570	return 0;
 571}
 572
 573static struct crypto_alg ablk_ecb_alg = {
 574	.cra_name		= "ecb(aes)",
 575	.cra_driver_name	= "ecb-aes-aesni",
 576	.cra_priority		= 400,
 577	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 578	.cra_blocksize		= AES_BLOCK_SIZE,
 579	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 580	.cra_alignmask		= 0,
 581	.cra_type		= &crypto_ablkcipher_type,
 582	.cra_module		= THIS_MODULE,
 583	.cra_list		= LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
 584	.cra_init		= ablk_ecb_init,
 585	.cra_exit		= ablk_exit,
 586	.cra_u = {
 587		.ablkcipher = {
 588			.min_keysize	= AES_MIN_KEY_SIZE,
 589			.max_keysize	= AES_MAX_KEY_SIZE,
 590			.setkey		= ablk_set_key,
 591			.encrypt	= ablk_encrypt,
 592			.decrypt	= ablk_decrypt,
 593		},
 594	},
 595};
 596
 597static int ablk_cbc_init(struct crypto_tfm *tfm)
 598{
 599	struct cryptd_ablkcipher *cryptd_tfm;
 600
 601	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
 602	if (IS_ERR(cryptd_tfm))
 603		return PTR_ERR(cryptd_tfm);
 604	ablk_init_common(tfm, cryptd_tfm);
 605	return 0;
 606}
 607
 608static struct crypto_alg ablk_cbc_alg = {
 609	.cra_name		= "cbc(aes)",
 610	.cra_driver_name	= "cbc-aes-aesni",
 611	.cra_priority		= 400,
 612	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 613	.cra_blocksize		= AES_BLOCK_SIZE,
 614	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 615	.cra_alignmask		= 0,
 616	.cra_type		= &crypto_ablkcipher_type,
 617	.cra_module		= THIS_MODULE,
 618	.cra_list		= LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
 619	.cra_init		= ablk_cbc_init,
 620	.cra_exit		= ablk_exit,
 621	.cra_u = {
 622		.ablkcipher = {
 623			.min_keysize	= AES_MIN_KEY_SIZE,
 624			.max_keysize	= AES_MAX_KEY_SIZE,
 625			.ivsize		= AES_BLOCK_SIZE,
 626			.setkey		= ablk_set_key,
 627			.encrypt	= ablk_encrypt,
 628			.decrypt	= ablk_decrypt,
 629		},
 630	},
 631};
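/*
 * Illustrative sketch (not used by this driver): how kernel code of this era
 * could drive the async "cbc(aes)" transform registered above. The example_*
 * names are hypothetical; the call sequence mirrors the one used by
 * rfc4106_set_hash_subkey() further down in this file.
 */
#if 0
struct example_result {
	struct completion completion;
	int err;
};

static void example_cbc_done(struct crypto_async_request *req, int err)
{
	struct example_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

/* Encrypt @nbytes in @sg in place with AES-CBC under @key, using @iv. */
static int example_cbc_encrypt(struct scatterlist *sg, unsigned int nbytes,
			       const u8 *key, unsigned int key_len, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct example_result res;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.completion);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					example_cbc_done, &res);
	ablkcipher_request_set_crypt(req, sg, sg, nbytes, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}
#endif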
 632
 633#ifdef CONFIG_X86_64
 634static int ablk_ctr_init(struct crypto_tfm *tfm)
 635{
 636	struct cryptd_ablkcipher *cryptd_tfm;
 637
 638	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
 639	if (IS_ERR(cryptd_tfm))
 640		return PTR_ERR(cryptd_tfm);
 641	ablk_init_common(tfm, cryptd_tfm);
 642	return 0;
 643}
 644
 645static struct crypto_alg ablk_ctr_alg = {
 646	.cra_name		= "ctr(aes)",
 647	.cra_driver_name	= "ctr-aes-aesni",
 648	.cra_priority		= 400,
 649	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 650	.cra_blocksize		= 1,
 651	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 652	.cra_alignmask		= 0,
 653	.cra_type		= &crypto_ablkcipher_type,
 654	.cra_module		= THIS_MODULE,
 655	.cra_list		= LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
 656	.cra_init		= ablk_ctr_init,
 657	.cra_exit		= ablk_exit,
 658	.cra_u = {
 659		.ablkcipher = {
 660			.min_keysize	= AES_MIN_KEY_SIZE,
 661			.max_keysize	= AES_MAX_KEY_SIZE,
 662			.ivsize		= AES_BLOCK_SIZE,
 663			.setkey		= ablk_set_key,
 664			.encrypt	= ablk_encrypt,
 665			.decrypt	= ablk_encrypt,
 666			.geniv		= "chainiv",
 667		},
 668	},
 669};
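/*
 * Note that ctr(aes) above deliberately wires .decrypt to ablk_encrypt:
 * CTR mode applies the identical keystream operation in both directions, so
 * a separate decrypt path would be redundant (compare blk_ctr_alg, where
 * both callbacks point at ctr_crypt()).
 */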
 670
 671#ifdef HAS_CTR
 672static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
 673{
 674	struct cryptd_ablkcipher *cryptd_tfm;
 675
 676	cryptd_tfm = cryptd_alloc_ablkcipher(
 677		"rfc3686(__driver-ctr-aes-aesni)", 0, 0);
 678	if (IS_ERR(cryptd_tfm))
 679		return PTR_ERR(cryptd_tfm);
 680	ablk_init_common(tfm, cryptd_tfm);
 681	return 0;
 682}
 683
 684static struct crypto_alg ablk_rfc3686_ctr_alg = {
 685	.cra_name		= "rfc3686(ctr(aes))",
 686	.cra_driver_name	= "rfc3686-ctr-aes-aesni",
 687	.cra_priority		= 400,
 688	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 689	.cra_blocksize		= 1,
 690	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 691	.cra_alignmask		= 0,
 692	.cra_type		= &crypto_ablkcipher_type,
 693	.cra_module		= THIS_MODULE,
 694	.cra_list		= LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
 695	.cra_init		= ablk_rfc3686_ctr_init,
 696	.cra_exit		= ablk_exit,
 697	.cra_u = {
 698		.ablkcipher = {
 699			.min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
 700			.max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
 701			.ivsize	     = CTR_RFC3686_IV_SIZE,
 702			.setkey	     = ablk_set_key,
 703			.encrypt     = ablk_encrypt,
 704			.decrypt     = ablk_decrypt,
 705			.geniv	     = "seqiv",
 706		},
 707	},
 708};
 709#endif
 710#endif
 711
 712#ifdef HAS_LRW
 713static int ablk_lrw_init(struct crypto_tfm *tfm)
 714{
 715	struct cryptd_ablkcipher *cryptd_tfm;
 716
 717	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
 718					     0, 0);
 719	if (IS_ERR(cryptd_tfm))
 720		return PTR_ERR(cryptd_tfm);
 721	ablk_init_common(tfm, cryptd_tfm);
 722	return 0;
 723}
 724
 725static struct crypto_alg ablk_lrw_alg = {
 726	.cra_name		= "lrw(aes)",
 727	.cra_driver_name	= "lrw-aes-aesni",
 728	.cra_priority		= 400,
 729	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 730	.cra_blocksize		= AES_BLOCK_SIZE,
 731	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 732	.cra_alignmask		= 0,
 733	.cra_type		= &crypto_ablkcipher_type,
 734	.cra_module		= THIS_MODULE,
 735	.cra_list		= LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
 736	.cra_init		= ablk_lrw_init,
 737	.cra_exit		= ablk_exit,
 738	.cra_u = {
 739		.ablkcipher = {
 740			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
 741			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
 742			.ivsize		= AES_BLOCK_SIZE,
 743			.setkey		= ablk_set_key,
 744			.encrypt	= ablk_encrypt,
 745			.decrypt	= ablk_decrypt,
 746		},
 747	},
 748};
 749#endif
 750
 751#ifdef HAS_PCBC
 752static int ablk_pcbc_init(struct crypto_tfm *tfm)
 753{
 754	struct cryptd_ablkcipher *cryptd_tfm;
 755
 756	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
 757					     0, 0);
 758	if (IS_ERR(cryptd_tfm))
 759		return PTR_ERR(cryptd_tfm);
 760	ablk_init_common(tfm, cryptd_tfm);
 761	return 0;
 762}
 763
 764static struct crypto_alg ablk_pcbc_alg = {
 765	.cra_name		= "pcbc(aes)",
 766	.cra_driver_name	= "pcbc-aes-aesni",
 767	.cra_priority		= 400,
 768	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 769	.cra_blocksize		= AES_BLOCK_SIZE,
 770	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 771	.cra_alignmask		= 0,
 772	.cra_type		= &crypto_ablkcipher_type,
 773	.cra_module		= THIS_MODULE,
 774	.cra_list		= LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
 775	.cra_init		= ablk_pcbc_init,
 776	.cra_exit		= ablk_exit,
 777	.cra_u = {
 778		.ablkcipher = {
 779			.min_keysize	= AES_MIN_KEY_SIZE,
 780			.max_keysize	= AES_MAX_KEY_SIZE,
 781			.ivsize		= AES_BLOCK_SIZE,
 782			.setkey		= ablk_set_key,
 783			.encrypt	= ablk_encrypt,
 784			.decrypt	= ablk_decrypt,
 785		},
 786	},
 787};
 788#endif
 789
 790#ifdef HAS_XTS
 791static int ablk_xts_init(struct crypto_tfm *tfm)
 792{
 793	struct cryptd_ablkcipher *cryptd_tfm;
 794
 795	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
 796					     0, 0);
 797	if (IS_ERR(cryptd_tfm))
 798		return PTR_ERR(cryptd_tfm);
 799	ablk_init_common(tfm, cryptd_tfm);
 800	return 0;
 801}
 802
 803static struct crypto_alg ablk_xts_alg = {
 804	.cra_name		= "xts(aes)",
 805	.cra_driver_name	= "xts-aes-aesni",
 806	.cra_priority		= 400,
 807	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
 808	.cra_blocksize		= AES_BLOCK_SIZE,
 809	.cra_ctxsize		= sizeof(struct async_aes_ctx),
 810	.cra_alignmask		= 0,
 811	.cra_type		= &crypto_ablkcipher_type,
 812	.cra_module		= THIS_MODULE,
 813	.cra_list		= LIST_HEAD_INIT(ablk_xts_alg.cra_list),
 814	.cra_init		= ablk_xts_init,
 815	.cra_exit		= ablk_exit,
 816	.cra_u = {
 817		.ablkcipher = {
 818			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
 819			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
 820			.ivsize		= AES_BLOCK_SIZE,
 821			.setkey		= ablk_set_key,
 822			.encrypt	= ablk_encrypt,
 823			.decrypt	= ablk_decrypt,
 824		},
 825	},
 826};
 827#endif
 828
 829#ifdef CONFIG_X86_64
 830static int rfc4106_init(struct crypto_tfm *tfm)
 831{
 832	struct cryptd_aead *cryptd_tfm;
 833	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
 834		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
 835	struct crypto_aead *cryptd_child;
 836	struct aesni_rfc4106_gcm_ctx *child_ctx;
 837	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
 838	if (IS_ERR(cryptd_tfm))
 839		return PTR_ERR(cryptd_tfm);
 840
 841	cryptd_child = cryptd_aead_child(cryptd_tfm);
 842	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
 843	memcpy(child_ctx, ctx, sizeof(*ctx));
 844	ctx->cryptd_tfm = cryptd_tfm;
 845	tfm->crt_aead.reqsize = sizeof(struct aead_request)
 846		+ crypto_aead_reqsize(&cryptd_tfm->base);
 847	return 0;
 848}
 849
 850static void rfc4106_exit(struct crypto_tfm *tfm)
 851{
 852	struct aesni_rfc4106_gcm_ctx *ctx =
 853		(struct aesni_rfc4106_gcm_ctx *)
 854		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
 855	if (!IS_ERR(ctx->cryptd_tfm))
 856		cryptd_free_aead(ctx->cryptd_tfm);
 857	return;
 858}
 859
 860static void
 861rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
 862{
 863	struct aesni_gcm_set_hash_subkey_result *result = req->data;
 864
 865	if (err == -EINPROGRESS)
 866		return;
 867	result->err = err;
 868	complete(&result->completion);
 869}
 870
 871static int
 872rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
 873{
 874	struct crypto_ablkcipher *ctr_tfm;
 875	struct ablkcipher_request *req;
 876	int ret = -EINVAL;
 877	struct aesni_hash_subkey_req_data *req_data;
 878
 879	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
 880	if (IS_ERR(ctr_tfm))
 881		return PTR_ERR(ctr_tfm);
 882
 883	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
 884
 885	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
 886	if (ret)
 887		goto out_free_ablkcipher;
 888
 889	ret = -ENOMEM;
 890	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
 891	if (!req)
 892		goto out_free_ablkcipher;
 893
 894	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
 895	if (!req_data)
 896		goto out_free_request;
 897
 898	memset(req_data->iv, 0, sizeof(req_data->iv));
 899
  900	/* Zero the hash subkey container: we want to encrypt all zeros */
  901	/* to create the hash subkey. */
 902	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
 903
 904	init_completion(&req_data->result.completion);
 905	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
 906	ablkcipher_request_set_tfm(req, ctr_tfm);
 907	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
 908					CRYPTO_TFM_REQ_MAY_BACKLOG,
 909					rfc4106_set_hash_subkey_done,
 910					&req_data->result);
 911
 912	ablkcipher_request_set_crypt(req, &req_data->sg,
 913		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
 914
 915	ret = crypto_ablkcipher_encrypt(req);
 916	if (ret == -EINPROGRESS || ret == -EBUSY) {
 917		ret = wait_for_completion_interruptible
 918			(&req_data->result.completion);
 919		if (!ret)
 920			ret = req_data->result.err;
 921	}
 922	kfree(req_data);
 923out_free_request:
 924	ablkcipher_request_free(req);
 925out_free_ablkcipher:
 926	crypto_free_ablkcipher(ctr_tfm);
 927	return ret;
 928}
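/*
 * What rfc4106_set_hash_subkey() computes above is the GHASH key
 * H = E_K(0^128): running "ctr(aes)" with an all-zero counter block over a
 * 16-byte all-zero buffer yields exactly the first keystream block, i.e. the
 * AES encryption of the zero block, which the GCM assembly routines then use
 * as ctx->hash_subkey.
 */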
 929
 930static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
 931						   unsigned int key_len)
 932{
 933	int ret = 0;
 934	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
 935	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
 936	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
 937	struct aesni_rfc4106_gcm_ctx *child_ctx =
 938                                 aesni_rfc4106_gcm_ctx_get(cryptd_child);
 939	u8 *new_key_mem = NULL;
 940
 941	if (key_len < 4) {
 942		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 943		return -EINVAL;
 944	}
  945	/* Account for the 4-byte nonce at the end. */
 946	key_len -= 4;
 947	if (key_len != AES_KEYSIZE_128) {
 948		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 949		return -EINVAL;
 950	}
 951
 952	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
  953	/* This must be on a 16-byte boundary! */
 954	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
 955		return -EINVAL;
 956
 957	if ((unsigned long)key % AESNI_ALIGN) {
  958		/* key is not aligned: use an auxiliary aligned pointer */
 959		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
 960		if (!new_key_mem)
 961			return -ENOMEM;
 962
 963		new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
 964		memcpy(new_key_mem, key, key_len);
 965		key = new_key_mem;
 966	}
 967
 968	if (!irq_fpu_usable())
 969		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
 970		key, key_len);
 971	else {
 972		kernel_fpu_begin();
 973		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
 974		kernel_fpu_end();
 975	}
  976	/* This must be on a 16-byte boundary! */
 977	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
 978		ret = -EINVAL;
 979		goto exit;
 980	}
 981	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
 982	memcpy(child_ctx, ctx, sizeof(*ctx));
 983exit:
 984	kfree(new_key_mem);
 985	return ret;
 986}
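/*
 * Key material layout expected by rfc4106_set_key() above (RFC 4106 with
 * AES-128, the only key size this driver accepts): a 20-byte blob where
 *
 *	key[0..15]  is the AES key, expanded into ctx->aes_key_expanded, and
 *	key[16..19] is the salt, stored in ctx->nonce and later combined with
 *		    the 8-byte per-request IV and a big-endian 32-bit counter
 *		    of 1 to form the 16-byte GCM counter block.
 */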
 987
  988/* This is the Integrity Check Value (aka the authentication tag) length and
  989 * can be 8, 12 or 16 bytes long. */
 990static int rfc4106_set_authsize(struct crypto_aead *parent,
 991				unsigned int authsize)
 992{
 993	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
 994	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
 995
 996	switch (authsize) {
 997	case 8:
 998	case 12:
 999	case 16:
1000		break;
1001	default:
1002		return -EINVAL;
1003	}
1004	crypto_aead_crt(parent)->authsize = authsize;
1005	crypto_aead_crt(cryptd_child)->authsize = authsize;
1006	return 0;
1007}
1008
1009static int rfc4106_encrypt(struct aead_request *req)
1010{
1011	int ret;
1012	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1013	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1014
1015	if (!irq_fpu_usable()) {
1016		struct aead_request *cryptd_req =
1017			(struct aead_request *) aead_request_ctx(req);
1018		memcpy(cryptd_req, req, sizeof(*req));
1019		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1020		return crypto_aead_encrypt(cryptd_req);
1021	} else {
1022		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1023		kernel_fpu_begin();
1024		ret = cryptd_child->base.crt_aead.encrypt(req);
1025		kernel_fpu_end();
1026		return ret;
1027	}
1028}
1029
1030static int rfc4106_decrypt(struct aead_request *req)
1031{
1032	int ret;
1033	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1034	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1035
1036	if (!irq_fpu_usable()) {
1037		struct aead_request *cryptd_req =
1038			(struct aead_request *) aead_request_ctx(req);
1039		memcpy(cryptd_req, req, sizeof(*req));
1040		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1041		return crypto_aead_decrypt(cryptd_req);
1042	} else {
1043		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1044		kernel_fpu_begin();
1045		ret = cryptd_child->base.crt_aead.decrypt(req);
1046		kernel_fpu_end();
1047		return ret;
1048	}
1049}
1050
1051static struct crypto_alg rfc4106_alg = {
1052	.cra_name = "rfc4106(gcm(aes))",
1053	.cra_driver_name = "rfc4106-gcm-aesni",
1054	.cra_priority = 400,
1055	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1056	.cra_blocksize = 1,
1057	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
1058	.cra_alignmask = 0,
1059	.cra_type = &crypto_nivaead_type,
1060	.cra_module = THIS_MODULE,
1061	.cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
1062	.cra_init = rfc4106_init,
1063	.cra_exit = rfc4106_exit,
1064	.cra_u = {
1065		.aead = {
1066			.setkey = rfc4106_set_key,
1067			.setauthsize = rfc4106_set_authsize,
1068			.encrypt = rfc4106_encrypt,
1069			.decrypt = rfc4106_decrypt,
1070			.geniv = "seqiv",
1071			.ivsize = 8,
1072			.maxauthsize = 16,
1073		},
1074	},
1075};
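/*
 * Request layout consumed by the rfc4106 handlers registered above (as used
 * by IPsec ESP): req->assoc carries the 8- or 12-byte AAD, req->iv the
 * 8-byte explicit IV carried in the packet (generated via the "seqiv" geniv
 * on the encrypt side), and on encryption the authentication tag (8, 12 or
 * 16 bytes, as set via rfc4106_set_authsize()) is appended to the ciphertext
 * written to req->dst.
 */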
1076
1077static int __driver_rfc4106_encrypt(struct aead_request *req)
1078{
1079	u8 one_entry_in_sg = 0;
1080	u8 *src, *dst, *assoc;
1081	__be32 counter = cpu_to_be32(1);
1082	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1083	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1084	void *aes_ctx = &(ctx->aes_key_expanded);
1085	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1086	u8 iv_tab[16+AESNI_ALIGN];
1087	u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
1088	struct scatter_walk src_sg_walk;
1089	struct scatter_walk assoc_sg_walk;
1090	struct scatter_walk dst_sg_walk;
1091	unsigned int i;
1092
 1093	/* Assuming we are supporting rfc4106 64-bit extended */
 1094	/* sequence numbers, we need to have the AAD length equal */
 1095	/* to 8 or 12 bytes. */
1096	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
1097		return -EINVAL;
 1098	/* Build the IV: 4-byte nonce | 8-byte explicit IV | 32-bit counter = 1 */
1099	for (i = 0; i < 4; i++)
1100		*(iv+i) = ctx->nonce[i];
1101	for (i = 0; i < 8; i++)
1102		*(iv+4+i) = req->iv[i];
1103	*((__be32 *)(iv+12)) = counter;
1104
1105	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1106		one_entry_in_sg = 1;
1107		scatterwalk_start(&src_sg_walk, req->src);
1108		scatterwalk_start(&assoc_sg_walk, req->assoc);
1109		src = scatterwalk_map(&src_sg_walk, 0);
1110		assoc = scatterwalk_map(&assoc_sg_walk, 0);
1111		dst = src;
1112		if (unlikely(req->src != req->dst)) {
1113			scatterwalk_start(&dst_sg_walk, req->dst);
1114			dst = scatterwalk_map(&dst_sg_walk, 0);
1115		}
1116
1117	} else {
1118		/* Allocate memory for src, dst, assoc */
1119		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
1120			GFP_ATOMIC);
1121		if (unlikely(!src))
1122			return -ENOMEM;
1123		assoc = (src + req->cryptlen + auth_tag_len);
1124		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1125		scatterwalk_map_and_copy(assoc, req->assoc, 0,
1126					req->assoclen, 0);
1127		dst = src;
1128	}
1129
1130	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
1131		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
1132		+ ((unsigned long)req->cryptlen), auth_tag_len);
1133
1134	/* The authTag (aka the Integrity Check Value) needs to be written
1135	 * back to the packet. */
1136	if (one_entry_in_sg) {
1137		if (unlikely(req->src != req->dst)) {
1138			scatterwalk_unmap(dst, 0);
1139			scatterwalk_done(&dst_sg_walk, 0, 0);
1140		}
1141		scatterwalk_unmap(src, 0);
1142		scatterwalk_unmap(assoc, 0);
1143		scatterwalk_done(&src_sg_walk, 0, 0);
1144		scatterwalk_done(&assoc_sg_walk, 0, 0);
1145	} else {
1146		scatterwalk_map_and_copy(dst, req->dst, 0,
1147			req->cryptlen + auth_tag_len, 1);
1148		kfree(src);
1149	}
1150	return 0;
1151}
1152
1153static int __driver_rfc4106_decrypt(struct aead_request *req)
1154{
1155	u8 one_entry_in_sg = 0;
1156	u8 *src, *dst, *assoc;
1157	unsigned long tempCipherLen = 0;
1158	__be32 counter = cpu_to_be32(1);
1159	int retval = 0;
1160	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1161	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1162	void *aes_ctx = &(ctx->aes_key_expanded);
1163	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1164	u8 iv_and_authTag[32+AESNI_ALIGN];
1165	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
1166	u8 *authTag = iv + 16;
1167	struct scatter_walk src_sg_walk;
1168	struct scatter_walk assoc_sg_walk;
1169	struct scatter_walk dst_sg_walk;
1170	unsigned int i;
1171
1172	if (unlikely((req->cryptlen < auth_tag_len) ||
1173		(req->assoclen != 8 && req->assoclen != 12)))
1174		return -EINVAL;
 1175	/* Assuming we are supporting rfc4106 64-bit extended */
 1176	/* sequence numbers, we need to have the AAD length */
 1177	/* equal to 8 or 12 bytes. */
1178
1179	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
 1180	/* Build the IV: 4-byte nonce | 8-byte explicit IV | 32-bit counter = 1 */
1181	for (i = 0; i < 4; i++)
1182		*(iv+i) = ctx->nonce[i];
1183	for (i = 0; i < 8; i++)
1184		*(iv+4+i) = req->iv[i];
1185	*((__be32 *)(iv+12)) = counter;
1186
1187	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1188		one_entry_in_sg = 1;
1189		scatterwalk_start(&src_sg_walk, req->src);
1190		scatterwalk_start(&assoc_sg_walk, req->assoc);
1191		src = scatterwalk_map(&src_sg_walk, 0);
1192		assoc = scatterwalk_map(&assoc_sg_walk, 0);
1193		dst = src;
1194		if (unlikely(req->src != req->dst)) {
1195			scatterwalk_start(&dst_sg_walk, req->dst);
1196			dst = scatterwalk_map(&dst_sg_walk, 0);
1197		}
1198
1199	} else {
1200		/* Allocate memory for src, dst, assoc */
1201		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1202		if (!src)
1203			return -ENOMEM;
 1204		assoc = (src + req->cryptlen);
1205		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1206		scatterwalk_map_and_copy(assoc, req->assoc, 0,
1207			req->assoclen, 0);
1208		dst = src;
1209	}
1210
1211	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
1212		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
1213		authTag, auth_tag_len);
1214
1215	/* Compare generated tag with passed in tag. */
1216	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
1217		-EBADMSG : 0;
1218
1219	if (one_entry_in_sg) {
1220		if (unlikely(req->src != req->dst)) {
1221			scatterwalk_unmap(dst, 0);
1222			scatterwalk_done(&dst_sg_walk, 0, 0);
1223		}
1224		scatterwalk_unmap(src, 0);
1225		scatterwalk_unmap(assoc, 0);
1226		scatterwalk_done(&src_sg_walk, 0, 0);
1227		scatterwalk_done(&assoc_sg_walk, 0, 0);
1228	} else {
1229		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
1230		kfree(src);
1231	}
1232	return retval;
1233}
1234
1235static struct crypto_alg __rfc4106_alg = {
1236	.cra_name		= "__gcm-aes-aesni",
1237	.cra_driver_name	= "__driver-gcm-aes-aesni",
1238	.cra_priority		= 0,
1239	.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
1240	.cra_blocksize		= 1,
1241	.cra_ctxsize	= sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
1242	.cra_alignmask		= 0,
1243	.cra_type		= &crypto_aead_type,
1244	.cra_module		= THIS_MODULE,
1245	.cra_list		= LIST_HEAD_INIT(__rfc4106_alg.cra_list),
1246	.cra_u = {
1247		.aead = {
1248			.encrypt	= __driver_rfc4106_encrypt,
1249			.decrypt	= __driver_rfc4106_decrypt,
1250		},
1251	},
1252};
1253#endif
1254
1255static int __init aesni_init(void)
1256{
1257	int err;
1258
1259	if (!cpu_has_aes) {
1260		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
1261		return -ENODEV;
1262	}
1263
1264	if ((err = crypto_fpu_init()))
1265		goto fpu_err;
1266	if ((err = crypto_register_alg(&aesni_alg)))
1267		goto aes_err;
1268	if ((err = crypto_register_alg(&__aesni_alg)))
1269		goto __aes_err;
1270	if ((err = crypto_register_alg(&blk_ecb_alg)))
1271		goto blk_ecb_err;
1272	if ((err = crypto_register_alg(&blk_cbc_alg)))
1273		goto blk_cbc_err;
1274	if ((err = crypto_register_alg(&ablk_ecb_alg)))
1275		goto ablk_ecb_err;
1276	if ((err = crypto_register_alg(&ablk_cbc_alg)))
1277		goto ablk_cbc_err;
1278#ifdef CONFIG_X86_64
1279	if ((err = crypto_register_alg(&blk_ctr_alg)))
1280		goto blk_ctr_err;
1281	if ((err = crypto_register_alg(&ablk_ctr_alg)))
1282		goto ablk_ctr_err;
1283	if ((err = crypto_register_alg(&__rfc4106_alg)))
1284		goto __aead_gcm_err;
1285	if ((err = crypto_register_alg(&rfc4106_alg)))
1286		goto aead_gcm_err;
1287#ifdef HAS_CTR
1288	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
1289		goto ablk_rfc3686_ctr_err;
1290#endif
1291#endif
1292#ifdef HAS_LRW
1293	if ((err = crypto_register_alg(&ablk_lrw_alg)))
1294		goto ablk_lrw_err;
1295#endif
1296#ifdef HAS_PCBC
1297	if ((err = crypto_register_alg(&ablk_pcbc_alg)))
1298		goto ablk_pcbc_err;
1299#endif
1300#ifdef HAS_XTS
1301	if ((err = crypto_register_alg(&ablk_xts_alg)))
1302		goto ablk_xts_err;
1303#endif
1304	return err;
1305
1306#ifdef HAS_XTS
1307ablk_xts_err:
1308#endif
1309#ifdef HAS_PCBC
1310	crypto_unregister_alg(&ablk_pcbc_alg);
1311ablk_pcbc_err:
1312#endif
1313#ifdef HAS_LRW
1314	crypto_unregister_alg(&ablk_lrw_alg);
1315ablk_lrw_err:
1316#endif
1317#ifdef CONFIG_X86_64
1318#ifdef HAS_CTR
1319	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
1320ablk_rfc3686_ctr_err:
1321#endif
1322	crypto_unregister_alg(&rfc4106_alg);
1323aead_gcm_err:
1324	crypto_unregister_alg(&__rfc4106_alg);
1325__aead_gcm_err:
1326	crypto_unregister_alg(&ablk_ctr_alg);
1327ablk_ctr_err:
1328	crypto_unregister_alg(&blk_ctr_alg);
1329blk_ctr_err:
1330#endif
1331	crypto_unregister_alg(&ablk_cbc_alg);
1332ablk_cbc_err:
1333	crypto_unregister_alg(&ablk_ecb_alg);
1334ablk_ecb_err:
1335	crypto_unregister_alg(&blk_cbc_alg);
1336blk_cbc_err:
1337	crypto_unregister_alg(&blk_ecb_alg);
1338blk_ecb_err:
1339	crypto_unregister_alg(&__aesni_alg);
1340__aes_err:
1341	crypto_unregister_alg(&aesni_alg);
1342aes_err:
1343fpu_err:
1344	return err;
1345}
1346
1347static void __exit aesni_exit(void)
1348{
1349#ifdef HAS_XTS
1350	crypto_unregister_alg(&ablk_xts_alg);
1351#endif
1352#ifdef HAS_PCBC
1353	crypto_unregister_alg(&ablk_pcbc_alg);
1354#endif
1355#ifdef HAS_LRW
1356	crypto_unregister_alg(&ablk_lrw_alg);
1357#endif
1358#ifdef CONFIG_X86_64
1359#ifdef HAS_CTR
1360	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
1361#endif
1362	crypto_unregister_alg(&rfc4106_alg);
1363	crypto_unregister_alg(&__rfc4106_alg);
1364	crypto_unregister_alg(&ablk_ctr_alg);
1365	crypto_unregister_alg(&blk_ctr_alg);
1366#endif
1367	crypto_unregister_alg(&ablk_cbc_alg);
1368	crypto_unregister_alg(&ablk_ecb_alg);
1369	crypto_unregister_alg(&blk_cbc_alg);
1370	crypto_unregister_alg(&blk_ecb_alg);
1371	crypto_unregister_alg(&__aesni_alg);
1372	crypto_unregister_alg(&aesni_alg);
1373
1374	crypto_fpu_exit();
1375}
1376
1377module_init(aesni_init);
1378module_exit(aesni_exit);
1379
1380MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1381MODULE_LICENSE("GPL");
1382MODULE_ALIAS("aes");