   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Support for Intel AES-NI instructions. This file contains glue
   4 * code, the real AES implementation is in intel-aes_asm.S.
   5 *
   6 * Copyright (C) 2008, Intel Corp.
   7 *    Author: Huang Ying <ying.huang@intel.com>
   8 *
   9 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
  10 * interface for 64-bit kernels.
  11 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
  12 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
  13 *             Tadeusz Struk (tadeusz.struk@intel.com)
  14 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
  15 *    Copyright (c) 2010, Intel Corporation.
  16 */
  17
  18#include <linux/hardirq.h>
  19#include <linux/types.h>
  20#include <linux/module.h>
  21#include <linux/err.h>
  22#include <crypto/algapi.h>
  23#include <crypto/aes.h>
  24#include <crypto/ctr.h>
  25#include <crypto/b128ops.h>
  26#include <crypto/gcm.h>
  27#include <crypto/xts.h>
  28#include <asm/cpu_device_id.h>
  29#include <asm/simd.h>
  30#include <crypto/scatterwalk.h>
  31#include <crypto/internal/aead.h>
  32#include <crypto/internal/simd.h>
  33#include <crypto/internal/skcipher.h>
  34#include <linux/jump_label.h>
  35#include <linux/workqueue.h>
  36#include <linux/spinlock.h>
  37#include <linux/static_call.h>
  38
  39
  40#define AESNI_ALIGN	16
  41#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
  42#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
  43#define RFC4106_HASH_SUBKEY_SIZE 16
  44#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
  45#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
  46#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
  47
  48/* This data is stored at the end of the crypto_tfm struct.
  49 * It's a type of per "session" data storage location.
  50 * This needs to be 16 byte aligned.
  51 */
  52struct aesni_rfc4106_gcm_ctx {
  53	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
  54	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
  55	u8 nonce[4];
  56};
  57
  58struct generic_gcmaes_ctx {
  59	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
  60	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
  61};
  62
  63struct aesni_xts_ctx {
  64	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
  65	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
  66};
  67
  68#define GCM_BLOCK_LEN 16
  69
  70struct gcm_context_data {
  71	/* init, update and finalize context data */
  72	u8 aad_hash[GCM_BLOCK_LEN];
  73	u64 aad_length;
  74	u64 in_length;
  75	u8 partial_block_enc_key[GCM_BLOCK_LEN];
  76	u8 orig_IV[GCM_BLOCK_LEN];
  77	u8 current_counter[GCM_BLOCK_LEN];
  78	u64 partial_block_len;
  79	u64 unused;
  80	u8 hash_keys[GCM_BLOCK_LEN * 16];
  81};
  82
  83asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
  84			     unsigned int key_len);
  85asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
  86asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
  87asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
  88			      const u8 *in, unsigned int len);
  89asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
  90			      const u8 *in, unsigned int len);
  91asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
  92			      const u8 *in, unsigned int len, u8 *iv);
  93asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
  94			      const u8 *in, unsigned int len, u8 *iv);
  95asmlinkage void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
  96				  const u8 *in, unsigned int len, u8 *iv);
  97asmlinkage void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
  98				  const u8 *in, unsigned int len, u8 *iv);
  99
 100#define AVX_GEN2_OPTSIZE 640
 101#define AVX_GEN4_OPTSIZE 4096
 102
 103asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
 104				  const u8 *in, unsigned int len, u8 *iv);
 105
 106asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
 107				  const u8 *in, unsigned int len, u8 *iv);
 108
 109#ifdef CONFIG_X86_64
 110
 111asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 112			      const u8 *in, unsigned int len, u8 *iv);
 113DEFINE_STATIC_CALL(aesni_ctr_enc_tfm, aesni_ctr_enc);
 114
 115/* Scatter / Gather routines, with args similar to above */
 116asmlinkage void aesni_gcm_init(void *ctx,
 117			       struct gcm_context_data *gdata,
 118			       u8 *iv,
 119			       u8 *hash_subkey, const u8 *aad,
 120			       unsigned long aad_len);
 121asmlinkage void aesni_gcm_enc_update(void *ctx,
 122				     struct gcm_context_data *gdata, u8 *out,
 123				     const u8 *in, unsigned long plaintext_len);
 124asmlinkage void aesni_gcm_dec_update(void *ctx,
 125				     struct gcm_context_data *gdata, u8 *out,
 126				     const u8 *in,
 127				     unsigned long ciphertext_len);
 128asmlinkage void aesni_gcm_finalize(void *ctx,
 129				   struct gcm_context_data *gdata,
 130				   u8 *auth_tag, unsigned long auth_tag_len);
 131
 132asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
 133		void *keys, u8 *out, unsigned int num_bytes);
 134asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
 135		void *keys, u8 *out, unsigned int num_bytes);
 136asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
 137		void *keys, u8 *out, unsigned int num_bytes);
 138
 139
 140asmlinkage void aes_xctr_enc_128_avx_by8(const u8 *in, const u8 *iv,
 141	const void *keys, u8 *out, unsigned int num_bytes,
 142	unsigned int byte_ctr);
 143
 144asmlinkage void aes_xctr_enc_192_avx_by8(const u8 *in, const u8 *iv,
 145	const void *keys, u8 *out, unsigned int num_bytes,
 146	unsigned int byte_ctr);
 147
 148asmlinkage void aes_xctr_enc_256_avx_by8(const u8 *in, const u8 *iv,
 149	const void *keys, u8 *out, unsigned int num_bytes,
 150	unsigned int byte_ctr);
 151
 152/*
 153 * asmlinkage void aesni_gcm_init_avx_gen2()
 154 * gcm_data *my_ctx_data, context data
 155 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 156 */
 157asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
 158					struct gcm_context_data *gdata,
 159					u8 *iv,
 160					u8 *hash_subkey,
 161					const u8 *aad,
 162					unsigned long aad_len);
 163
 164asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
 165				     struct gcm_context_data *gdata, u8 *out,
 166				     const u8 *in, unsigned long plaintext_len);
 167asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
 168				     struct gcm_context_data *gdata, u8 *out,
 169				     const u8 *in,
 170				     unsigned long ciphertext_len);
 171asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
 172				   struct gcm_context_data *gdata,
 173				   u8 *auth_tag, unsigned long auth_tag_len);
 174
 175/*
 176 * asmlinkage void aesni_gcm_init_avx_gen4()
 177 * gcm_data *my_ctx_data, context data
 178 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 179 */
 180asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
 181					struct gcm_context_data *gdata,
 182					u8 *iv,
 183					u8 *hash_subkey,
 184					const u8 *aad,
 185					unsigned long aad_len);
 186
 187asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
 188				     struct gcm_context_data *gdata, u8 *out,
 189				     const u8 *in, unsigned long plaintext_len);
 190asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
 191				     struct gcm_context_data *gdata, u8 *out,
 192				     const u8 *in,
 193				     unsigned long ciphertext_len);
 194asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
 195				   struct gcm_context_data *gdata,
 196				   u8 *auth_tag, unsigned long auth_tag_len);
 197
 198static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx);
 199static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx2);
 200
 201static inline struct
 202aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
 203{
 204	unsigned long align = AESNI_ALIGN;
 205
 206	if (align <= crypto_tfm_ctx_alignment())
 207		align = 1;
 208	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
 209}
 210
 211static inline struct
 212generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
 213{
 214	unsigned long align = AESNI_ALIGN;
 215
 216	if (align <= crypto_tfm_ctx_alignment())
 217		align = 1;
 218	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
 219}
 220#endif
 221
 222static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 223{
 224	unsigned long addr = (unsigned long)raw_ctx;
 225	unsigned long align = AESNI_ALIGN;
 226
 227	if (align <= crypto_tfm_ctx_alignment())
 228		align = 1;
 229	return (struct crypto_aes_ctx *)ALIGN(addr, align);
 230}
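/*
 * Note on aes_ctx(): the crypto API only guarantees CRYPTO_MINALIGN for a
 * transform's context memory, while the AES-NI assembly expects its key
 * schedule on a 16-byte boundary.  CRYPTO_AES_CTX_SIZE therefore
 * over-allocates by AESNI_ALIGN_EXTRA and aes_ctx() rounds the raw context
 * pointer up at run time (a no-op when the API's own alignment already
 * suffices), roughly as in this illustrative sketch:
 *
 *	void *raw = crypto_tfm_ctx(tfm);           // CRYPTO_MINALIGN only
 *	struct crypto_aes_ctx *ctx = aes_ctx(raw); // 16-byte aligned view
 */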
 231
 232static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 233			      const u8 *in_key, unsigned int key_len)
 234{
 235	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
 236	int err;
 237
 238	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
 239	    key_len != AES_KEYSIZE_256)
 240		return -EINVAL;
 241
 242	if (!crypto_simd_usable())
 243		err = aes_expandkey(ctx, in_key, key_len);
 244	else {
 245		kernel_fpu_begin();
 246		err = aesni_set_key(ctx, in_key, key_len);
 247		kernel_fpu_end();
 248	}
 249
 250	return err;
 251}
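/*
 * This begin/end pattern recurs throughout the file: the AES-NI instructions
 * use the SIMD register state, so the asm helpers may only run inside a
 * kernel_fpu_begin()/kernel_fpu_end() section, and whenever
 * crypto_simd_usable() reports that SIMD cannot be used (for example in hard
 * interrupt context) the glue code falls back to the generic C
 * implementation (aes_expandkey()/aes_encrypt()/aes_decrypt()) instead.
 */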
 252
 253static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 254		       unsigned int key_len)
 255{
 256	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
 257}
 258
 259static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 260{
 261	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 262
 263	if (!crypto_simd_usable()) {
 264		aes_encrypt(ctx, dst, src);
 265	} else {
 266		kernel_fpu_begin();
 267		aesni_enc(ctx, dst, src);
 268		kernel_fpu_end();
 269	}
 270}
 271
 272static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 273{
 274	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 275
 276	if (!crypto_simd_usable()) {
 277		aes_decrypt(ctx, dst, src);
 278	} else {
 279		kernel_fpu_begin();
 280		aesni_dec(ctx, dst, src);
 281		kernel_fpu_end();
 282	}
 283}
 284
 285static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
 286			         unsigned int len)
 287{
 288	return aes_set_key_common(crypto_skcipher_tfm(tfm),
 289				  crypto_skcipher_ctx(tfm), key, len);
 290}
 291
 292static int ecb_encrypt(struct skcipher_request *req)
 293{
 294	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 295	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 296	struct skcipher_walk walk;
 297	unsigned int nbytes;
 298	int err;
 299
 300	err = skcipher_walk_virt(&walk, req, false);
 301
 302	while ((nbytes = walk.nbytes)) {
 303		kernel_fpu_begin();
 304		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 305			      nbytes & AES_BLOCK_MASK);
 306		kernel_fpu_end();
 307		nbytes &= AES_BLOCK_SIZE - 1;
 308		err = skcipher_walk_done(&walk, nbytes);
 309	}
 310
 311	return err;
 312}
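/*
 * The loop above is the standard skcipher walk used by all of the ECB/CBC
 * helpers that follow: skcipher_walk_virt() maps the request's scatterlist
 * one contiguous chunk at a time, whole blocks (nbytes & AES_BLOCK_MASK) are
 * handed to the assembly, and the unprocessed remainder is returned through
 * skcipher_walk_done(), which advances the walk to the next chunk.
 */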
 313
 314static int ecb_decrypt(struct skcipher_request *req)
 315{
 316	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 317	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 318	struct skcipher_walk walk;
 319	unsigned int nbytes;
 320	int err;
 321
 322	err = skcipher_walk_virt(&walk, req, false);
 323
 324	while ((nbytes = walk.nbytes)) {
 325		kernel_fpu_begin();
 326		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 327			      nbytes & AES_BLOCK_MASK);
 328		kernel_fpu_end();
 329		nbytes &= AES_BLOCK_SIZE - 1;
 330		err = skcipher_walk_done(&walk, nbytes);
 331	}
 332
 333	return err;
 334}
 335
 336static int cbc_encrypt(struct skcipher_request *req)
 337{
 338	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 339	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 340	struct skcipher_walk walk;
 341	unsigned int nbytes;
 342	int err;
 343
 344	err = skcipher_walk_virt(&walk, req, false);
 345
 346	while ((nbytes = walk.nbytes)) {
 347		kernel_fpu_begin();
 348		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 349			      nbytes & AES_BLOCK_MASK, walk.iv);
 350		kernel_fpu_end();
 351		nbytes &= AES_BLOCK_SIZE - 1;
 352		err = skcipher_walk_done(&walk, nbytes);
 353	}
 354
 355	return err;
 356}
 357
 358static int cbc_decrypt(struct skcipher_request *req)
 359{
 360	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 361	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 362	struct skcipher_walk walk;
 363	unsigned int nbytes;
 364	int err;
 365
 366	err = skcipher_walk_virt(&walk, req, false);
 367
 368	while ((nbytes = walk.nbytes)) {
 369		kernel_fpu_begin();
 370		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 371			      nbytes & AES_BLOCK_MASK, walk.iv);
 372		kernel_fpu_end();
 373		nbytes &= AES_BLOCK_SIZE - 1;
 374		err = skcipher_walk_done(&walk, nbytes);
 375	}
 376
 377	return err;
 378}
 379
 380static int cts_cbc_encrypt(struct skcipher_request *req)
 381{
 382	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 383	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 384	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
 385	struct scatterlist *src = req->src, *dst = req->dst;
 386	struct scatterlist sg_src[2], sg_dst[2];
 387	struct skcipher_request subreq;
 388	struct skcipher_walk walk;
 389	int err;
 390
 391	skcipher_request_set_tfm(&subreq, tfm);
 392	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
 393				      NULL, NULL);
 394
 395	if (req->cryptlen <= AES_BLOCK_SIZE) {
 396		if (req->cryptlen < AES_BLOCK_SIZE)
 397			return -EINVAL;
 398		cbc_blocks = 1;
 399	}
 400
 401	if (cbc_blocks > 0) {
 402		skcipher_request_set_crypt(&subreq, req->src, req->dst,
 403					   cbc_blocks * AES_BLOCK_SIZE,
 404					   req->iv);
 405
 406		err = cbc_encrypt(&subreq);
 407		if (err)
 408			return err;
 409
 410		if (req->cryptlen == AES_BLOCK_SIZE)
 411			return 0;
 412
 413		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
 414		if (req->dst != req->src)
 415			dst = scatterwalk_ffwd(sg_dst, req->dst,
 416					       subreq.cryptlen);
 417	}
 418
 419	/* handle ciphertext stealing */
 420	skcipher_request_set_crypt(&subreq, src, dst,
 421				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
 422				   req->iv);
 423
 424	err = skcipher_walk_virt(&walk, &subreq, false);
 425	if (err)
 426		return err;
 427
 428	kernel_fpu_begin();
 429	aesni_cts_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 430			  walk.nbytes, walk.iv);
 431	kernel_fpu_end();
 432
 433	return skcipher_walk_done(&walk, 0);
 434}
 435
 436static int cts_cbc_decrypt(struct skcipher_request *req)
 437{
 438	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 439	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 440	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
 441	struct scatterlist *src = req->src, *dst = req->dst;
 442	struct scatterlist sg_src[2], sg_dst[2];
 443	struct skcipher_request subreq;
 444	struct skcipher_walk walk;
 445	int err;
 446
 447	skcipher_request_set_tfm(&subreq, tfm);
 448	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
 449				      NULL, NULL);
 450
 451	if (req->cryptlen <= AES_BLOCK_SIZE) {
 452		if (req->cryptlen < AES_BLOCK_SIZE)
 453			return -EINVAL;
 454		cbc_blocks = 1;
 455	}
 456
 457	if (cbc_blocks > 0) {
 458		skcipher_request_set_crypt(&subreq, req->src, req->dst,
 459					   cbc_blocks * AES_BLOCK_SIZE,
 460					   req->iv);
 461
 462		err = cbc_decrypt(&subreq);
 463		if (err)
 464			return err;
 465
 466		if (req->cryptlen == AES_BLOCK_SIZE)
 467			return 0;
 468
 469		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
 470		if (req->dst != req->src)
 471			dst = scatterwalk_ffwd(sg_dst, req->dst,
 472					       subreq.cryptlen);
 473	}
 474
 475	/* handle ciphertext stealing */
 476	skcipher_request_set_crypt(&subreq, src, dst,
 477				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
 478				   req->iv);
 479
 480	err = skcipher_walk_virt(&walk, &subreq, false);
 481	if (err)
 482		return err;
 483
 484	kernel_fpu_begin();
 485	aesni_cts_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 486			  walk.nbytes, walk.iv);
 487	kernel_fpu_end();
 488
 489	return skcipher_walk_done(&walk, 0);
 490}
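/*
 * Ciphertext stealing, as implemented by both cts_cbc_* helpers above:
 * everything up to the last two blocks is processed as ordinary CBC through
 * a subrequest, and the final "full block + partial block" tail is handled
 * by the dedicated aesni_cts_cbc_enc/dec routines, matching the behaviour of
 * the kernel's generic cts(cbc(aes)) template so the C fallback produces
 * identical results.
 */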
 491
 492#ifdef CONFIG_X86_64
 493static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
 494			      const u8 *in, unsigned int len, u8 *iv)
 495{
 496	/*
 497	 * based on key length, override with the by8 version
 498	 * of ctr mode encryption/decryption for improved performance
 499	 * aes_set_key_common() ensures that key length is one of
 500	 * {128,192,256}
 501	 */
 502	if (ctx->key_length == AES_KEYSIZE_128)
 503		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
 504	else if (ctx->key_length == AES_KEYSIZE_192)
 505		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
 506	else
 507		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
 508}
 509
 510static int ctr_crypt(struct skcipher_request *req)
 511{
 512	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 513	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 514	u8 keystream[AES_BLOCK_SIZE];
 515	struct skcipher_walk walk;
 516	unsigned int nbytes;
 517	int err;
 518
 519	err = skcipher_walk_virt(&walk, req, false);
 520
 521	while ((nbytes = walk.nbytes) > 0) {
 522		kernel_fpu_begin();
 523		if (nbytes & AES_BLOCK_MASK)
 524			static_call(aesni_ctr_enc_tfm)(ctx, walk.dst.virt.addr,
 525						       walk.src.virt.addr,
 526						       nbytes & AES_BLOCK_MASK,
 527						       walk.iv);
 528		nbytes &= ~AES_BLOCK_MASK;
 529
 530		if (walk.nbytes == walk.total && nbytes > 0) {
 531			aesni_enc(ctx, keystream, walk.iv);
 532			crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes - nbytes,
 533				       walk.src.virt.addr + walk.nbytes - nbytes,
 534				       keystream, nbytes);
 535			crypto_inc(walk.iv, AES_BLOCK_SIZE);
 536			nbytes = 0;
 537		}
 538		kernel_fpu_end();
 539		err = skcipher_walk_done(&walk, nbytes);
 540	}
 541	return err;
 542}
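/*
 * CTR tail handling: whole blocks go through the aesni_ctr_enc_tfm static
 * call (plain aesni_ctr_enc, or one of the AVX "by8" variants selected at
 * module init), and a trailing partial block is produced by encrypting the
 * current counter block into the keystream buffer, XOR-ing it over the
 * remaining bytes with crypto_xor_cpy() and then incrementing the IV.
 */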
 543
 544static void aesni_xctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
 545				   const u8 *in, unsigned int len, u8 *iv,
 546				   unsigned int byte_ctr)
 547{
 548	if (ctx->key_length == AES_KEYSIZE_128)
 549		aes_xctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len,
 550					 byte_ctr);
 551	else if (ctx->key_length == AES_KEYSIZE_192)
 552		aes_xctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len,
 553					 byte_ctr);
 554	else
 555		aes_xctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len,
 556					 byte_ctr);
 557}
 558
 559static int xctr_crypt(struct skcipher_request *req)
 560{
 561	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 562	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 563	u8 keystream[AES_BLOCK_SIZE];
 564	struct skcipher_walk walk;
 565	unsigned int nbytes;
 566	unsigned int byte_ctr = 0;
 567	int err;
 568	__le32 block[AES_BLOCK_SIZE / sizeof(__le32)];
 569
 570	err = skcipher_walk_virt(&walk, req, false);
 571
 572	while ((nbytes = walk.nbytes) > 0) {
 573		kernel_fpu_begin();
 574		if (nbytes & AES_BLOCK_MASK)
 575			aesni_xctr_enc_avx_tfm(ctx, walk.dst.virt.addr,
 576				walk.src.virt.addr, nbytes & AES_BLOCK_MASK,
 577				walk.iv, byte_ctr);
 578		nbytes &= ~AES_BLOCK_MASK;
 579		byte_ctr += walk.nbytes - nbytes;
 580
 581		if (walk.nbytes == walk.total && nbytes > 0) {
 582			memcpy(block, walk.iv, AES_BLOCK_SIZE);
 583			block[0] ^= cpu_to_le32(1 + byte_ctr / AES_BLOCK_SIZE);
 584			aesni_enc(ctx, keystream, (u8 *)block);
 585			crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes -
 586				       nbytes, walk.src.virt.addr + walk.nbytes
 587				       - nbytes, keystream, nbytes);
 588			byte_ctr += nbytes;
 589			nbytes = 0;
 590		}
 591		kernel_fpu_end();
 592		err = skcipher_walk_done(&walk, nbytes);
 593	}
 594	return err;
 595}
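/*
 * XCTR (the counter mode used by HCTR2) combines the counter with the IV
 * differently from CTR: rather than keeping a big-endian counter inside the
 * IV, the 1-based little-endian block index derived from byte_ctr is XOR-ed
 * into the first word of the IV before encryption, which is what the
 * partial-block path above does explicitly for the tail.
 */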
 596
 597static int
 598rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
 599{
 600	struct crypto_aes_ctx ctx;
 601	int ret;
 602
 603	ret = aes_expandkey(&ctx, key, key_len);
 604	if (ret)
 605		return ret;
 606
 607	/* Clear the data in the hash sub key container to zero.*/
 608	/* We want to cipher all zeros to create the hash sub key. */
 609	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
 610
 611	aes_encrypt(&ctx, hash_subkey, hash_subkey);
 612
 613	memzero_explicit(&ctx, sizeof(ctx));
 614	return 0;
 615}
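/*
 * Per the GCM specification the GHASH subkey H is the encryption of the
 * all-zero block under the AES key.  It is computed here with the generic
 * aes_expandkey()/aes_encrypt() so that setkey does not depend on SIMD being
 * usable, and the temporary key schedule is wiped with memzero_explicit()
 * afterwards.
 */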
 616
 617static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
 618				  unsigned int key_len)
 619{
 620	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
 621
 622	if (key_len < 4)
 623		return -EINVAL;
 624
 625	/*Account for 4 byte nonce at the end.*/
 626	key_len -= 4;
 627
 628	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
 629
 630	return aes_set_key_common(crypto_aead_tfm(aead),
 631				  &ctx->aes_key_expanded, key, key_len) ?:
 632	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
 633}
 634
 635/* This is the Integrity Check Value (aka the authentication tag) length and can
 636 * be 8, 12 or 16 bytes long. */
 637static int common_rfc4106_set_authsize(struct crypto_aead *aead,
 638				       unsigned int authsize)
 639{
 640	switch (authsize) {
 641	case 8:
 642	case 12:
 643	case 16:
 644		break;
 645	default:
 646		return -EINVAL;
 647	}
 648
 649	return 0;
 650}
 651
 652static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
 653				       unsigned int authsize)
 654{
 655	switch (authsize) {
 656	case 4:
 657	case 8:
 658	case 12:
 659	case 13:
 660	case 14:
 661	case 15:
 662	case 16:
 663		break;
 664	default:
 665		return -EINVAL;
 666	}
 667
 668	return 0;
 669}
 670
 671static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 672			      unsigned int assoclen, u8 *hash_subkey,
 673			      u8 *iv, void *aes_ctx, u8 *auth_tag,
 674			      unsigned long auth_tag_len)
 675{
 676	u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
 677	struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
 678	unsigned long left = req->cryptlen;
 679	struct scatter_walk assoc_sg_walk;
 680	struct skcipher_walk walk;
 681	bool do_avx, do_avx2;
 682	u8 *assocmem = NULL;
 683	u8 *assoc;
 684	int err;
 685
 686	if (!enc)
 687		left -= auth_tag_len;
 688
 689	do_avx = (left >= AVX_GEN2_OPTSIZE);
 690	do_avx2 = (left >= AVX_GEN4_OPTSIZE);
 691
 692	/* Linearize assoc, if not already linear */
 693	if (req->src->length >= assoclen && req->src->length) {
 694		scatterwalk_start(&assoc_sg_walk, req->src);
 695		assoc = scatterwalk_map(&assoc_sg_walk);
 696	} else {
 697		gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 698			      GFP_KERNEL : GFP_ATOMIC;
 699
 700		/* assoc can be any length, so must be on heap */
 701		assocmem = kmalloc(assoclen, flags);
 702		if (unlikely(!assocmem))
 703			return -ENOMEM;
 704		assoc = assocmem;
 705
 706		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
 707	}
 708
 709	kernel_fpu_begin();
 710	if (static_branch_likely(&gcm_use_avx2) && do_avx2)
 711		aesni_gcm_init_avx_gen4(aes_ctx, data, iv, hash_subkey, assoc,
 712					assoclen);
 713	else if (static_branch_likely(&gcm_use_avx) && do_avx)
 714		aesni_gcm_init_avx_gen2(aes_ctx, data, iv, hash_subkey, assoc,
 715					assoclen);
 716	else
 717		aesni_gcm_init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
 718	kernel_fpu_end();
 719
 720	if (!assocmem)
 721		scatterwalk_unmap(assoc);
 722	else
 723		kfree(assocmem);
 724
 725	err = enc ? skcipher_walk_aead_encrypt(&walk, req, false)
 726		  : skcipher_walk_aead_decrypt(&walk, req, false);
 727
 728	while (walk.nbytes > 0) {
 729		kernel_fpu_begin();
 730		if (static_branch_likely(&gcm_use_avx2) && do_avx2) {
 731			if (enc)
 732				aesni_gcm_enc_update_avx_gen4(aes_ctx, data,
 733							      walk.dst.virt.addr,
 734							      walk.src.virt.addr,
 735							      walk.nbytes);
 736			else
 737				aesni_gcm_dec_update_avx_gen4(aes_ctx, data,
 738							      walk.dst.virt.addr,
 739							      walk.src.virt.addr,
 740							      walk.nbytes);
 741		} else if (static_branch_likely(&gcm_use_avx) && do_avx) {
 742			if (enc)
 743				aesni_gcm_enc_update_avx_gen2(aes_ctx, data,
 744							      walk.dst.virt.addr,
 745							      walk.src.virt.addr,
 746							      walk.nbytes);
 747			else
 748				aesni_gcm_dec_update_avx_gen2(aes_ctx, data,
 749							      walk.dst.virt.addr,
 750							      walk.src.virt.addr,
 751							      walk.nbytes);
 752		} else if (enc) {
 753			aesni_gcm_enc_update(aes_ctx, data, walk.dst.virt.addr,
 754					     walk.src.virt.addr, walk.nbytes);
 755		} else {
 756			aesni_gcm_dec_update(aes_ctx, data, walk.dst.virt.addr,
 757					     walk.src.virt.addr, walk.nbytes);
 758		}
 759		kernel_fpu_end();
 760
 761		err = skcipher_walk_done(&walk, 0);
 762	}
 763
 764	if (err)
 765		return err;
 766
 767	kernel_fpu_begin();
 768	if (static_branch_likely(&gcm_use_avx2) && do_avx2)
 769		aesni_gcm_finalize_avx_gen4(aes_ctx, data, auth_tag,
 770					    auth_tag_len);
 771	else if (static_branch_likely(&gcm_use_avx) && do_avx)
 772		aesni_gcm_finalize_avx_gen2(aes_ctx, data, auth_tag,
 773					    auth_tag_len);
 774	else
 775		aesni_gcm_finalize(aes_ctx, data, auth_tag, auth_tag_len);
 776	kernel_fpu_end();
 777
 778	return 0;
 779}
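/*
 * gcmaes_crypt_by_sg() drives the assembly through its three-phase
 * init/update/finalize interface: the (linearized) associated data is hashed
 * during init, the payload is then en/decrypted chunk by chunk under the
 * skcipher walk, and finalize emits the authentication tag.  The AVX (gen2)
 * and AVX2 (gen4) code paths are only taken for payloads of at least
 * AVX_GEN2_OPTSIZE (640) or AVX_GEN4_OPTSIZE (4096) bytes respectively;
 * below those thresholds the SSE routines are used.
 */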
 780
 781static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
 782			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
 783{
 784	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 785	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
 786	u8 auth_tag[16];
 787	int err;
 788
 789	err = gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, aes_ctx,
 790				 auth_tag, auth_tag_len);
 791	if (err)
 792		return err;
 793
 794	scatterwalk_map_and_copy(auth_tag, req->dst,
 795				 req->assoclen + req->cryptlen,
 796				 auth_tag_len, 1);
 797	return 0;
 798}
 799
 800static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
 801			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
 802{
 803	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 804	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
 805	u8 auth_tag_msg[16];
 806	u8 auth_tag[16];
 807	int err;
 808
 809	err = gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, aes_ctx,
 810				 auth_tag, auth_tag_len);
 811	if (err)
 812		return err;
 813
 814	/* Copy out original auth_tag */
 815	scatterwalk_map_and_copy(auth_tag_msg, req->src,
 816				 req->assoclen + req->cryptlen - auth_tag_len,
 817				 auth_tag_len, 0);
 818
 819	/* Compare generated tag with passed in tag. */
 820	if (crypto_memneq(auth_tag_msg, auth_tag, auth_tag_len)) {
 821		memzero_explicit(auth_tag, sizeof(auth_tag));
 822		return -EBADMSG;
 823	}
 824	return 0;
 825}
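/*
 * For decryption the expected tag is never handed to the assembly: the tag
 * is recomputed over the received ciphertext, the transmitted tag is copied
 * out of the source scatterlist, and the two are compared with
 * crypto_memneq() (a constant-time comparison), returning -EBADMSG on
 * mismatch.
 */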
 826
 827static int helper_rfc4106_encrypt(struct aead_request *req)
 828{
 829	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 830	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 831	void *aes_ctx = &(ctx->aes_key_expanded);
 832	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
 833	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 834	unsigned int i;
 835	__be32 counter = cpu_to_be32(1);
 836
 837	/* Assuming we are supporting rfc4106 64-bit extended */
 838	/* sequence numbers We need to have the AAD length equal */
 839	/* to 16 or 20 bytes */
 840	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
 841		return -EINVAL;
 842
 843	/* IV below built */
 844	for (i = 0; i < 4; i++)
 845		*(iv+i) = ctx->nonce[i];
 846	for (i = 0; i < 8; i++)
 847		*(iv+4+i) = req->iv[i];
 848	*((__be32 *)(iv+12)) = counter;
 849
 850	return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
 851			      aes_ctx);
 852}
 853
 854static int helper_rfc4106_decrypt(struct aead_request *req)
 855{
 856	__be32 counter = cpu_to_be32(1);
 857	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 858	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 859	void *aes_ctx = &(ctx->aes_key_expanded);
 860	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
 861	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 862	unsigned int i;
 863
 864	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
 865		return -EINVAL;
 866
 867	/* Assuming we are supporting rfc4106 64-bit extended */
 868	/* sequence numbers We need to have the AAD length */
 869	/* equal to 16 or 20 bytes */
 870
 871	/* IV below built */
 872	for (i = 0; i < 4; i++)
 873		*(iv+i) = ctx->nonce[i];
 874	for (i = 0; i < 8; i++)
 875		*(iv+4+i) = req->iv[i];
 876	*((__be32 *)(iv+12)) = counter;
 877
 878	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
 879			      aes_ctx);
 880}
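/*
 * RFC 4106 IV construction, common to both helpers above: the 4-byte salt
 * saved at setkey time (ctx->nonce) is concatenated with the 8-byte explicit
 * IV taken from the request and a 32-bit counter initialised to 1, giving
 * the 16-byte pre-counter block J0.  The explicit IV also travels in the
 * request's associated data, which is why req->assoclen must be 16 or 20
 * bytes and why 8 is subtracted before calling into the common GCM code.
 */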
 881#endif
 882
 883static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
 884			    unsigned int keylen)
 885{
 886	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 887	int err;
 888
 889	err = xts_verify_key(tfm, key, keylen);
 890	if (err)
 891		return err;
 892
 893	keylen /= 2;
 894
 895	/* first half of xts-key is for crypt */
 896	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
 897				 key, keylen);
 898	if (err)
 899		return err;
 900
 901	/* second half of xts-key is for tweak */
 902	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
 903				  key + keylen, keylen);
 904}
 905
 906static int xts_crypt(struct skcipher_request *req, bool encrypt)
 907{
 908	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 909	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 910	int tail = req->cryptlen % AES_BLOCK_SIZE;
 911	struct skcipher_request subreq;
 912	struct skcipher_walk walk;
 913	int err;
 914
 915	if (req->cryptlen < AES_BLOCK_SIZE)
 916		return -EINVAL;
 917
 918	err = skcipher_walk_virt(&walk, req, false);
 919	if (!walk.nbytes)
 920		return err;
 921
 922	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
 923		int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
 924
 925		skcipher_walk_abort(&walk);
 926
 927		skcipher_request_set_tfm(&subreq, tfm);
 928		skcipher_request_set_callback(&subreq,
 929					      skcipher_request_flags(req),
 930					      NULL, NULL);
 931		skcipher_request_set_crypt(&subreq, req->src, req->dst,
 932					   blocks * AES_BLOCK_SIZE, req->iv);
 933		req = &subreq;
 934
 935		err = skcipher_walk_virt(&walk, req, false);
 936		if (!walk.nbytes)
 937			return err;
 938	} else {
 939		tail = 0;
 940	}
 941
 942	kernel_fpu_begin();
 943
 944	/* calculate first value of T */
 945	aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);
 946
 947	while (walk.nbytes > 0) {
 948		int nbytes = walk.nbytes;
 949
 950		if (nbytes < walk.total)
 951			nbytes &= ~(AES_BLOCK_SIZE - 1);
 952
 953		if (encrypt)
 954			aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
 955					  walk.dst.virt.addr, walk.src.virt.addr,
 956					  nbytes, walk.iv);
 957		else
 958			aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
 959					  walk.dst.virt.addr, walk.src.virt.addr,
 960					  nbytes, walk.iv);
 961		kernel_fpu_end();
 962
 963		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 964
 965		if (walk.nbytes > 0)
 966			kernel_fpu_begin();
 967	}
 968
 969	if (unlikely(tail > 0 && !err)) {
 970		struct scatterlist sg_src[2], sg_dst[2];
 971		struct scatterlist *src, *dst;
 972
 973		dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
 974		if (req->dst != req->src)
 975			dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
 976
 977		skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
 978					   req->iv);
 979
 980		err = skcipher_walk_virt(&walk, &subreq, false);
 981		if (err)
 982			return err;
 983
 984		kernel_fpu_begin();
 985		if (encrypt)
 986			aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
 987					  walk.dst.virt.addr, walk.src.virt.addr,
 988					  walk.nbytes, walk.iv);
 989		else
 990			aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
 991					  walk.dst.virt.addr, walk.src.virt.addr,
 992					  walk.nbytes, walk.iv);
 993		kernel_fpu_end();
 994
 995		err = skcipher_walk_done(&walk, 0);
 996	}
 997	return err;
 998}
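/*
 * XTS flow: the tweak half of the key encrypts the IV once to produce the
 * initial tweak T, and the data half then processes the request in full
 * blocks.  When cryptlen is not a multiple of the block size, the request is
 * first truncated to everything before the last full block, and the
 * remaining "last full block + tail" pair is run separately afterwards,
 * implementing XTS ciphertext stealing on top of the same asm helpers.
 */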
 999
1000static int xts_encrypt(struct skcipher_request *req)
1001{
1002	return xts_crypt(req, true);
1003}
1004
1005static int xts_decrypt(struct skcipher_request *req)
1006{
1007	return xts_crypt(req, false);
1008}
1009
1010static struct crypto_alg aesni_cipher_alg = {
1011	.cra_name		= "aes",
1012	.cra_driver_name	= "aes-aesni",
1013	.cra_priority		= 300,
1014	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
1015	.cra_blocksize		= AES_BLOCK_SIZE,
1016	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1017	.cra_module		= THIS_MODULE,
1018	.cra_u	= {
1019		.cipher	= {
1020			.cia_min_keysize	= AES_MIN_KEY_SIZE,
1021			.cia_max_keysize	= AES_MAX_KEY_SIZE,
1022			.cia_setkey		= aes_set_key,
1023			.cia_encrypt		= aesni_encrypt,
1024			.cia_decrypt		= aesni_decrypt
1025		}
1026	}
1027};
1028
1029static struct skcipher_alg aesni_skciphers[] = {
1030	{
1031		.base = {
1032			.cra_name		= "__ecb(aes)",
1033			.cra_driver_name	= "__ecb-aes-aesni",
1034			.cra_priority		= 400,
1035			.cra_flags		= CRYPTO_ALG_INTERNAL,
1036			.cra_blocksize		= AES_BLOCK_SIZE,
1037			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1038			.cra_module		= THIS_MODULE,
1039		},
1040		.min_keysize	= AES_MIN_KEY_SIZE,
1041		.max_keysize	= AES_MAX_KEY_SIZE,
1042		.setkey		= aesni_skcipher_setkey,
1043		.encrypt	= ecb_encrypt,
1044		.decrypt	= ecb_decrypt,
1045	}, {
1046		.base = {
1047			.cra_name		= "__cbc(aes)",
1048			.cra_driver_name	= "__cbc-aes-aesni",
1049			.cra_priority		= 400,
1050			.cra_flags		= CRYPTO_ALG_INTERNAL,
1051			.cra_blocksize		= AES_BLOCK_SIZE,
1052			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1053			.cra_module		= THIS_MODULE,
1054		},
1055		.min_keysize	= AES_MIN_KEY_SIZE,
1056		.max_keysize	= AES_MAX_KEY_SIZE,
1057		.ivsize		= AES_BLOCK_SIZE,
1058		.setkey		= aesni_skcipher_setkey,
1059		.encrypt	= cbc_encrypt,
1060		.decrypt	= cbc_decrypt,
1061	}, {
1062		.base = {
1063			.cra_name		= "__cts(cbc(aes))",
1064			.cra_driver_name	= "__cts-cbc-aes-aesni",
1065			.cra_priority		= 400,
1066			.cra_flags		= CRYPTO_ALG_INTERNAL,
1067			.cra_blocksize		= AES_BLOCK_SIZE,
1068			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1069			.cra_module		= THIS_MODULE,
1070		},
1071		.min_keysize	= AES_MIN_KEY_SIZE,
1072		.max_keysize	= AES_MAX_KEY_SIZE,
1073		.ivsize		= AES_BLOCK_SIZE,
1074		.walksize	= 2 * AES_BLOCK_SIZE,
1075		.setkey		= aesni_skcipher_setkey,
1076		.encrypt	= cts_cbc_encrypt,
1077		.decrypt	= cts_cbc_decrypt,
1078#ifdef CONFIG_X86_64
1079	}, {
1080		.base = {
1081			.cra_name		= "__ctr(aes)",
1082			.cra_driver_name	= "__ctr-aes-aesni",
1083			.cra_priority		= 400,
1084			.cra_flags		= CRYPTO_ALG_INTERNAL,
1085			.cra_blocksize		= 1,
1086			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1087			.cra_module		= THIS_MODULE,
1088		},
1089		.min_keysize	= AES_MIN_KEY_SIZE,
1090		.max_keysize	= AES_MAX_KEY_SIZE,
1091		.ivsize		= AES_BLOCK_SIZE,
1092		.chunksize	= AES_BLOCK_SIZE,
1093		.setkey		= aesni_skcipher_setkey,
1094		.encrypt	= ctr_crypt,
1095		.decrypt	= ctr_crypt,
1096#endif
1097	}, {
1098		.base = {
1099			.cra_name		= "__xts(aes)",
1100			.cra_driver_name	= "__xts-aes-aesni",
1101			.cra_priority		= 401,
1102			.cra_flags		= CRYPTO_ALG_INTERNAL,
1103			.cra_blocksize		= AES_BLOCK_SIZE,
1104			.cra_ctxsize		= XTS_AES_CTX_SIZE,
1105			.cra_module		= THIS_MODULE,
1106		},
1107		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
1108		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
1109		.ivsize		= AES_BLOCK_SIZE,
1110		.walksize	= 2 * AES_BLOCK_SIZE,
1111		.setkey		= xts_aesni_setkey,
1112		.encrypt	= xts_encrypt,
1113		.decrypt	= xts_decrypt,
1114	}
1115};
1116
1117static
1118struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
1119
1120#ifdef CONFIG_X86_64
1121/*
1122 * XCTR does not have a non-AVX implementation, so it must be enabled
1123 * conditionally.
1124 */
1125static struct skcipher_alg aesni_xctr = {
1126	.base = {
1127		.cra_name		= "__xctr(aes)",
1128		.cra_driver_name	= "__xctr-aes-aesni",
1129		.cra_priority		= 400,
1130		.cra_flags		= CRYPTO_ALG_INTERNAL,
1131		.cra_blocksize		= 1,
1132		.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1133		.cra_module		= THIS_MODULE,
1134	},
1135	.min_keysize	= AES_MIN_KEY_SIZE,
1136	.max_keysize	= AES_MAX_KEY_SIZE,
1137	.ivsize		= AES_BLOCK_SIZE,
1138	.chunksize	= AES_BLOCK_SIZE,
1139	.setkey		= aesni_skcipher_setkey,
1140	.encrypt	= xctr_crypt,
1141	.decrypt	= xctr_crypt,
1142};
1143
1144static struct simd_skcipher_alg *aesni_simd_xctr;
1145#endif /* CONFIG_X86_64 */
1146
1147#ifdef CONFIG_X86_64
1148static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
1149				  unsigned int key_len)
1150{
1151	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
1152
1153	return aes_set_key_common(crypto_aead_tfm(aead),
1154				  &ctx->aes_key_expanded, key, key_len) ?:
1155	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
1156}
1157
1158static int generic_gcmaes_encrypt(struct aead_request *req)
1159{
1160	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1161	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1162	void *aes_ctx = &(ctx->aes_key_expanded);
1163	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
1164	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
1165	__be32 counter = cpu_to_be32(1);
1166
1167	memcpy(iv, req->iv, 12);
1168	*((__be32 *)(iv+12)) = counter;
1169
1170	return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
1171			      aes_ctx);
1172}
1173
1174static int generic_gcmaes_decrypt(struct aead_request *req)
1175{
1176	__be32 counter = cpu_to_be32(1);
1177	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1178	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1179	void *aes_ctx = &(ctx->aes_key_expanded);
1180	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
1181	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
1182
1183	memcpy(iv, req->iv, 12);
1184	*((__be32 *)(iv+12)) = counter;
1185
1186	return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
1187			      aes_ctx);
1188}
1189
1190static struct aead_alg aesni_aeads[] = { {
1191	.setkey			= common_rfc4106_set_key,
1192	.setauthsize		= common_rfc4106_set_authsize,
1193	.encrypt		= helper_rfc4106_encrypt,
1194	.decrypt		= helper_rfc4106_decrypt,
1195	.ivsize			= GCM_RFC4106_IV_SIZE,
1196	.maxauthsize		= 16,
1197	.base = {
1198		.cra_name		= "__rfc4106(gcm(aes))",
1199		.cra_driver_name	= "__rfc4106-gcm-aesni",
1200		.cra_priority		= 400,
1201		.cra_flags		= CRYPTO_ALG_INTERNAL,
1202		.cra_blocksize		= 1,
1203		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
1204		.cra_alignmask		= 0,
1205		.cra_module		= THIS_MODULE,
1206	},
1207}, {
1208	.setkey			= generic_gcmaes_set_key,
1209	.setauthsize		= generic_gcmaes_set_authsize,
1210	.encrypt		= generic_gcmaes_encrypt,
1211	.decrypt		= generic_gcmaes_decrypt,
1212	.ivsize			= GCM_AES_IV_SIZE,
1213	.maxauthsize		= 16,
1214	.base = {
1215		.cra_name		= "__gcm(aes)",
1216		.cra_driver_name	= "__generic-gcm-aesni",
1217		.cra_priority		= 400,
1218		.cra_flags		= CRYPTO_ALG_INTERNAL,
1219		.cra_blocksize		= 1,
1220		.cra_ctxsize		= sizeof(struct generic_gcmaes_ctx),
1221		.cra_alignmask		= 0,
1222		.cra_module		= THIS_MODULE,
1223	},
1224} };
1225#else
1226static struct aead_alg aesni_aeads[0];
1227#endif
1228
1229static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];
1230
1231static const struct x86_cpu_id aesni_cpu_id[] = {
1232	X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),
1233	{}
1234};
1235MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1236
1237static int __init aesni_init(void)
1238{
1239	int err;
1240
1241	if (!x86_match_cpu(aesni_cpu_id))
1242		return -ENODEV;
1243#ifdef CONFIG_X86_64
1244	if (boot_cpu_has(X86_FEATURE_AVX2)) {
1245		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1246		static_branch_enable(&gcm_use_avx);
1247		static_branch_enable(&gcm_use_avx2);
1248	} else
1249	if (boot_cpu_has(X86_FEATURE_AVX)) {
1250		pr_info("AVX version of gcm_enc/dec engaged.\n");
1251		static_branch_enable(&gcm_use_avx);
1252	} else {
1253		pr_info("SSE version of gcm_enc/dec engaged.\n");
1254	}
1255	if (boot_cpu_has(X86_FEATURE_AVX)) {
1256		/* optimize performance of ctr mode encryption transform */
1257		static_call_update(aesni_ctr_enc_tfm, aesni_ctr_enc_avx_tfm);
1258		pr_info("AES CTR mode by8 optimization enabled\n");
1259	}
1260#endif /* CONFIG_X86_64 */
1261
1262	err = crypto_register_alg(&aesni_cipher_alg);
1263	if (err)
1264		return err;
1265
1266	err = simd_register_skciphers_compat(aesni_skciphers,
1267					     ARRAY_SIZE(aesni_skciphers),
1268					     aesni_simd_skciphers);
1269	if (err)
1270		goto unregister_cipher;
1271
1272	err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1273					 aesni_simd_aeads);
1274	if (err)
1275		goto unregister_skciphers;
1276
1277#ifdef CONFIG_X86_64
1278	if (boot_cpu_has(X86_FEATURE_AVX))
1279		err = simd_register_skciphers_compat(&aesni_xctr, 1,
1280						     &aesni_simd_xctr);
1281	if (err)
1282		goto unregister_aeads;
1283#endif /* CONFIG_X86_64 */
1284
1285	return 0;
1286
1287#ifdef CONFIG_X86_64
1288unregister_aeads:
1289	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1290				aesni_simd_aeads);
1291#endif /* CONFIG_X86_64 */
1292
1293unregister_skciphers:
1294	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1295				  aesni_simd_skciphers);
1296unregister_cipher:
1297	crypto_unregister_alg(&aesni_cipher_alg);
1298	return err;
1299}
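/*
 * The skcipher and AEAD implementations above carry CRYPTO_ALG_INTERNAL
 * ("__"-prefixed) names and may only be invoked while SIMD is usable.  The
 * simd_register_*_compat() wrappers therefore create the corresponding
 * public "ecb(aes)", "gcm(aes)", ... instances, which hand a request off to
 * cryptd whenever it arrives in a context where the FPU cannot be used
 * directly; the bare "aes" single-block cipher is registered as-is.
 */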
1300
1301static void __exit aesni_exit(void)
1302{
1303	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1304			      aesni_simd_aeads);
1305	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1306				  aesni_simd_skciphers);
1307	crypto_unregister_alg(&aesni_cipher_alg);
1308#ifdef CONFIG_X86_64
1309	if (boot_cpu_has(X86_FEATURE_AVX))
1310		simd_unregister_skciphers(&aesni_xctr, 1, &aesni_simd_xctr);
1311#endif /* CONFIG_X86_64 */
1312}
1313
1314late_initcall(aesni_init);
1315module_exit(aesni_exit);
1316
1317MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1318MODULE_LICENSE("GPL");
1319MODULE_ALIAS_CRYPTO("aes");
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Support for Intel AES-NI instructions. This file contains glue
   4 * code, the real AES implementation is in intel-aes_asm.S.
   5 *
   6 * Copyright (C) 2008, Intel Corp.
   7 *    Author: Huang Ying <ying.huang@intel.com>
   8 *
   9 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
  10 * interface for 64-bit kernels.
  11 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
  12 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
  13 *             Tadeusz Struk (tadeusz.struk@intel.com)
  14 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
  15 *    Copyright (c) 2010, Intel Corporation.
  16 */
  17
  18#include <linux/hardirq.h>
  19#include <linux/types.h>
  20#include <linux/module.h>
  21#include <linux/err.h>
  22#include <crypto/algapi.h>
  23#include <crypto/aes.h>
  24#include <crypto/ctr.h>
  25#include <crypto/b128ops.h>
  26#include <crypto/gcm.h>
  27#include <crypto/xts.h>
  28#include <asm/cpu_device_id.h>
  29#include <asm/simd.h>
  30#include <crypto/scatterwalk.h>
  31#include <crypto/internal/aead.h>
  32#include <crypto/internal/simd.h>
  33#include <crypto/internal/skcipher.h>
 
  34#include <linux/workqueue.h>
  35#include <linux/spinlock.h>
  36#ifdef CONFIG_X86_64
  37#include <asm/crypto/glue_helper.h>
  38#endif
  39
  40
  41#define AESNI_ALIGN	16
  42#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
  43#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
  44#define RFC4106_HASH_SUBKEY_SIZE 16
  45#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
  46#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
  47#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
  48
  49/* This data is stored at the end of the crypto_tfm struct.
  50 * It's a type of per "session" data storage location.
  51 * This needs to be 16 byte aligned.
  52 */
  53struct aesni_rfc4106_gcm_ctx {
  54	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
  55	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
  56	u8 nonce[4];
  57};
  58
  59struct generic_gcmaes_ctx {
  60	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
  61	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
  62};
  63
  64struct aesni_xts_ctx {
  65	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
  66	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
  67};
  68
  69#define GCM_BLOCK_LEN 16
  70
  71struct gcm_context_data {
  72	/* init, update and finalize context data */
  73	u8 aad_hash[GCM_BLOCK_LEN];
  74	u64 aad_length;
  75	u64 in_length;
  76	u8 partial_block_enc_key[GCM_BLOCK_LEN];
  77	u8 orig_IV[GCM_BLOCK_LEN];
  78	u8 current_counter[GCM_BLOCK_LEN];
  79	u64 partial_block_len;
  80	u64 unused;
  81	u8 hash_keys[GCM_BLOCK_LEN * 16];
  82};
  83
  84asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
  85			     unsigned int key_len);
  86asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
  87			  const u8 *in);
  88asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
  89			  const u8 *in);
  90asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
  91			      const u8 *in, unsigned int len);
  92asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
  93			      const u8 *in, unsigned int len);
  94asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
  95			      const u8 *in, unsigned int len, u8 *iv);
  96asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
  97			      const u8 *in, unsigned int len, u8 *iv);
 
 
 
 
  98
  99#define AVX_GEN2_OPTSIZE 640
 100#define AVX_GEN4_OPTSIZE 4096
 101
 
 
 
 
 
 
 102#ifdef CONFIG_X86_64
 103
 104static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
 105			      const u8 *in, unsigned int len, u8 *iv);
 106asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 107			      const u8 *in, unsigned int len, u8 *iv);
 108
 109asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
 110				 const u8 *in, bool enc, u8 *iv);
 111
 112/* asmlinkage void aesni_gcm_enc()
 113 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 114 * struct gcm_context_data.  May be uninitialized.
 115 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 116 * const u8 *in, Plaintext input
 117 * unsigned long plaintext_len, Length of data in bytes for encryption.
 118 * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
 119 *         16-byte aligned pointer.
 120 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 121 * const u8 *aad, Additional Authentication Data (AAD)
 122 * unsigned long aad_len, Length of AAD in bytes.
 123 * u8 *auth_tag, Authenticated Tag output.
 124 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 125 *          Valid values are 16 (most likely), 12 or 8.
 126 */
 127asmlinkage void aesni_gcm_enc(void *ctx,
 128			struct gcm_context_data *gdata, u8 *out,
 129			const u8 *in, unsigned long plaintext_len, u8 *iv,
 130			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 131			u8 *auth_tag, unsigned long auth_tag_len);
 132
 133/* asmlinkage void aesni_gcm_dec()
 134 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 135 * struct gcm_context_data.  May be uninitialized.
 136 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 137 * const u8 *in, Ciphertext input
 138 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 139 * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
 140 *         16-byte aligned pointer.
 141 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 142 * const u8 *aad, Additional Authentication Data (AAD)
 143 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 144 * to be 8 or 12 bytes
 145 * u8 *auth_tag, Authenticated Tag output.
 146 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 147 * Valid values are 16 (most likely), 12 or 8.
 148 */
 149asmlinkage void aesni_gcm_dec(void *ctx,
 150			struct gcm_context_data *gdata, u8 *out,
 151			const u8 *in, unsigned long ciphertext_len, u8 *iv,
 152			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 153			u8 *auth_tag, unsigned long auth_tag_len);
 154
 155/* Scatter / Gather routines, with args similar to above */
 156asmlinkage void aesni_gcm_init(void *ctx,
 157			       struct gcm_context_data *gdata,
 158			       u8 *iv,
 159			       u8 *hash_subkey, const u8 *aad,
 160			       unsigned long aad_len);
 161asmlinkage void aesni_gcm_enc_update(void *ctx,
 162				     struct gcm_context_data *gdata, u8 *out,
 163				     const u8 *in, unsigned long plaintext_len);
 164asmlinkage void aesni_gcm_dec_update(void *ctx,
 165				     struct gcm_context_data *gdata, u8 *out,
 166				     const u8 *in,
 167				     unsigned long ciphertext_len);
 168asmlinkage void aesni_gcm_finalize(void *ctx,
 169				   struct gcm_context_data *gdata,
 170				   u8 *auth_tag, unsigned long auth_tag_len);
 171
 172static const struct aesni_gcm_tfm_s {
 173	void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv,
 174		     u8 *hash_subkey, const u8 *aad, unsigned long aad_len);
 175	void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
 176			   const u8 *in, unsigned long plaintext_len);
 177	void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
 178			   const u8 *in, unsigned long ciphertext_len);
 179	void (*finalize)(void *ctx, struct gcm_context_data *gdata,
 180			 u8 *auth_tag, unsigned long auth_tag_len);
 181} *aesni_gcm_tfm;
 182
 183static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
 184	.init = &aesni_gcm_init,
 185	.enc_update = &aesni_gcm_enc_update,
 186	.dec_update = &aesni_gcm_dec_update,
 187	.finalize = &aesni_gcm_finalize,
 188};
 189
 190#ifdef CONFIG_AS_AVX
 191asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
 192		void *keys, u8 *out, unsigned int num_bytes);
 193asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
 194		void *keys, u8 *out, unsigned int num_bytes);
 195asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
 196		void *keys, u8 *out, unsigned int num_bytes);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 197/*
 198 * asmlinkage void aesni_gcm_init_avx_gen2()
 199 * gcm_data *my_ctx_data, context data
 200 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 201 */
 202asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
 203					struct gcm_context_data *gdata,
 204					u8 *iv,
 205					u8 *hash_subkey,
 206					const u8 *aad,
 207					unsigned long aad_len);
 208
 209asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
 210				     struct gcm_context_data *gdata, u8 *out,
 211				     const u8 *in, unsigned long plaintext_len);
 212asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
 213				     struct gcm_context_data *gdata, u8 *out,
 214				     const u8 *in,
 215				     unsigned long ciphertext_len);
 216asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
 217				   struct gcm_context_data *gdata,
 218				   u8 *auth_tag, unsigned long auth_tag_len);
 219
 220asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx,
 221				struct gcm_context_data *gdata, u8 *out,
 222			const u8 *in, unsigned long plaintext_len, u8 *iv,
 223			const u8 *aad, unsigned long aad_len,
 224			u8 *auth_tag, unsigned long auth_tag_len);
 225
 226asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx,
 227				struct gcm_context_data *gdata, u8 *out,
 228			const u8 *in, unsigned long ciphertext_len, u8 *iv,
 229			const u8 *aad, unsigned long aad_len,
 230			u8 *auth_tag, unsigned long auth_tag_len);
 231
 232static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
 233	.init = &aesni_gcm_init_avx_gen2,
 234	.enc_update = &aesni_gcm_enc_update_avx_gen2,
 235	.dec_update = &aesni_gcm_dec_update_avx_gen2,
 236	.finalize = &aesni_gcm_finalize_avx_gen2,
 237};
 238
 239#endif
 240
 241#ifdef CONFIG_AS_AVX2
 242/*
 243 * asmlinkage void aesni_gcm_init_avx_gen4()
 244 * gcm_data *my_ctx_data, context data
 245 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 246 */
 247asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
 248					struct gcm_context_data *gdata,
 249					u8 *iv,
 250					u8 *hash_subkey,
 251					const u8 *aad,
 252					unsigned long aad_len);
 253
 254asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
 255				     struct gcm_context_data *gdata, u8 *out,
 256				     const u8 *in, unsigned long plaintext_len);
 257asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
 258				     struct gcm_context_data *gdata, u8 *out,
 259				     const u8 *in,
 260				     unsigned long ciphertext_len);
 261asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
 262				   struct gcm_context_data *gdata,
 263				   u8 *auth_tag, unsigned long auth_tag_len);
 264
 265asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx,
 266				struct gcm_context_data *gdata, u8 *out,
 267			const u8 *in, unsigned long plaintext_len, u8 *iv,
 268			const u8 *aad, unsigned long aad_len,
 269			u8 *auth_tag, unsigned long auth_tag_len);
 270
 271asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx,
 272				struct gcm_context_data *gdata, u8 *out,
 273			const u8 *in, unsigned long ciphertext_len, u8 *iv,
 274			const u8 *aad, unsigned long aad_len,
 275			u8 *auth_tag, unsigned long auth_tag_len);
 276
 277static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
 278	.init = &aesni_gcm_init_avx_gen4,
 279	.enc_update = &aesni_gcm_enc_update_avx_gen4,
 280	.dec_update = &aesni_gcm_dec_update_avx_gen4,
 281	.finalize = &aesni_gcm_finalize_avx_gen4,
 282};
 283
 284#endif
 285
 286static inline struct
 287aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
 288{
 289	unsigned long align = AESNI_ALIGN;
 290
 291	if (align <= crypto_tfm_ctx_alignment())
 292		align = 1;
 293	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
 294}
 295
 296static inline struct
 297generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
 298{
 299	unsigned long align = AESNI_ALIGN;
 300
 301	if (align <= crypto_tfm_ctx_alignment())
 302		align = 1;
 303	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
 304}
 305#endif
 306
 307static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 308{
 309	unsigned long addr = (unsigned long)raw_ctx;
 310	unsigned long align = AESNI_ALIGN;
 311
 312	if (align <= crypto_tfm_ctx_alignment())
 313		align = 1;
 314	return (struct crypto_aes_ctx *)ALIGN(addr, align);
 315}
 316
 317static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 318			      const u8 *in_key, unsigned int key_len)
 319{
 320	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
 321	u32 *flags = &tfm->crt_flags;
 322	int err;
 323
 324	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
 325	    key_len != AES_KEYSIZE_256) {
 326		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 327		return -EINVAL;
 328	}
 329
 330	if (!crypto_simd_usable())
 331		err = aes_expandkey(ctx, in_key, key_len);
 332	else {
 333		kernel_fpu_begin();
 334		err = aesni_set_key(ctx, in_key, key_len);
 335		kernel_fpu_end();
 336	}
 337
 338	return err;
 339}
 340
 341static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 342		       unsigned int key_len)
 343{
 344	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
 345}
 346
 347static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 348{
 349	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 350
 351	if (!crypto_simd_usable()) {
 352		aes_encrypt(ctx, dst, src);
 353	} else {
 354		kernel_fpu_begin();
 355		aesni_enc(ctx, dst, src);
 356		kernel_fpu_end();
 357	}
 358}
 359
 360static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 361{
 362	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 363
 364	if (!crypto_simd_usable()) {
 365		aes_decrypt(ctx, dst, src);
 366	} else {
 367		kernel_fpu_begin();
 368		aesni_dec(ctx, dst, src);
 369		kernel_fpu_end();
 370	}
 371}
 372
 373static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
 374			         unsigned int len)
 375{
 376	return aes_set_key_common(crypto_skcipher_tfm(tfm),
 377				  crypto_skcipher_ctx(tfm), key, len);
 378}
 379
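/*
 * The ECB/CBC handlers below all follow the same skcipher_walk pattern:
 * map the request with skcipher_walk_virt(), process as many whole AES
 * blocks as each step provides (nbytes & AES_BLOCK_MASK) inside one
 * kernel_fpu_begin()/kernel_fpu_end() section, and hand any remainder back
 * to skcipher_walk_done().
 */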
 380static int ecb_encrypt(struct skcipher_request *req)
 381{
 382	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 383	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 384	struct skcipher_walk walk;
 385	unsigned int nbytes;
 386	int err;
 387
 388	err = skcipher_walk_virt(&walk, req, true);
 389
 390	kernel_fpu_begin();
 391	while ((nbytes = walk.nbytes)) {
 392		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 393			      nbytes & AES_BLOCK_MASK);
 394		nbytes &= AES_BLOCK_SIZE - 1;
 395		err = skcipher_walk_done(&walk, nbytes);
 396	}
 397	kernel_fpu_end();
 398
 399	return err;
 400}
 401
 402static int ecb_decrypt(struct skcipher_request *req)
 403{
 404	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 405	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 406	struct skcipher_walk walk;
 407	unsigned int nbytes;
 408	int err;
 409
 410	err = skcipher_walk_virt(&walk, req, true);
 411
 412	kernel_fpu_begin();
 413	while ((nbytes = walk.nbytes)) {
 414		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 415			      nbytes & AES_BLOCK_MASK);
 416		nbytes &= AES_BLOCK_SIZE - 1;
 417		err = skcipher_walk_done(&walk, nbytes);
 418	}
 419	kernel_fpu_end();
 420
 421	return err;
 422}
 423
 424static int cbc_encrypt(struct skcipher_request *req)
 425{
 426	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 427	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 428	struct skcipher_walk walk;
 429	unsigned int nbytes;
 430	int err;
 431
 432	err = skcipher_walk_virt(&walk, req, true);
 433
 434	kernel_fpu_begin();
 435	while ((nbytes = walk.nbytes)) {
 436		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 437			      nbytes & AES_BLOCK_MASK, walk.iv);
 438		nbytes &= AES_BLOCK_SIZE - 1;
 439		err = skcipher_walk_done(&walk, nbytes);
 440	}
 441	kernel_fpu_end();
 442
 443	return err;
 444}
 445
 446static int cbc_decrypt(struct skcipher_request *req)
 447{
 448	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 449	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 450	struct skcipher_walk walk;
 451	unsigned int nbytes;
 452	int err;
 453
 454	err = skcipher_walk_virt(&walk, req, true);
 455
 456	kernel_fpu_begin();
 457	while ((nbytes = walk.nbytes)) {
 458		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 459			      nbytes & AES_BLOCK_MASK, walk.iv);
 460		nbytes &= AES_BLOCK_SIZE - 1;
 461		err = skcipher_walk_done(&walk, nbytes);
 462	}
 463	kernel_fpu_end();
 464
 465	return err;
 466}
 467
 468#ifdef CONFIG_X86_64
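/*
 * Handle the final partial block of a CTR request: encrypt the current
 * counter block to produce one block of keystream, XOR only the remaining
 * nbytes of it into the output, and bump the counter.
 */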
 469static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
 470			    struct skcipher_walk *walk)
 471{
 472	u8 *ctrblk = walk->iv;
 473	u8 keystream[AES_BLOCK_SIZE];
 474	u8 *src = walk->src.virt.addr;
 475	u8 *dst = walk->dst.virt.addr;
 476	unsigned int nbytes = walk->nbytes;
 477
 478	aesni_enc(ctx, keystream, ctrblk);
 479	crypto_xor_cpy(dst, keystream, src, nbytes);
 480
 481	crypto_inc(ctrblk, AES_BLOCK_SIZE);
 482}
 483
 484#ifdef CONFIG_AS_AVX
 485static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
 486			      const u8 *in, unsigned int len, u8 *iv)
 487{
 488	/*
 489	 * Based on the key length, dispatch to the "by8" version of
 490	 * CTR mode encryption/decryption for improved performance.
 491	 * aes_set_key_common() ensures that the key length is one of
 492	 * {128, 192, 256} bits.
 493	 */
 494	if (ctx->key_length == AES_KEYSIZE_128)
 495		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
 496	else if (ctx->key_length == AES_KEYSIZE_192)
 497		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
 498	else
 499		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
 500}
 501#endif
 502
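/*
 * CTR mode: full blocks go through aesni_ctr_enc_tfm (plain AES-NI or the
 * AVX "by8" variant selected at module init); a trailing partial block is
 * finished by ctr_crypt_final() above.  Encryption and decryption are the
 * same operation in CTR mode, so both skcipher callbacks point here.
 */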
 503static int ctr_crypt(struct skcipher_request *req)
 504{
 505	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 506	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
 507	struct skcipher_walk walk;
 508	unsigned int nbytes;
 509	int err;
 510
 511	err = skcipher_walk_virt(&walk, req, true);
 512
 513	kernel_fpu_begin();
 514	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 515		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 516			              nbytes & AES_BLOCK_MASK, walk.iv);
 517		nbytes &= AES_BLOCK_SIZE - 1;
 518		err = skcipher_walk_done(&walk, nbytes);
 519	}
 520	if (walk.nbytes) {
 521		ctr_crypt_final(ctx, &walk);
 522		err = skcipher_walk_done(&walk, 0);
 523	}
 524	kernel_fpu_end();
 525
 526	return err;
 527}
 528
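/*
 * XTS setkey: xts_verify_key() sanity-checks the combined key, which must
 * be twice the AES key size; the first half is then expanded as the
 * data-encryption key and the second half as the tweak-encryption key.
 */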
 529static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
 530			    unsigned int keylen)
 531{
 532	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 533	int err;
 534
 535	err = xts_verify_key(tfm, key, keylen);
 536	if (err)
 537		return err;
 538
 539	keylen /= 2;
 540
 541	/* first half of xts-key is for crypt */
 542	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
 543				 key, keylen);
 544	if (err)
 545		return err;
 546
 547	/* second half of xts-key is for tweak */
 548	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
 549				  key + keylen, keylen);
 550}
 551
 552
 553static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
 554{
 555	aesni_enc(ctx, out, in);
 556}
 557
 558static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
 559{
 560	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
 561}
 562
 563static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
 564{
 565	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
 566}
 567
 568static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
 569{
 570	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
 571}
 572
 573static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
 574{
 575	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
 576}
 577
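/*
 * Dispatch tables for the XTS glue helper: an 8-blocks-at-a-time assembly
 * fast path with a single-block fallback for the tail of the request.
 */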
 578static const struct common_glue_ctx aesni_enc_xts = {
 579	.num_funcs = 2,
 580	.fpu_blocks_limit = 1,
 581
 582	.funcs = { {
 583		.num_blocks = 8,
 584		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
 585	}, {
 586		.num_blocks = 1,
 587		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
 588	} }
 589};
 590
 591static const struct common_glue_ctx aesni_dec_xts = {
 592	.num_funcs = 2,
 593	.fpu_blocks_limit = 1,
 594
 595	.funcs = { {
 596		.num_blocks = 8,
 597		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
 598	}, {
 599		.num_blocks = 1,
 600		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
 601	} }
 602};
 603
 604static int xts_encrypt(struct skcipher_request *req)
 605{
 606	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 607	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 608
 609	return glue_xts_req_128bit(&aesni_enc_xts, req,
 610				   XTS_TWEAK_CAST(aesni_xts_tweak),
 611				   aes_ctx(ctx->raw_tweak_ctx),
 612				   aes_ctx(ctx->raw_crypt_ctx),
 613				   false);
 614}
 615
 616static int xts_decrypt(struct skcipher_request *req)
 617{
 618	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 619	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 620
 621	return glue_xts_req_128bit(&aesni_dec_xts, req,
 622				   XTS_TWEAK_CAST(aesni_xts_tweak),
 623				   aes_ctx(ctx->raw_tweak_ctx),
 624				   aes_ctx(ctx->raw_crypt_ctx),
 625				   true);
 626}
 627
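/*
 * Derive the GHASH subkey H = AES-K(0^128): expand the key on the stack,
 * encrypt an all-zero block into hash_subkey, and wipe the temporary key
 * schedule afterwards.
 */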
 628static int
 629rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
 630{
 631	struct crypto_aes_ctx ctx;
 632	int ret;
 633
 634	ret = aes_expandkey(&ctx, key, key_len);
 635	if (ret)
 636		return ret;
 637
 638	/* The hash subkey is the encryption of an all-zero block, */
 639	/* so clear the hash subkey container before ciphering it. */
 640	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
 641
 642	aes_encrypt(&ctx, hash_subkey, hash_subkey);
 643
 644	memzero_explicit(&ctx, sizeof(ctx));
 645	return 0;
 646}
 647
 648static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
 649				  unsigned int key_len)
 650{
 651	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
 652
 653	if (key_len < 4) {
 654		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
 655		return -EINVAL;
 656	}
 657	/* Account for the 4-byte nonce at the end. */
 658	key_len -= 4;
 659
 660	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
 661
 662	return aes_set_key_common(crypto_aead_tfm(aead),
 663				  &ctx->aes_key_expanded, key, key_len) ?:
 664	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
 665}
 666
 667/* This is the Integrity Check Value (aka the authentication tag) length and can
 668 * be 8, 12 or 16 bytes long. */
 669static int common_rfc4106_set_authsize(struct crypto_aead *aead,
 670				       unsigned int authsize)
 671{
 672	switch (authsize) {
 673	case 8:
 674	case 12:
 675	case 16:
 676		break;
 677	default:
 678		return -EINVAL;
 679	}
 680
 681	return 0;
 682}
 683
 684static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
 685				       unsigned int authsize)
 686{
 687	switch (authsize) {
 688	case 4:
 689	case 8:
 690	case 12:
 691	case 13:
 692	case 14:
 693	case 15:
 694	case 16:
 695		break;
 696	default:
 697		return -EINVAL;
 698	}
 699
 700	return 0;
 701}
 702
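/*
 * Core GCM path shared by encryption and decryption.  Rough flow:
 *   1. pick an SSE/AVX/AVX2 implementation sized to the request,
 *   2. linearize the AAD (mapping it in place when it is already
 *      contiguous, otherwise copying it into a heap buffer),
 *   3. walk the src/dst scatterlists and feed each mapped span to the
 *      enc_update/dec_update routines under kernel_fpu_begin(),
 *   4. finalize the tag; on decryption compare it against the tag carried
 *      in the request and return -EBADMSG on mismatch, on encryption
 *      append the computed tag to the destination.
 */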
 703static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 704			      unsigned int assoclen, u8 *hash_subkey,
 705			      u8 *iv, void *aes_ctx)
 706{
 707	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 708	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
 709	const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
 710	struct gcm_context_data data AESNI_ALIGN_ATTR;
 711	struct scatter_walk dst_sg_walk = {};
 712	unsigned long left = req->cryptlen;
 713	unsigned long len, srclen, dstlen;
 714	struct scatter_walk assoc_sg_walk;
 715	struct scatter_walk src_sg_walk;
 716	struct scatterlist src_start[2];
 717	struct scatterlist dst_start[2];
 718	struct scatterlist *src_sg;
 719	struct scatterlist *dst_sg;
 720	u8 *src, *dst, *assoc;
 721	u8 *assocmem = NULL;
 722	u8 authTag[16];
 723
 724	if (!enc)
 725		left -= auth_tag_len;
 726
 727#ifdef CONFIG_AS_AVX2
 728	if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
 729		gcm_tfm = &aesni_gcm_tfm_avx_gen2;
 730#endif
 731#ifdef CONFIG_AS_AVX
 732	if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
 733		gcm_tfm = &aesni_gcm_tfm_sse;
 734#endif
 735
 736	/* Linearize assoc, if not already linear */
 737	if (req->src->length >= assoclen && req->src->length &&
 738		(!PageHighMem(sg_page(req->src)) ||
 739			req->src->offset + req->src->length <= PAGE_SIZE)) {
 740		scatterwalk_start(&assoc_sg_walk, req->src);
 741		assoc = scatterwalk_map(&assoc_sg_walk);
 742	} else {
 743		/* assoc can be any length, so must be on heap */
 744		assocmem = kmalloc(assoclen, GFP_ATOMIC);
 745		if (unlikely(!assocmem))
 746			return -ENOMEM;
 747		assoc = assocmem;
 748
 749		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
 750	}
 751
 752	if (left) {
 753		src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
 754		scatterwalk_start(&src_sg_walk, src_sg);
 755		if (req->src != req->dst) {
 756			dst_sg = scatterwalk_ffwd(dst_start, req->dst,
 757						  req->assoclen);
 758			scatterwalk_start(&dst_sg_walk, dst_sg);
 759		}
 760	}
 761
 762	kernel_fpu_begin();
 763	gcm_tfm->init(aes_ctx, &data, iv,
 764		hash_subkey, assoc, assoclen);
 765	if (req->src != req->dst) {
 766		while (left) {
 767			src = scatterwalk_map(&src_sg_walk);
 768			dst = scatterwalk_map(&dst_sg_walk);
 769			srclen = scatterwalk_clamp(&src_sg_walk, left);
 770			dstlen = scatterwalk_clamp(&dst_sg_walk, left);
 771			len = min(srclen, dstlen);
 772			if (len) {
 773				if (enc)
 774					gcm_tfm->enc_update(aes_ctx, &data,
 775							     dst, src, len);
 776				else
 777					gcm_tfm->dec_update(aes_ctx, &data,
 778							     dst, src, len);
 779			}
 780			left -= len;
 781
 782			scatterwalk_unmap(src);
 783			scatterwalk_unmap(dst);
 784			scatterwalk_advance(&src_sg_walk, len);
 785			scatterwalk_advance(&dst_sg_walk, len);
 786			scatterwalk_done(&src_sg_walk, 0, left);
 787			scatterwalk_done(&dst_sg_walk, 1, left);
 788		}
 789	} else {
 790		while (left) {
 791			dst = src = scatterwalk_map(&src_sg_walk);
 792			len = scatterwalk_clamp(&src_sg_walk, left);
 793			if (len) {
 794				if (enc)
 795					gcm_tfm->enc_update(aes_ctx, &data,
 796							     src, src, len);
 797				else
 798					gcm_tfm->dec_update(aes_ctx, &data,
 799							     src, src, len);
 800			}
 801			left -= len;
 802			scatterwalk_unmap(src);
 803			scatterwalk_advance(&src_sg_walk, len);
 804			scatterwalk_done(&src_sg_walk, 1, left);
 805		}
 806	}
 807	gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len);
 808	kernel_fpu_end();
 809
 810	if (!assocmem)
 811		scatterwalk_unmap(assoc);
 812	else
 813		kfree(assocmem);
 814
 815	if (!enc) {
 816		u8 authTagMsg[16];
 817
 818		/* Copy out original authTag */
 819		scatterwalk_map_and_copy(authTagMsg, req->src,
 820					 req->assoclen + req->cryptlen -
 821					 auth_tag_len,
 822					 auth_tag_len, 0);
 823
 824		/* Compare generated tag with passed in tag. */
 825		return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
 826			-EBADMSG : 0;
 827	}
 828
 829	/* Copy in the authTag */
 830	scatterwalk_map_and_copy(authTag, req->dst,
 831				 req->assoclen + req->cryptlen,
 832				 auth_tag_len, 1);
 833
 834	return 0;
 835}
 836
 837static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
 838			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
 839{
 840	return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
 841				aes_ctx);
 842}
 843
 844static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
 845			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
 846{
 847	return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
 848				aes_ctx);
 849}
 850
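/*
 * RFC4106 (IPsec ESP) front ends: the per-key 4-byte nonce and the 8-byte
 * explicit IV from the request are concatenated with a 32-bit counter of 1
 * to form the 16-byte GCM counter block, and the explicit IV is excluded
 * from the authenticated AAD (hence req->assoclen - 8).
 */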
 851static int helper_rfc4106_encrypt(struct aead_request *req)
 852{
 853	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 854	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 855	void *aes_ctx = &(ctx->aes_key_expanded);
 856	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
 857	unsigned int i;
 858	__be32 counter = cpu_to_be32(1);
 859
 860	/* Assuming we support RFC4106 64-bit extended sequence */
 861	/* numbers, the AAD length must be equal to 16 or 20 bytes */
 862	/* (the 8-byte explicit IV is counted in req->assoclen). */
 863	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
 864		return -EINVAL;
 865
 866	/* Build the IV: 4-byte nonce || 8-byte explicit IV || 32-bit counter = 1 */
 867	for (i = 0; i < 4; i++)
 868		*(iv+i) = ctx->nonce[i];
 869	for (i = 0; i < 8; i++)
 870		*(iv+4+i) = req->iv[i];
 871	*((__be32 *)(iv+12)) = counter;
 872
 873	return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
 874			      aes_ctx);
 875}
 876
 877static int helper_rfc4106_decrypt(struct aead_request *req)
 878{
 879	__be32 counter = cpu_to_be32(1);
 880	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 881	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 882	void *aes_ctx = &(ctx->aes_key_expanded);
 883	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
 884	unsigned int i;
 885
 886	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
 887		return -EINVAL;
 888
 889	/* Assuming we support RFC4106 64-bit extended sequence */
 890	/* numbers, the AAD length must be equal to 16 or 20 bytes */
 891	/* (checked above). */
 892
 893	/* Build the IV: 4-byte nonce || 8-byte explicit IV || 32-bit counter = 1 */
 894	for (i = 0; i < 4; i++)
 895		*(iv+i) = ctx->nonce[i];
 896	for (i = 0; i < 8; i++)
 897		*(iv+4+i) = req->iv[i];
 898	*((__be32 *)(iv+12)) = counter;
 899
 900	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
 901			      aes_ctx);
 902}
 903#endif
 904
 905static struct crypto_alg aesni_cipher_alg = {
 906	.cra_name		= "aes",
 907	.cra_driver_name	= "aes-aesni",
 908	.cra_priority		= 300,
 909	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 910	.cra_blocksize		= AES_BLOCK_SIZE,
 911	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
 912	.cra_module		= THIS_MODULE,
 913	.cra_u	= {
 914		.cipher	= {
 915			.cia_min_keysize	= AES_MIN_KEY_SIZE,
 916			.cia_max_keysize	= AES_MAX_KEY_SIZE,
 917			.cia_setkey		= aes_set_key,
 918			.cia_encrypt		= aesni_encrypt,
 919			.cia_decrypt		= aesni_decrypt
 920		}
 921	}
 922};
 923
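/*
 * The skcipher and AEAD algorithms below are registered with a "__" name
 * prefix and CRYPTO_ALG_INTERNAL: they assume the FPU is usable, so
 * simd_register_*_compat() wraps them in simd helpers that defer to cryptd
 * when invoked from a context without SIMD.
 */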
 924static struct skcipher_alg aesni_skciphers[] = {
 925	{
 926		.base = {
 927			.cra_name		= "__ecb(aes)",
 928			.cra_driver_name	= "__ecb-aes-aesni",
 929			.cra_priority		= 400,
 930			.cra_flags		= CRYPTO_ALG_INTERNAL,
 931			.cra_blocksize		= AES_BLOCK_SIZE,
 932			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
 933			.cra_module		= THIS_MODULE,
 934		},
 935		.min_keysize	= AES_MIN_KEY_SIZE,
 936		.max_keysize	= AES_MAX_KEY_SIZE,
 937		.setkey		= aesni_skcipher_setkey,
 938		.encrypt	= ecb_encrypt,
 939		.decrypt	= ecb_decrypt,
 940	}, {
 941		.base = {
 942			.cra_name		= "__cbc(aes)",
 943			.cra_driver_name	= "__cbc-aes-aesni",
 944			.cra_priority		= 400,
 945			.cra_flags		= CRYPTO_ALG_INTERNAL,
 946			.cra_blocksize		= AES_BLOCK_SIZE,
 947			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
 948			.cra_module		= THIS_MODULE,
 949		},
 950		.min_keysize	= AES_MIN_KEY_SIZE,
 951		.max_keysize	= AES_MAX_KEY_SIZE,
 952		.ivsize		= AES_BLOCK_SIZE,
 953		.setkey		= aesni_skcipher_setkey,
 954		.encrypt	= cbc_encrypt,
 955		.decrypt	= cbc_decrypt,
 956#ifdef CONFIG_X86_64
 957	}, {
 958		.base = {
 959			.cra_name		= "__ctr(aes)",
 960			.cra_driver_name	= "__ctr-aes-aesni",
 961			.cra_priority		= 400,
 962			.cra_flags		= CRYPTO_ALG_INTERNAL,
 963			.cra_blocksize		= 1,
 964			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
 965			.cra_module		= THIS_MODULE,
 966		},
 967		.min_keysize	= AES_MIN_KEY_SIZE,
 968		.max_keysize	= AES_MAX_KEY_SIZE,
 969		.ivsize		= AES_BLOCK_SIZE,
 970		.chunksize	= AES_BLOCK_SIZE,
 971		.setkey		= aesni_skcipher_setkey,
 972		.encrypt	= ctr_crypt,
 973		.decrypt	= ctr_crypt,
 974	}, {
 975		.base = {
 976			.cra_name		= "__xts(aes)",
 977			.cra_driver_name	= "__xts-aes-aesni",
 978			.cra_priority		= 401,
 979			.cra_flags		= CRYPTO_ALG_INTERNAL,
 980			.cra_blocksize		= AES_BLOCK_SIZE,
 981			.cra_ctxsize		= XTS_AES_CTX_SIZE,
 982			.cra_module		= THIS_MODULE,
 983		},
 984		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
 985		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
 986		.ivsize		= AES_BLOCK_SIZE,
 987		.setkey		= xts_aesni_setkey,
 988		.encrypt	= xts_encrypt,
 989		.decrypt	= xts_decrypt,
 990#endif
 991	}
 992};
 993
 994static
 995struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
 996
 997#ifdef CONFIG_X86_64
 998static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
 999				  unsigned int key_len)
1000{
1001	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
1002
1003	return aes_set_key_common(crypto_aead_tfm(aead),
1004				  &ctx->aes_key_expanded, key, key_len) ?:
1005	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
1006}
1007
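/*
 * Plain gcm(aes): the 12-byte IV supplied with the request is used directly
 * and padded with a 32-bit big-endian counter value of 1, per the GCM
 * convention for 96-bit IVs; the full req->assoclen is authenticated.
 */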
1008static int generic_gcmaes_encrypt(struct aead_request *req)
1009{
1010	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1011	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1012	void *aes_ctx = &(ctx->aes_key_expanded);
1013	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
1014	__be32 counter = cpu_to_be32(1);
1015
1016	memcpy(iv, req->iv, 12);
1017	*((__be32 *)(iv+12)) = counter;
1018
1019	return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
1020			      aes_ctx);
1021}
1022
1023static int generic_gcmaes_decrypt(struct aead_request *req)
1024{
1025	__be32 counter = cpu_to_be32(1);
1026	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1027	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1028	void *aes_ctx = &(ctx->aes_key_expanded);
1029	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
1030
1031	memcpy(iv, req->iv, 12);
1032	*((__be32 *)(iv+12)) = counter;
1033
1034	return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
1035			      aes_ctx);
1036}
1037
1038static struct aead_alg aesni_aeads[] = { {
1039	.setkey			= common_rfc4106_set_key,
1040	.setauthsize		= common_rfc4106_set_authsize,
1041	.encrypt		= helper_rfc4106_encrypt,
1042	.decrypt		= helper_rfc4106_decrypt,
1043	.ivsize			= GCM_RFC4106_IV_SIZE,
1044	.maxauthsize		= 16,
1045	.base = {
1046		.cra_name		= "__rfc4106(gcm(aes))",
1047		.cra_driver_name	= "__rfc4106-gcm-aesni",
1048		.cra_priority		= 400,
1049		.cra_flags		= CRYPTO_ALG_INTERNAL,
1050		.cra_blocksize		= 1,
1051		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
1052		.cra_alignmask		= AESNI_ALIGN - 1,
1053		.cra_module		= THIS_MODULE,
1054	},
1055}, {
1056	.setkey			= generic_gcmaes_set_key,
1057	.setauthsize		= generic_gcmaes_set_authsize,
1058	.encrypt		= generic_gcmaes_encrypt,
1059	.decrypt		= generic_gcmaes_decrypt,
1060	.ivsize			= GCM_AES_IV_SIZE,
1061	.maxauthsize		= 16,
1062	.base = {
1063		.cra_name		= "__gcm(aes)",
1064		.cra_driver_name	= "__generic-gcm-aesni",
1065		.cra_priority		= 400,
1066		.cra_flags		= CRYPTO_ALG_INTERNAL,
1067		.cra_blocksize		= 1,
1068		.cra_ctxsize		= sizeof(struct generic_gcmaes_ctx),
1069		.cra_alignmask		= AESNI_ALIGN - 1,
1070		.cra_module		= THIS_MODULE,
1071	},
1072} };
1073#else
1074static struct aead_alg aesni_aeads[0];
1075#endif
1076
1077static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];
1078
1079static const struct x86_cpu_id aesni_cpu_id[] = {
1080	X86_FEATURE_MATCH(X86_FEATURE_AES),
1081	{}
1082};
1083MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1084
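/*
 * Module init: require the AES CPU feature, pick the widest available GCM
 * implementation (AVX2 "gen4", AVX "gen2", or SSE) and, when AVX is
 * present, the "by8" CTR routine, then register the bare cipher, the
 * skciphers and the AEADs, unwinding earlier registrations on failure.
 */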
1085static int __init aesni_init(void)
1086{
1087	int err;
1088
1089	if (!x86_match_cpu(aesni_cpu_id))
1090		return -ENODEV;
1091#ifdef CONFIG_X86_64
1092#ifdef CONFIG_AS_AVX2
1093	if (boot_cpu_has(X86_FEATURE_AVX2)) {
1094		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1095		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
1096	} else
1097#endif
1098#ifdef CONFIG_AS_AVX
1099	if (boot_cpu_has(X86_FEATURE_AVX)) {
1100		pr_info("AVX version of gcm_enc/dec engaged.\n");
1101		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
1102	} else
1103#endif
1104	{
1105		pr_info("SSE version of gcm_enc/dec engaged.\n");
1106		aesni_gcm_tfm = &aesni_gcm_tfm_sse;
1107	}
1108	aesni_ctr_enc_tfm = aesni_ctr_enc;
1109#ifdef CONFIG_AS_AVX
1110	if (boot_cpu_has(X86_FEATURE_AVX)) {
1111		/* optimize performance of ctr mode encryption transform */
1112		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
1113		pr_info("AES CTR mode by8 optimization enabled\n");
1114	}
1115#endif
1116#endif
1117
1118	err = crypto_register_alg(&aesni_cipher_alg);
1119	if (err)
1120		return err;
1121
1122	err = simd_register_skciphers_compat(aesni_skciphers,
1123					     ARRAY_SIZE(aesni_skciphers),
1124					     aesni_simd_skciphers);
1125	if (err)
1126		goto unregister_cipher;
1127
1128	err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1129					 aesni_simd_aeads);
1130	if (err)
1131		goto unregister_skciphers;
1132
1133	return 0;
1134
1135unregister_skciphers:
1136	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1137				  aesni_simd_skciphers);
1138unregister_cipher:
1139	crypto_unregister_alg(&aesni_cipher_alg);
1140	return err;
1141}
1142
1143static void __exit aesni_exit(void)
1144{
1145	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1146			      aesni_simd_aeads);
1147	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1148				  aesni_simd_skciphers);
1149	crypto_unregister_alg(&aesni_cipher_alg);
1150}
1151
1152late_initcall(aesni_init);
1153module_exit(aesni_exit);
1154
1155MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1156MODULE_LICENSE("GPL");
1157MODULE_ALIAS_CRYPTO("aes");