   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Cryptographic API.
   4 *
   5 * Support for ATMEL AES HW acceleration.
   6 *
   7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
   8 * Author: Nicolas Royer <nicolas@eukrea.com>
   9 *
  10 * Some ideas are from omap-aes.c driver.
  11 */
  12
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/slab.h>
  17#include <linux/err.h>
  18#include <linux/clk.h>
  19#include <linux/io.h>
  20#include <linux/hw_random.h>
  21#include <linux/platform_device.h>
  22
  23#include <linux/device.h>
  24#include <linux/dmaengine.h>
  25#include <linux/init.h>
  26#include <linux/errno.h>
  27#include <linux/interrupt.h>
  28#include <linux/irq.h>
  29#include <linux/scatterlist.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/mod_devicetable.h>
  32#include <linux/delay.h>
  33#include <linux/crypto.h>
  34#include <crypto/scatterwalk.h>
  35#include <crypto/algapi.h>
  36#include <crypto/aes.h>
  37#include <crypto/gcm.h>
  38#include <crypto/xts.h>
  39#include <crypto/internal/aead.h>
  40#include <crypto/internal/skcipher.h>
  41#include "atmel-aes-regs.h"
  42#include "atmel-authenc.h"
  43
  44#define ATMEL_AES_PRIORITY	300
  45
  46#define ATMEL_AES_BUFFER_ORDER	2
  47#define ATMEL_AES_BUFFER_SIZE	(PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)
  48
  49#define SIZE_IN_WORDS(x)	((x) >> 2)
  50
  51/* AES flags */
  52/* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */
  53#define AES_FLAGS_ENCRYPT	AES_MR_CYPHER_ENC
  54#define AES_FLAGS_GTAGEN	AES_MR_GTAGEN
  55#define AES_FLAGS_OPMODE_MASK	(AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
  56#define AES_FLAGS_ECB		AES_MR_OPMOD_ECB
  57#define AES_FLAGS_CBC		AES_MR_OPMOD_CBC
  58#define AES_FLAGS_CTR		AES_MR_OPMOD_CTR
  59#define AES_FLAGS_GCM		AES_MR_OPMOD_GCM
  60#define AES_FLAGS_XTS		AES_MR_OPMOD_XTS
  61
  62#define AES_FLAGS_MODE_MASK	(AES_FLAGS_OPMODE_MASK |	\
  63				 AES_FLAGS_ENCRYPT |		\
  64				 AES_FLAGS_GTAGEN)
  65
  66#define AES_FLAGS_BUSY		BIT(3)
  67#define AES_FLAGS_DUMP_REG	BIT(4)
  68#define AES_FLAGS_OWN_SHA	BIT(5)
  69
  70#define AES_FLAGS_PERSISTENT	AES_FLAGS_BUSY
  71
  72#define ATMEL_AES_QUEUE_LENGTH	50
  73
  74#define ATMEL_AES_DMA_THRESHOLD		256
  75
  76
  77struct atmel_aes_caps {
  78	bool			has_dualbuff;
  79	bool			has_gcm;
  80	bool			has_xts;
  81	bool			has_authenc;
  82	u32			max_burst_size;
  83};
  84
  85struct atmel_aes_dev;
  86
  87
  88typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);
  89
  90
  91struct atmel_aes_base_ctx {
  92	struct atmel_aes_dev	*dd;
  93	atmel_aes_fn_t		start;
  94	int			keylen;
  95	u32			key[AES_KEYSIZE_256 / sizeof(u32)];
  96	u16			block_size;
  97	bool			is_aead;
  98};
  99
 100struct atmel_aes_ctx {
 101	struct atmel_aes_base_ctx	base;
 102};
 103
 104struct atmel_aes_ctr_ctx {
 105	struct atmel_aes_base_ctx	base;
 106
 107	__be32			iv[AES_BLOCK_SIZE / sizeof(u32)];
 108	size_t			offset;
 109	struct scatterlist	src[2];
 110	struct scatterlist	dst[2];
 111	u32			blocks;
 112};
 113
 114struct atmel_aes_gcm_ctx {
 115	struct atmel_aes_base_ctx	base;
 116
 117	struct scatterlist	src[2];
 118	struct scatterlist	dst[2];
 119
 120	__be32			j0[AES_BLOCK_SIZE / sizeof(u32)];
 121	u32			tag[AES_BLOCK_SIZE / sizeof(u32)];
 122	__be32			ghash[AES_BLOCK_SIZE / sizeof(u32)];
 123	size_t			textlen;
 124
 125	const __be32		*ghash_in;
 126	__be32			*ghash_out;
 127	atmel_aes_fn_t		ghash_resume;
 128};
 129
 130struct atmel_aes_xts_ctx {
 131	struct atmel_aes_base_ctx	base;
 132
 133	u32			key2[AES_KEYSIZE_256 / sizeof(u32)];
 134	struct crypto_skcipher *fallback_tfm;
 135};
 136
 137#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
 138struct atmel_aes_authenc_ctx {
 139	struct atmel_aes_base_ctx	base;
 140	struct atmel_sha_authenc_ctx	*auth;
 141};
 142#endif
 143
 144struct atmel_aes_reqctx {
 145	unsigned long		mode;
 146	u8			lastc[AES_BLOCK_SIZE];
 147	struct skcipher_request fallback_req;
 148};
 149
 150#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
 151struct atmel_aes_authenc_reqctx {
 152	struct atmel_aes_reqctx	base;
 153
 154	struct scatterlist	src[2];
 155	struct scatterlist	dst[2];
 156	size_t			textlen;
 157	u32			digest[SHA512_DIGEST_SIZE / sizeof(u32)];
 158
  159	/* auth_req MUST be placed last. */
 160	struct ahash_request	auth_req;
 161};
 162#endif
 163
 164struct atmel_aes_dma {
 165	struct dma_chan		*chan;
 166	struct scatterlist	*sg;
 167	int			nents;
 168	unsigned int		remainder;
 169	unsigned int		sg_len;
 170};
 171
 172struct atmel_aes_dev {
 173	struct list_head	list;
 174	unsigned long		phys_base;
 175	void __iomem		*io_base;
 176
 177	struct crypto_async_request	*areq;
 178	struct atmel_aes_base_ctx	*ctx;
 179
 180	bool			is_async;
 181	atmel_aes_fn_t		resume;
 182	atmel_aes_fn_t		cpu_transfer_complete;
 183
 184	struct device		*dev;
 185	struct clk		*iclk;
 186	int			irq;
 187
 188	unsigned long		flags;
 189
 190	spinlock_t		lock;
 191	struct crypto_queue	queue;
 192
 193	struct tasklet_struct	done_task;
 194	struct tasklet_struct	queue_task;
 195
 196	size_t			total;
 197	size_t			datalen;
 198	u32			*data;
 199
 200	struct atmel_aes_dma	src;
 201	struct atmel_aes_dma	dst;
 202
 203	size_t			buflen;
 204	void			*buf;
 205	struct scatterlist	aligned_sg;
 206	struct scatterlist	*real_dst;
 207
 208	struct atmel_aes_caps	caps;
 209
 210	u32			hw_version;
 211};
 212
 213struct atmel_aes_drv {
 214	struct list_head	dev_list;
 215	spinlock_t		lock;
 216};
 217
 218static struct atmel_aes_drv atmel_aes = {
 219	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
 220	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
 221};
 222
 223#ifdef VERBOSE_DEBUG
 224static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
 225{
 226	switch (offset) {
 227	case AES_CR:
 228		return "CR";
 229
 230	case AES_MR:
 231		return "MR";
 232
 233	case AES_ISR:
 234		return "ISR";
 235
 236	case AES_IMR:
 237		return "IMR";
 238
 239	case AES_IER:
 240		return "IER";
 241
 242	case AES_IDR:
 243		return "IDR";
 244
 245	case AES_KEYWR(0):
 246	case AES_KEYWR(1):
 247	case AES_KEYWR(2):
 248	case AES_KEYWR(3):
 249	case AES_KEYWR(4):
 250	case AES_KEYWR(5):
 251	case AES_KEYWR(6):
 252	case AES_KEYWR(7):
 253		snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2);
 254		break;
 255
 256	case AES_IDATAR(0):
 257	case AES_IDATAR(1):
 258	case AES_IDATAR(2):
 259	case AES_IDATAR(3):
 260		snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2);
 261		break;
 262
 263	case AES_ODATAR(0):
 264	case AES_ODATAR(1):
 265	case AES_ODATAR(2):
 266	case AES_ODATAR(3):
 267		snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2);
 268		break;
 269
 270	case AES_IVR(0):
 271	case AES_IVR(1):
 272	case AES_IVR(2):
 273	case AES_IVR(3):
 274		snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2);
 275		break;
 276
 277	case AES_AADLENR:
 278		return "AADLENR";
 279
 280	case AES_CLENR:
 281		return "CLENR";
 282
 283	case AES_GHASHR(0):
 284	case AES_GHASHR(1):
 285	case AES_GHASHR(2):
 286	case AES_GHASHR(3):
 287		snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2);
 288		break;
 289
 290	case AES_TAGR(0):
 291	case AES_TAGR(1):
 292	case AES_TAGR(2):
 293	case AES_TAGR(3):
 294		snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2);
 295		break;
 296
 297	case AES_CTRR:
 298		return "CTRR";
 299
 300	case AES_GCMHR(0):
 301	case AES_GCMHR(1):
 302	case AES_GCMHR(2):
 303	case AES_GCMHR(3):
 304		snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
 305		break;
 306
 307	case AES_EMR:
 308		return "EMR";
 309
 310	case AES_TWR(0):
 311	case AES_TWR(1):
 312	case AES_TWR(2):
 313	case AES_TWR(3):
 314		snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2);
 315		break;
 316
 317	case AES_ALPHAR(0):
 318	case AES_ALPHAR(1):
 319	case AES_ALPHAR(2):
 320	case AES_ALPHAR(3):
 321		snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2);
 322		break;
 323
 324	default:
 325		snprintf(tmp, sz, "0x%02x", offset);
 326		break;
 327	}
 328
 329	return tmp;
 330}
 331#endif /* VERBOSE_DEBUG */
 332
 333/* Shared functions */
 334
 335static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
 336{
 337	u32 value = readl_relaxed(dd->io_base + offset);
 338
 339#ifdef VERBOSE_DEBUG
 340	if (dd->flags & AES_FLAGS_DUMP_REG) {
 341		char tmp[16];
 342
 343		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
 344			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
 345	}
 346#endif /* VERBOSE_DEBUG */
 347
 348	return value;
 349}
 350
 351static inline void atmel_aes_write(struct atmel_aes_dev *dd,
 352					u32 offset, u32 value)
 353{
 354#ifdef VERBOSE_DEBUG
 355	if (dd->flags & AES_FLAGS_DUMP_REG) {
 356		char tmp[16];
 357
 358		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
 359			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
 360	}
 361#endif /* VERBOSE_DEBUG */
 362
 363	writel_relaxed(value, dd->io_base + offset);
 364}
 365
 366static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
 367					u32 *value, int count)
 368{
 369	for (; count--; value++, offset += 4)
 370		*value = atmel_aes_read(dd, offset);
 371}
 372
 373static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
 374			      const u32 *value, int count)
 375{
 376	for (; count--; value++, offset += 4)
 377		atmel_aes_write(dd, offset, *value);
 378}
 379
 380static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
 381					void *value)
 382{
 383	atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
 384}
 385
 386static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
 387					 const void *value)
 388{
 389	atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
 390}
 391
 392static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
 393						atmel_aes_fn_t resume)
 394{
 395	u32 isr = atmel_aes_read(dd, AES_ISR);
 396
 397	if (unlikely(isr & AES_INT_DATARDY))
 398		return resume(dd);
 399
 400	dd->resume = resume;
 401	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
 402	return -EINPROGRESS;
 403}
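/*
 * Illustrative sketch (not part of the driver): each step of the request
 * state machine is an atmel_aes_fn_t chained through
 * atmel_aes_wait_for_data_ready(). A hypothetical step named my_next_step
 * would be scheduled like this:
 *
 *	static int my_next_step(struct atmel_aes_dev *dd)
 *	{
 *		atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
 *		return atmel_aes_complete(dd, 0);
 *	}
 *
 *	return atmel_aes_wait_for_data_ready(dd, my_next_step);
 *
 * When AES_ISR already reports DATARDY the step runs synchronously;
 * otherwise the DATARDY interrupt is enabled, -EINPROGRESS is returned,
 * and the IRQ handler later resumes the chain through dd->resume.
 */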
 404
 405static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
 406{
 407	len &= block_size - 1;
 408	return len ? block_size - len : 0;
 409}
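/*
 * Example (block_size is a power of two throughout this driver):
 * atmel_aes_padlen(20, 16) == 12, i.e. 12 padding bytes round a 20-byte
 * length up to the next AES block boundary, while an already-aligned
 * length such as 32 returns 0.
 */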
 410
 411static struct atmel_aes_dev *atmel_aes_dev_alloc(struct atmel_aes_base_ctx *ctx)
 412{
 413	struct atmel_aes_dev *aes_dd;
 414
 415	spin_lock_bh(&atmel_aes.lock);
 416	/* One AES IP per SoC. */
 417	aes_dd = list_first_entry_or_null(&atmel_aes.dev_list,
 418					  struct atmel_aes_dev, list);
 419	spin_unlock_bh(&atmel_aes.lock);
 420	return aes_dd;
 421}
 422
 423static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
 424{
 425	int err;
 426
 427	err = clk_enable(dd->iclk);
 428	if (err)
 429		return err;
 430
 431	atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
 432	atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
 433
 434	return 0;
 435}
 436
 437static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
 438{
 439	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
 440}
 441
 442static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
 443{
 444	int err;
 445
 446	err = atmel_aes_hw_init(dd);
 447	if (err)
 448		return err;
 449
 450	dd->hw_version = atmel_aes_get_version(dd);
 451
 452	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
 453
 454	clk_disable(dd->iclk);
 455	return 0;
 456}
 457
 458static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
 459				      const struct atmel_aes_reqctx *rctx)
 460{
 461	/* Clear all but persistent flags and set request flags. */
 462	dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
 463}
 464
 465static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
 466{
 467	return (dd->flags & AES_FLAGS_ENCRYPT);
 468}
 469
 470#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
 471static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
 472#endif
 473
 474static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
 475{
 476	struct skcipher_request *req = skcipher_request_cast(dd->areq);
 477	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
 478	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 479	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 480
 481	if (req->cryptlen < ivsize)
 482		return;
 483
 484	if (rctx->mode & AES_FLAGS_ENCRYPT)
 485		scatterwalk_map_and_copy(req->iv, req->dst,
 486					 req->cryptlen - ivsize, ivsize, 0);
 487	else
 488		memcpy(req->iv, rctx->lastc, ivsize);
 489}
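/*
 * Sketch of the skcipher API contract honoured here (crypto API
 * convention, not driver-specific code): for chaining modes such as CBC,
 * req->iv must hold the last ciphertext block on completion, so a caller
 * can split one stream across several requests:
 *
 *	skcipher_request_set_crypt(req, src, dst, len, iv);
 *	err = crypto_skcipher_encrypt(req);
 *	(on return, iv holds ciphertext bytes len - 16 .. len - 1)
 *
 * On encryption that block is read back from req->dst above; on
 * decryption it was saved into rctx->lastc beforehand.
 */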
 490
 491static inline struct atmel_aes_ctr_ctx *
 492atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
 493{
 494	return container_of(ctx, struct atmel_aes_ctr_ctx, base);
 495}
 496
 497static void atmel_aes_ctr_update_req_iv(struct atmel_aes_dev *dd)
 498{
 499	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
 500	struct skcipher_request *req = skcipher_request_cast(dd->areq);
 501	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 502	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 503	int i;
 504
  505	/*
  506	 * The CTR transfer works in fragments of at most 1 MByte of data
  507	 * because of the 16-bit CTR counter embedded in the IP. When we get
  508	 * here, ctx->blocks contains the number of blocks of the last fragment
  509	 * processed, so there is no need to explicitly cast it to u16.
  510	 */
 511	for (i = 0; i < ctx->blocks; i++)
 512		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
 513
 514	memcpy(req->iv, ctx->iv, ivsize);
 515}
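/*
 * crypto_inc() steps the 16-byte IV as one big-endian integer, so the
 * loop above effectively adds ctx->blocks to the counter. Illustrative
 * values: iv = ..00 ff ff with blocks = 2 yields iv = ..01 00 01.
 */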
 516
 517static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
 518{
 519	struct skcipher_request *req = skcipher_request_cast(dd->areq);
 520	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
 521
 522#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
 523	if (dd->ctx->is_aead)
 524		atmel_aes_authenc_complete(dd, err);
 525#endif
 526
 527	clk_disable(dd->iclk);
 528	dd->flags &= ~AES_FLAGS_BUSY;
 529
 530	if (!err && !dd->ctx->is_aead &&
 531	    (rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB) {
 532		if ((rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_CTR)
 533			atmel_aes_set_iv_as_last_ciphertext_block(dd);
 534		else
 535			atmel_aes_ctr_update_req_iv(dd);
 536	}
 537
 538	if (dd->is_async)
 539		crypto_request_complete(dd->areq, err);
 540
 541	tasklet_schedule(&dd->queue_task);
 542
 543	return err;
 544}
 545
 546static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
 547				     const __be32 *iv, const u32 *key, int keylen)
 548{
 549	u32 valmr = 0;
 550
 551	/* MR register must be set before IV registers */
 552	if (keylen == AES_KEYSIZE_128)
 553		valmr |= AES_MR_KEYSIZE_128;
 554	else if (keylen == AES_KEYSIZE_192)
 555		valmr |= AES_MR_KEYSIZE_192;
 556	else
 557		valmr |= AES_MR_KEYSIZE_256;
 558
 559	valmr |= dd->flags & AES_FLAGS_MODE_MASK;
 560
 561	if (use_dma) {
 562		valmr |= AES_MR_SMOD_IDATAR0;
 563		if (dd->caps.has_dualbuff)
 564			valmr |= AES_MR_DUALBUFF;
 565	} else {
 566		valmr |= AES_MR_SMOD_AUTO;
 567	}
 568
 569	atmel_aes_write(dd, AES_MR, valmr);
 570
 571	atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));
 572
 573	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
 574		atmel_aes_write_block(dd, AES_IVR(0), iv);
 575}
 576
 577static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
 578					const __be32 *iv)
 579
 580{
 581	atmel_aes_write_ctrl_key(dd, use_dma, iv,
 582				 dd->ctx->key, dd->ctx->keylen);
 583}
 584
 585/* CPU transfer */
 586
 587static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
 588{
 589	int err = 0;
 590	u32 isr;
 591
 592	for (;;) {
 593		atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
 594		dd->data += 4;
 595		dd->datalen -= AES_BLOCK_SIZE;
 596
 597		if (dd->datalen < AES_BLOCK_SIZE)
 598			break;
 599
 600		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
 601
 602		isr = atmel_aes_read(dd, AES_ISR);
 603		if (!(isr & AES_INT_DATARDY)) {
 604			dd->resume = atmel_aes_cpu_transfer;
 605			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
 606			return -EINPROGRESS;
 607		}
 608	}
 609
 610	if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
 611				 dd->buf, dd->total))
 612		err = -EINVAL;
 613
 614	if (err)
 615		return atmel_aes_complete(dd, err);
 616
 617	return dd->cpu_transfer_complete(dd);
 618}
 619
 620static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
 621			       struct scatterlist *src,
 622			       struct scatterlist *dst,
 623			       size_t len,
 624			       atmel_aes_fn_t resume)
 625{
 626	size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE);
 627
 628	if (unlikely(len == 0))
 629		return -EINVAL;
 630
 631	sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
 632
 633	dd->total = len;
 634	dd->real_dst = dst;
 635	dd->cpu_transfer_complete = resume;
 636	dd->datalen = len + padlen;
 637	dd->data = (u32 *)dd->buf;
 638	atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
 639	return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
 640}
 641
 642
 643/* DMA transfer */
 644
 645static void atmel_aes_dma_callback(void *data);
 646
 647static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
 648				    struct scatterlist *sg,
 649				    size_t len,
 650				    struct atmel_aes_dma *dma)
 651{
 652	int nents;
 653
 654	if (!IS_ALIGNED(len, dd->ctx->block_size))
 655		return false;
 656
 657	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
 658		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
 659			return false;
 660
 661		if (len <= sg->length) {
 662			if (!IS_ALIGNED(len, dd->ctx->block_size))
 663				return false;
 664
 665			dma->nents = nents+1;
 666			dma->remainder = sg->length - len;
 667			sg->length = len;
 668			return true;
 669		}
 670
 671		if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
 672			return false;
 673
 674		len -= sg->length;
 675	}
 676
 677	return false;
 678}
 679
 680static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
 681{
 682	struct scatterlist *sg = dma->sg;
 683	int nents = dma->nents;
 684
 685	if (!dma->remainder)
 686		return;
 687
 688	while (--nents > 0 && sg)
 689		sg = sg_next(sg);
 690
 691	if (!sg)
 692		return;
 693
 694	sg->length += dma->remainder;
 695}
 696
 697static int atmel_aes_map(struct atmel_aes_dev *dd,
 698			 struct scatterlist *src,
 699			 struct scatterlist *dst,
 700			 size_t len)
 701{
 702	bool src_aligned, dst_aligned;
 703	size_t padlen;
 704
 705	dd->total = len;
 706	dd->src.sg = src;
 707	dd->dst.sg = dst;
 708	dd->real_dst = dst;
 709
 710	src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
 711	if (src == dst)
 712		dst_aligned = src_aligned;
 713	else
 714		dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
 715	if (!src_aligned || !dst_aligned) {
 716		padlen = atmel_aes_padlen(len, dd->ctx->block_size);
 717
 718		if (dd->buflen < len + padlen)
 719			return -ENOMEM;
 720
 721		if (!src_aligned) {
 722			sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
 723			dd->src.sg = &dd->aligned_sg;
 724			dd->src.nents = 1;
 725			dd->src.remainder = 0;
 726		}
 727
 728		if (!dst_aligned) {
 729			dd->dst.sg = &dd->aligned_sg;
 730			dd->dst.nents = 1;
 731			dd->dst.remainder = 0;
 732		}
 733
 734		sg_init_table(&dd->aligned_sg, 1);
 735		sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
 736	}
 737
 738	if (dd->src.sg == dd->dst.sg) {
 739		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
 740					    DMA_BIDIRECTIONAL);
 741		dd->dst.sg_len = dd->src.sg_len;
 742		if (!dd->src.sg_len)
 743			return -EFAULT;
 744	} else {
 745		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
 746					    DMA_TO_DEVICE);
 747		if (!dd->src.sg_len)
 748			return -EFAULT;
 749
 750		dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
 751					    DMA_FROM_DEVICE);
 752		if (!dd->dst.sg_len) {
 753			dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
 754				     DMA_TO_DEVICE);
 755			return -EFAULT;
 756		}
 757	}
 758
 759	return 0;
 760}
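/*
 * Summary of the mapping policy above: a scatterlist is DMA'd in place
 * only if every segment is 32-bit aligned and the lengths fall on
 * dd->ctx->block_size boundaries; anything else is bounced through the
 * pre-allocated dd->buf, wrapped in the single-entry dd->aligned_sg.
 * dd->real_dst remembers the caller's destination so that a bounced
 * result can be copied back in atmel_aes_unmap().
 */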
 761
 762static void atmel_aes_unmap(struct atmel_aes_dev *dd)
 763{
 764	if (dd->src.sg == dd->dst.sg) {
 765		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
 766			     DMA_BIDIRECTIONAL);
 767
 768		if (dd->src.sg != &dd->aligned_sg)
 769			atmel_aes_restore_sg(&dd->src);
 770	} else {
 771		dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
 772			     DMA_FROM_DEVICE);
 773
 774		if (dd->dst.sg != &dd->aligned_sg)
 775			atmel_aes_restore_sg(&dd->dst);
 776
 777		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
 778			     DMA_TO_DEVICE);
 779
 780		if (dd->src.sg != &dd->aligned_sg)
 781			atmel_aes_restore_sg(&dd->src);
 782	}
 783
 784	if (dd->dst.sg == &dd->aligned_sg)
 785		sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
 786				    dd->buf, dd->total);
 787}
 788
 789static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
 790					enum dma_slave_buswidth addr_width,
 791					enum dma_transfer_direction dir,
 792					u32 maxburst)
 793{
 794	struct dma_async_tx_descriptor *desc;
 795	struct dma_slave_config config;
 796	dma_async_tx_callback callback;
 797	struct atmel_aes_dma *dma;
 798	int err;
 799
 800	memset(&config, 0, sizeof(config));
 801	config.src_addr_width = addr_width;
 802	config.dst_addr_width = addr_width;
 803	config.src_maxburst = maxburst;
 804	config.dst_maxburst = maxburst;
 805
 806	switch (dir) {
 807	case DMA_MEM_TO_DEV:
 808		dma = &dd->src;
 809		callback = NULL;
 810		config.dst_addr = dd->phys_base + AES_IDATAR(0);
 811		break;
 812
 813	case DMA_DEV_TO_MEM:
 814		dma = &dd->dst;
 815		callback = atmel_aes_dma_callback;
 816		config.src_addr = dd->phys_base + AES_ODATAR(0);
 817		break;
 818
 819	default:
 820		return -EINVAL;
 821	}
 822
 823	err = dmaengine_slave_config(dma->chan, &config);
 824	if (err)
 825		return err;
 826
 827	desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
 828				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 829	if (!desc)
 830		return -ENOMEM;
 831
 832	desc->callback = callback;
 833	desc->callback_param = dd;
 834	dmaengine_submit(desc);
 835	dma_async_issue_pending(dma->chan);
 836
 837	return 0;
 838}
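/*
 * This follows the canonical dmaengine slave sequence:
 * dmaengine_slave_config() programs the FIFO address and burst sizes,
 * dmaengine_prep_slave_sg() turns the mapped scatterlist into a transfer
 * descriptor, and dmaengine_submit() plus dma_async_issue_pending() start
 * it. Only the device-to-memory (output) channel gets a callback, since
 * the request is finished only once the last output word has been
 * drained from the IP.
 */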
 839
 840static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
 841			       struct scatterlist *src,
 842			       struct scatterlist *dst,
 843			       size_t len,
 844			       atmel_aes_fn_t resume)
 845{
 846	enum dma_slave_buswidth addr_width;
 847	u32 maxburst;
 848	int err;
 849
 850	switch (dd->ctx->block_size) {
 851	case AES_BLOCK_SIZE:
 852		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 853		maxburst = dd->caps.max_burst_size;
 854		break;
 855
 856	default:
 857		err = -EINVAL;
 858		goto exit;
 859	}
 860
 861	err = atmel_aes_map(dd, src, dst, len);
 862	if (err)
 863		goto exit;
 864
 865	dd->resume = resume;
 866
 867	/* Set output DMA transfer first */
 868	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
 869					   maxburst);
 870	if (err)
 871		goto unmap;
 872
 873	/* Then set input DMA transfer */
 874	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
 875					   maxburst);
 876	if (err)
 877		goto output_transfer_stop;
 878
 879	return -EINPROGRESS;
 880
 881output_transfer_stop:
 882	dmaengine_terminate_sync(dd->dst.chan);
 883unmap:
 884	atmel_aes_unmap(dd);
 885exit:
 886	return atmel_aes_complete(dd, err);
 887}
 888
 889static void atmel_aes_dma_callback(void *data)
 890{
 891	struct atmel_aes_dev *dd = data;
 892
 893	atmel_aes_unmap(dd);
 894	dd->is_async = true;
 895	(void)dd->resume(dd);
 896}
 897
 898static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
 899				  struct crypto_async_request *new_areq)
 900{
 901	struct crypto_async_request *areq, *backlog;
 902	struct atmel_aes_base_ctx *ctx;
 903	unsigned long flags;
 904	bool start_async;
 905	int err, ret = 0;
 906
 907	spin_lock_irqsave(&dd->lock, flags);
 908	if (new_areq)
 909		ret = crypto_enqueue_request(&dd->queue, new_areq);
 910	if (dd->flags & AES_FLAGS_BUSY) {
 911		spin_unlock_irqrestore(&dd->lock, flags);
 912		return ret;
 913	}
 914	backlog = crypto_get_backlog(&dd->queue);
 915	areq = crypto_dequeue_request(&dd->queue);
 916	if (areq)
 917		dd->flags |= AES_FLAGS_BUSY;
 918	spin_unlock_irqrestore(&dd->lock, flags);
 919
 920	if (!areq)
 921		return ret;
 922
 923	if (backlog)
 924		crypto_request_complete(backlog, -EINPROGRESS);
 925
 926	ctx = crypto_tfm_ctx(areq->tfm);
 927
 928	dd->areq = areq;
 929	dd->ctx = ctx;
 930	start_async = (areq != new_areq);
 931	dd->is_async = start_async;
 932
 933	/* WARNING: ctx->start() MAY change dd->is_async. */
 934	err = ctx->start(dd);
 935	return (start_async) ? ret : err;
 936}
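/*
 * Sketch of the queueing contract (crypto API convention, not driver
 * specific): a request flagged CRYPTO_TFM_REQ_MAY_BACKLOG is still
 * accepted when the queue is full; crypto_enqueue_request() returns
 * -EBUSY for it, and the crypto_request_complete(backlog, -EINPROGRESS)
 * call above later tells its submitter that the request has left the
 * backlog and entered the hardware queue. The submitter then simply
 * waits for the final completion callback; it must not resubmit.
 */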
 937
 938
 939/* AES async block ciphers */
 940
 941static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
 942{
 943	return atmel_aes_complete(dd, 0);
 944}
 945
 946static int atmel_aes_start(struct atmel_aes_dev *dd)
 947{
 948	struct skcipher_request *req = skcipher_request_cast(dd->areq);
 949	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
 950	bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD ||
 951			dd->ctx->block_size != AES_BLOCK_SIZE);
 952	int err;
 953
 954	atmel_aes_set_mode(dd, rctx);
 955
 956	err = atmel_aes_hw_init(dd);
 957	if (err)
 958		return atmel_aes_complete(dd, err);
 959
 960	atmel_aes_write_ctrl(dd, use_dma, (void *)req->iv);
 961	if (use_dma)
 962		return atmel_aes_dma_start(dd, req->src, req->dst,
 963					   req->cryptlen,
 964					   atmel_aes_transfer_complete);
 965
 966	return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
 967				   atmel_aes_transfer_complete);
 968}
 969
 970static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
 971{
 972	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
 973	struct skcipher_request *req = skcipher_request_cast(dd->areq);
 974	struct scatterlist *src, *dst;
 975	size_t datalen;
 976	u32 ctr;
 977	u16 start, end;
 978	bool use_dma, fragmented = false;
 979
 980	/* Check for transfer completion. */
 981	ctx->offset += dd->total;
 982	if (ctx->offset >= req->cryptlen)
 983		return atmel_aes_transfer_complete(dd);
 984
 985	/* Compute data length. */
 986	datalen = req->cryptlen - ctx->offset;
 987	ctx->blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
 988	ctr = be32_to_cpu(ctx->iv[3]);
 989
  990	/* Check for 16-bit counter overflow. */
 991	start = ctr & 0xffff;
 992	end = start + ctx->blocks - 1;
 993
 994	if (ctx->blocks >> 16 || end < start) {
 995		ctr |= 0xffff;
 996		datalen = AES_BLOCK_SIZE * (0x10000 - start);
 997		fragmented = true;
 998	}
 999
1000	use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
1001
1002	/* Jump to offset. */
1003	src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
1004	dst = ((req->src == req->dst) ? src :
1005	       scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));
1006
1007	/* Configure hardware. */
1008	atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
1009	if (unlikely(fragmented)) {
1010		/*
1011		 * Increment the counter manually to cope with the hardware
1012		 * counter overflow.
1013		 */
1014		ctx->iv[3] = cpu_to_be32(ctr);
1015		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
1016	}
1017
1018	if (use_dma)
1019		return atmel_aes_dma_start(dd, src, dst, datalen,
1020					   atmel_aes_ctr_transfer);
1021
1022	return atmel_aes_cpu_start(dd, src, dst, datalen,
1023				   atmel_aes_ctr_transfer);
1024}
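/*
 * Worked example for the overflow check above (illustrative numbers):
 * start = 0xfffe and blocks = 8 give end = 0x0005, which wraps below
 * start, so this fragment is limited to 0x10000 - 0xfffe = 2 blocks
 * (32 bytes); the IV is then bumped by software in the fragmented path
 * and the remaining 6 blocks go out as the next fragment.
 */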
1025
1026static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
1027{
1028	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
1029	struct skcipher_request *req = skcipher_request_cast(dd->areq);
1030	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
1031	int err;
1032
1033	atmel_aes_set_mode(dd, rctx);
1034
1035	err = atmel_aes_hw_init(dd);
1036	if (err)
1037		return atmel_aes_complete(dd, err);
1038
1039	memcpy(ctx->iv, req->iv, AES_BLOCK_SIZE);
1040	ctx->offset = 0;
1041	dd->total = 0;
1042	return atmel_aes_ctr_transfer(dd);
1043}
1044
1045static int atmel_aes_xts_fallback(struct skcipher_request *req, bool enc)
1046{
1047	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
1048	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(
1049			crypto_skcipher_reqtfm(req));
1050
1051	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
1052	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
1053				      req->base.complete, req->base.data);
1054	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
1055				   req->cryptlen, req->iv);
1056
1057	return enc ? crypto_skcipher_encrypt(&rctx->fallback_req) :
1058		     crypto_skcipher_decrypt(&rctx->fallback_req);
1059}
1060
1061static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
1062{
1063	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1064	struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
1065	struct atmel_aes_reqctx *rctx;
1066	u32 opmode = mode & AES_FLAGS_OPMODE_MASK;
1067
1068	if (opmode == AES_FLAGS_XTS) {
1069		if (req->cryptlen < XTS_BLOCK_SIZE)
1070			return -EINVAL;
1071
1072		if (!IS_ALIGNED(req->cryptlen, XTS_BLOCK_SIZE))
1073			return atmel_aes_xts_fallback(req,
1074						      mode & AES_FLAGS_ENCRYPT);
1075	}
1076
 1077	/*
 1078	 * The ECB, CBC and CTR modes require the plaintext and ciphertext
 1079	 * to have a positive integer length.
 1080	 */
1081	if (!req->cryptlen && opmode != AES_FLAGS_XTS)
1082		return 0;
1083
1084	if ((opmode == AES_FLAGS_ECB || opmode == AES_FLAGS_CBC) &&
1085	    !IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(skcipher)))
1086		return -EINVAL;
1087
1088	ctx->block_size = AES_BLOCK_SIZE;
1089	ctx->is_aead = false;
1090
1091	rctx = skcipher_request_ctx(req);
1092	rctx->mode = mode;
1093
1094	if (opmode != AES_FLAGS_ECB &&
1095	    !(mode & AES_FLAGS_ENCRYPT)) {
1096		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
1097
1098		if (req->cryptlen >= ivsize)
1099			scatterwalk_map_and_copy(rctx->lastc, req->src,
1100						 req->cryptlen - ivsize,
1101						 ivsize, 0);
1102	}
1103
1104	return atmel_aes_handle_queue(ctx->dd, &req->base);
1105}
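/*
 * Note on the rctx->lastc snapshot above: for in-place decryption
 * (req->src == req->dst) the last ciphertext block is destroyed as soon
 * as the plaintext is written out, so it must be captured here, before
 * the request is queued, for atmel_aes_set_iv_as_last_ciphertext_block()
 * to copy it into req->iv on completion.
 */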
1106
1107static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
1108			   unsigned int keylen)
1109{
1110	struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);
1111
1112	if (keylen != AES_KEYSIZE_128 &&
1113	    keylen != AES_KEYSIZE_192 &&
1114	    keylen != AES_KEYSIZE_256)
1115		return -EINVAL;
1116
1117	memcpy(ctx->key, key, keylen);
1118	ctx->keylen = keylen;
1119
1120	return 0;
1121}
1122
1123static int atmel_aes_ecb_encrypt(struct skcipher_request *req)
1124{
1125	return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
1126}
1127
1128static int atmel_aes_ecb_decrypt(struct skcipher_request *req)
1129{
1130	return atmel_aes_crypt(req, AES_FLAGS_ECB);
1131}
1132
1133static int atmel_aes_cbc_encrypt(struct skcipher_request *req)
1134{
1135	return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
1136}
1137
1138static int atmel_aes_cbc_decrypt(struct skcipher_request *req)
1139{
1140	return atmel_aes_crypt(req, AES_FLAGS_CBC);
1141}
1142
1143static int atmel_aes_ctr_encrypt(struct skcipher_request *req)
1144{
1145	return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
1146}
1147
1148static int atmel_aes_ctr_decrypt(struct skcipher_request *req)
1149{
1150	return atmel_aes_crypt(req, AES_FLAGS_CTR);
1151}
1152
1153static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
1154{
1155	struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
1156	struct atmel_aes_dev *dd;
1157
1158	dd = atmel_aes_dev_alloc(&ctx->base);
1159	if (!dd)
1160		return -ENODEV;
1161
1162	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
1163	ctx->base.dd = dd;
1164	ctx->base.start = atmel_aes_start;
1165
1166	return 0;
1167}
1168
1169static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
1170{
1171	struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
1172	struct atmel_aes_dev *dd;
1173
1174	dd = atmel_aes_dev_alloc(&ctx->base);
1175	if (!dd)
1176		return -ENODEV;
1177
1178	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
1179	ctx->base.dd = dd;
1180	ctx->base.start = atmel_aes_ctr_start;
1181
1182	return 0;
1183}
1184
1185static struct skcipher_alg aes_algs[] = {
1186{
1187	.base.cra_name		= "ecb(aes)",
1188	.base.cra_driver_name	= "atmel-ecb-aes",
1189	.base.cra_blocksize	= AES_BLOCK_SIZE,
1190	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
1191
1192	.init			= atmel_aes_init_tfm,
1193	.min_keysize		= AES_MIN_KEY_SIZE,
1194	.max_keysize		= AES_MAX_KEY_SIZE,
1195	.setkey			= atmel_aes_setkey,
1196	.encrypt		= atmel_aes_ecb_encrypt,
1197	.decrypt		= atmel_aes_ecb_decrypt,
1198},
1199{
1200	.base.cra_name		= "cbc(aes)",
1201	.base.cra_driver_name	= "atmel-cbc-aes",
1202	.base.cra_blocksize	= AES_BLOCK_SIZE,
1203	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
1204
1205	.init			= atmel_aes_init_tfm,
1206	.min_keysize		= AES_MIN_KEY_SIZE,
1207	.max_keysize		= AES_MAX_KEY_SIZE,
1208	.setkey			= atmel_aes_setkey,
1209	.encrypt		= atmel_aes_cbc_encrypt,
1210	.decrypt		= atmel_aes_cbc_decrypt,
1211	.ivsize			= AES_BLOCK_SIZE,
1212},
1213{
1214	.base.cra_name		= "ctr(aes)",
1215	.base.cra_driver_name	= "atmel-ctr-aes",
1216	.base.cra_blocksize	= 1,
1217	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctr_ctx),
1218
1219	.init			= atmel_aes_ctr_init_tfm,
1220	.min_keysize		= AES_MIN_KEY_SIZE,
1221	.max_keysize		= AES_MAX_KEY_SIZE,
1222	.setkey			= atmel_aes_setkey,
1223	.encrypt		= atmel_aes_ctr_encrypt,
1224	.decrypt		= atmel_aes_ctr_decrypt,
1225	.ivsize			= AES_BLOCK_SIZE,
1226},
1227};
1228
1229
1230/* gcm aead functions */
1231
1232static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
1233			       const u32 *data, size_t datalen,
1234			       const __be32 *ghash_in, __be32 *ghash_out,
1235			       atmel_aes_fn_t resume);
1236static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
1237static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);
1238
1239static int atmel_aes_gcm_start(struct atmel_aes_dev *dd);
1240static int atmel_aes_gcm_process(struct atmel_aes_dev *dd);
1241static int atmel_aes_gcm_length(struct atmel_aes_dev *dd);
1242static int atmel_aes_gcm_data(struct atmel_aes_dev *dd);
1243static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd);
1244static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd);
1245static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd);
1246
1247static inline struct atmel_aes_gcm_ctx *
1248atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
1249{
1250	return container_of(ctx, struct atmel_aes_gcm_ctx, base);
1251}
1252
1253static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
1254			       const u32 *data, size_t datalen,
1255			       const __be32 *ghash_in, __be32 *ghash_out,
1256			       atmel_aes_fn_t resume)
1257{
1258	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1259
1260	dd->data = (u32 *)data;
1261	dd->datalen = datalen;
1262	ctx->ghash_in = ghash_in;
1263	ctx->ghash_out = ghash_out;
1264	ctx->ghash_resume = resume;
1265
1266	atmel_aes_write_ctrl(dd, false, NULL);
1267	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init);
1268}
1269
1270static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd)
1271{
1272	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1273
1274	/* Set the data length. */
1275	atmel_aes_write(dd, AES_AADLENR, dd->total);
1276	atmel_aes_write(dd, AES_CLENR, 0);
1277
1278	/* If needed, overwrite the GCM Intermediate Hash Word Registers */
1279	if (ctx->ghash_in)
1280		atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in);
1281
1282	return atmel_aes_gcm_ghash_finalize(dd);
1283}
1284
1285static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd)
1286{
1287	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1288	u32 isr;
1289
1290	/* Write data into the Input Data Registers. */
1291	while (dd->datalen > 0) {
1292		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
1293		dd->data += 4;
1294		dd->datalen -= AES_BLOCK_SIZE;
1295
1296		isr = atmel_aes_read(dd, AES_ISR);
1297		if (!(isr & AES_INT_DATARDY)) {
1298			dd->resume = atmel_aes_gcm_ghash_finalize;
1299			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
1300			return -EINPROGRESS;
1301		}
1302	}
1303
1304	/* Read the computed hash from GHASHRx. */
1305	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out);
1306
1307	return ctx->ghash_resume(dd);
1308}
1309
1310
1311static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
1312{
1313	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1314	struct aead_request *req = aead_request_cast(dd->areq);
1315	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1316	struct atmel_aes_reqctx *rctx = aead_request_ctx(req);
1317	size_t ivsize = crypto_aead_ivsize(tfm);
1318	size_t datalen, padlen;
1319	const void *iv = req->iv;
1320	u8 *data = dd->buf;
1321	int err;
1322
1323	atmel_aes_set_mode(dd, rctx);
1324
1325	err = atmel_aes_hw_init(dd);
1326	if (err)
1327		return atmel_aes_complete(dd, err);
1328
1329	if (likely(ivsize == GCM_AES_IV_SIZE)) {
1330		memcpy(ctx->j0, iv, ivsize);
1331		ctx->j0[3] = cpu_to_be32(1);
1332		return atmel_aes_gcm_process(dd);
1333	}
1334
1335	padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE);
1336	datalen = ivsize + padlen + AES_BLOCK_SIZE;
1337	if (datalen > dd->buflen)
1338		return atmel_aes_complete(dd, -EINVAL);
1339
1340	memcpy(data, iv, ivsize);
1341	memset(data + ivsize, 0, padlen + sizeof(u64));
1342	((__be64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
1343
1344	return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
1345				   NULL, ctx->j0, atmel_aes_gcm_process);
1346}
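/*
 * J0 construction per NIST SP 800-38D (summary; hardware-assisted here):
 * for the usual 12-byte IV, J0 = IV || 0x00000001, built directly above;
 * for any other IV length,
 * J0 = GHASH_H(IV || zero padding to a block boundary || 64 zero bits ||
 * [len(IV)]_64), which is exactly the buffer handed to
 * atmel_aes_gcm_ghash() above.
 */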
1347
1348static int atmel_aes_gcm_process(struct atmel_aes_dev *dd)
1349{
1350	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1351	struct aead_request *req = aead_request_cast(dd->areq);
1352	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1353	bool enc = atmel_aes_is_encrypt(dd);
1354	u32 authsize;
1355
1356	/* Compute text length. */
1357	authsize = crypto_aead_authsize(tfm);
1358	ctx->textlen = req->cryptlen - (enc ? 0 : authsize);
1359
1360	/*
 1361	 * According to the tcrypt test suite, the GCM Automatic Tag Generation
1362	 * fails when both the message and its associated data are empty.
1363	 */
1364	if (likely(req->assoclen != 0 || ctx->textlen != 0))
1365		dd->flags |= AES_FLAGS_GTAGEN;
1366
1367	atmel_aes_write_ctrl(dd, false, NULL);
1368	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length);
1369}
1370
1371static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
1372{
1373	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1374	struct aead_request *req = aead_request_cast(dd->areq);
1375	__be32 j0_lsw, *j0 = ctx->j0;
1376	size_t padlen;
1377
1378	/* Write incr32(J0) into IV. */
1379	j0_lsw = j0[3];
1380	be32_add_cpu(&j0[3], 1);
1381	atmel_aes_write_block(dd, AES_IVR(0), j0);
1382	j0[3] = j0_lsw;
1383
1384	/* Set aad and text lengths. */
1385	atmel_aes_write(dd, AES_AADLENR, req->assoclen);
1386	atmel_aes_write(dd, AES_CLENR, ctx->textlen);
1387
1388	/* Check whether AAD are present. */
1389	if (unlikely(req->assoclen == 0)) {
1390		dd->datalen = 0;
1391		return atmel_aes_gcm_data(dd);
1392	}
1393
1394	/* Copy assoc data and add padding. */
1395	padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE);
1396	if (unlikely(req->assoclen + padlen > dd->buflen))
1397		return atmel_aes_complete(dd, -EINVAL);
1398	sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);
1399
1400	/* Write assoc data into the Input Data register. */
1401	dd->data = (u32 *)dd->buf;
1402	dd->datalen = req->assoclen + padlen;
1403	return atmel_aes_gcm_data(dd);
1404}
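/*
 * The save/increment/restore dance on j0[3] above implements incr32():
 * GCM encrypts the payload with counter blocks starting at J0 + 1 (only
 * the low 32 bits of the counter wrap), while the original J0 is
 * preserved for encrypting the final GHASH value into the tag in
 * atmel_aes_gcm_tag().
 */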
1405
1406static int atmel_aes_gcm_data(struct atmel_aes_dev *dd)
1407{
1408	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1409	struct aead_request *req = aead_request_cast(dd->areq);
1410	bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD);
1411	struct scatterlist *src, *dst;
1412	u32 isr, mr;
1413
1414	/* Write AAD first. */
1415	while (dd->datalen > 0) {
1416		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
1417		dd->data += 4;
1418		dd->datalen -= AES_BLOCK_SIZE;
1419
1420		isr = atmel_aes_read(dd, AES_ISR);
1421		if (!(isr & AES_INT_DATARDY)) {
1422			dd->resume = atmel_aes_gcm_data;
1423			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
1424			return -EINPROGRESS;
1425		}
1426	}
1427
1428	/* GMAC only. */
1429	if (unlikely(ctx->textlen == 0))
1430		return atmel_aes_gcm_tag_init(dd);
1431
1432	/* Prepare src and dst scatter lists to transfer cipher/plain texts */
1433	src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen);
1434	dst = ((req->src == req->dst) ? src :
1435	       scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen));
1436
1437	if (use_dma) {
1438		/* Update the Mode Register for DMA transfers. */
1439		mr = atmel_aes_read(dd, AES_MR);
1440		mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF);
1441		mr |= AES_MR_SMOD_IDATAR0;
1442		if (dd->caps.has_dualbuff)
1443			mr |= AES_MR_DUALBUFF;
1444		atmel_aes_write(dd, AES_MR, mr);
1445
1446		return atmel_aes_dma_start(dd, src, dst, ctx->textlen,
1447					   atmel_aes_gcm_tag_init);
1448	}
1449
1450	return atmel_aes_cpu_start(dd, src, dst, ctx->textlen,
1451				   atmel_aes_gcm_tag_init);
1452}
1453
1454static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
1455{
1456	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1457	struct aead_request *req = aead_request_cast(dd->areq);
1458	__be64 *data = dd->buf;
1459
1460	if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
1461		if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
1462			dd->resume = atmel_aes_gcm_tag_init;
1463			atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY);
1464			return -EINPROGRESS;
1465		}
1466
1467		return atmel_aes_gcm_finalize(dd);
1468	}
1469
1470	/* Read the GCM Intermediate Hash Word Registers. */
1471	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash);
1472
1473	data[0] = cpu_to_be64(req->assoclen * 8);
1474	data[1] = cpu_to_be64(ctx->textlen * 8);
1475
1476	return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE,
1477				   ctx->ghash, ctx->ghash, atmel_aes_gcm_tag);
1478}
1479
1480static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd)
1481{
1482	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1483	unsigned long flags;
1484
1485	/*
1486	 * Change mode to CTR to complete the tag generation.
1487	 * Use J0 as Initialization Vector.
1488	 */
1489	flags = dd->flags;
1490	dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN);
1491	dd->flags |= AES_FLAGS_CTR;
1492	atmel_aes_write_ctrl(dd, false, ctx->j0);
1493	dd->flags = flags;
1494
1495	atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash);
1496	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize);
1497}
1498
1499static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd)
1500{
1501	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1502	struct aead_request *req = aead_request_cast(dd->areq);
1503	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1504	bool enc = atmel_aes_is_encrypt(dd);
1505	u32 offset, authsize, itag[4], *otag = ctx->tag;
1506	int err;
1507
1508	/* Read the computed tag. */
1509	if (likely(dd->flags & AES_FLAGS_GTAGEN))
1510		atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag);
1511	else
1512		atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag);
1513
1514	offset = req->assoclen + ctx->textlen;
1515	authsize = crypto_aead_authsize(tfm);
1516	if (enc) {
1517		scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1);
1518		err = 0;
1519	} else {
1520		scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0);
1521		err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0;
1522	}
1523
1524	return atmel_aes_complete(dd, err);
1525}
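/*
 * crypto_memneq() is used rather than memcmp() so that the tag check
 * runs in constant time and does not leak, through timing, how many
 * leading tag bytes matched.
 */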
1526
1527static int atmel_aes_gcm_crypt(struct aead_request *req,
1528			       unsigned long mode)
1529{
1530	struct atmel_aes_base_ctx *ctx;
1531	struct atmel_aes_reqctx *rctx;
1532
1533	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
1534	ctx->block_size = AES_BLOCK_SIZE;
1535	ctx->is_aead = true;
1536
1537	rctx = aead_request_ctx(req);
1538	rctx->mode = AES_FLAGS_GCM | mode;
1539
1540	return atmel_aes_handle_queue(ctx->dd, &req->base);
1541}
1542
1543static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
1544				unsigned int keylen)
1545{
1546	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
1547
1548	if (keylen != AES_KEYSIZE_256 &&
1549	    keylen != AES_KEYSIZE_192 &&
1550	    keylen != AES_KEYSIZE_128)
1551		return -EINVAL;
1552
1553	memcpy(ctx->key, key, keylen);
1554	ctx->keylen = keylen;
1555
1556	return 0;
1557}
1558
1559static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
1560				     unsigned int authsize)
1561{
1562	return crypto_gcm_check_authsize(authsize);
1563}
1564
1565static int atmel_aes_gcm_encrypt(struct aead_request *req)
1566{
1567	return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
1568}
1569
1570static int atmel_aes_gcm_decrypt(struct aead_request *req)
1571{
1572	return atmel_aes_gcm_crypt(req, 0);
1573}
1574
1575static int atmel_aes_gcm_init(struct crypto_aead *tfm)
1576{
1577	struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
1578	struct atmel_aes_dev *dd;
1579
1580	dd = atmel_aes_dev_alloc(&ctx->base);
1581	if (!dd)
1582		return -ENODEV;
1583
1584	crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
1585	ctx->base.dd = dd;
1586	ctx->base.start = atmel_aes_gcm_start;
1587
1588	return 0;
1589}
1590
1591static struct aead_alg aes_gcm_alg = {
1592	.setkey		= atmel_aes_gcm_setkey,
1593	.setauthsize	= atmel_aes_gcm_setauthsize,
1594	.encrypt	= atmel_aes_gcm_encrypt,
1595	.decrypt	= atmel_aes_gcm_decrypt,
1596	.init		= atmel_aes_gcm_init,
1597	.ivsize		= GCM_AES_IV_SIZE,
1598	.maxauthsize	= AES_BLOCK_SIZE,
1599
1600	.base = {
1601		.cra_name		= "gcm(aes)",
1602		.cra_driver_name	= "atmel-gcm-aes",
1603		.cra_blocksize		= 1,
1604		.cra_ctxsize		= sizeof(struct atmel_aes_gcm_ctx),
1605	},
1606};
1607
1608
1609/* xts functions */
1610
1611static inline struct atmel_aes_xts_ctx *
1612atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx)
1613{
1614	return container_of(ctx, struct atmel_aes_xts_ctx, base);
1615}
1616
1617static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);
1618
1619static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
1620{
1621	struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
1622	struct skcipher_request *req = skcipher_request_cast(dd->areq);
1623	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
1624	unsigned long flags;
1625	int err;
1626
1627	atmel_aes_set_mode(dd, rctx);
1628
1629	err = atmel_aes_hw_init(dd);
1630	if (err)
1631		return atmel_aes_complete(dd, err);
1632
1633	/* Compute the tweak value from req->iv with ecb(aes). */
1634	flags = dd->flags;
1635	dd->flags &= ~AES_FLAGS_MODE_MASK;
1636	dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
1637	atmel_aes_write_ctrl_key(dd, false, NULL,
1638				 ctx->key2, ctx->base.keylen);
1639	dd->flags = flags;
1640
1641	atmel_aes_write_block(dd, AES_IDATAR(0), req->iv);
1642	return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
1643}
1644
1645static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
1646{
1647	struct skcipher_request *req = skcipher_request_cast(dd->areq);
1648	bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD);
1649	u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
1650	static const __le32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
1651	u8 *tweak_bytes = (u8 *)tweak;
1652	int i;
1653
1654	/* Read the computed ciphered tweak value. */
1655	atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
1656	/*
1657	 * Hardware quirk:
 1658	 * the order of the ciphered tweak bytes needs to be reversed before
 1659	 * writing them into the TWRx registers.
1660	 */
1661	for (i = 0; i < AES_BLOCK_SIZE/2; ++i)
1662		swap(tweak_bytes[i], tweak_bytes[AES_BLOCK_SIZE - 1 - i]);
1663
1664	/* Process the data. */
1665	atmel_aes_write_ctrl(dd, use_dma, NULL);
1666	atmel_aes_write_block(dd, AES_TWR(0), tweak);
1667	atmel_aes_write_block(dd, AES_ALPHAR(0), one);
1668	if (use_dma)
1669		return atmel_aes_dma_start(dd, req->src, req->dst,
1670					   req->cryptlen,
1671					   atmel_aes_transfer_complete);
1672
1673	return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
1674				   atmel_aes_transfer_complete);
1675}
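/*
 * Illustrative example of the quirk handled above: if the ECB-encrypted
 * tweak is read from ODATAR as t[0], t[1], ..., t[15], the value
 * programmed into the TWR registers must be the mirrored sequence
 * t[15], ..., t[1], t[0]; the swap() loop reverses the 16 bytes in
 * place before atmel_aes_write_block().
 */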
1676
1677static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
1678				unsigned int keylen)
1679{
1680	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
1681	int err;
1682
1683	err = xts_verify_key(tfm, key, keylen);
1684	if (err)
1685		return err;
1686
1687	crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
1688	crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags &
1689				  CRYPTO_TFM_REQ_MASK);
1690	err = crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
1691	if (err)
1692		return err;
1693
1694	memcpy(ctx->base.key, key, keylen/2);
1695	memcpy(ctx->key2, key + keylen/2, keylen/2);
1696	ctx->base.keylen = keylen/2;
1697
1698	return 0;
1699}
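/*
 * XTS key layout (per the xts() convention): the supplied key is twice
 * the AES key size and is split in half above, e.g. for a 64-byte key:
 *
 *	key[0..31]  -> ctx->base.key  (AES-256 data key)
 *	key[32..63] -> ctx->key2      (AES-256 tweak key)
 *
 * xts_verify_key() has already validated the overall key format.
 */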
1700
1701static int atmel_aes_xts_encrypt(struct skcipher_request *req)
1702{
1703	return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
1704}
1705
1706static int atmel_aes_xts_decrypt(struct skcipher_request *req)
1707{
1708	return atmel_aes_crypt(req, AES_FLAGS_XTS);
1709}
1710
1711static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
1712{
1713	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
1714	struct atmel_aes_dev *dd;
1715	const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
1716
1717	dd = atmel_aes_dev_alloc(&ctx->base);
1718	if (!dd)
1719		return -ENODEV;
1720
1721	ctx->fallback_tfm = crypto_alloc_skcipher(tfm_name, 0,
1722						  CRYPTO_ALG_NEED_FALLBACK);
1723	if (IS_ERR(ctx->fallback_tfm))
1724		return PTR_ERR(ctx->fallback_tfm);
1725
1726	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx) +
1727				    crypto_skcipher_reqsize(ctx->fallback_tfm));
1728	ctx->base.dd = dd;
1729	ctx->base.start = atmel_aes_xts_start;
1730
1731	return 0;
1732}
1733
1734static void atmel_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
1735{
1736	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
1737
1738	crypto_free_skcipher(ctx->fallback_tfm);
1739}
1740
1741static struct skcipher_alg aes_xts_alg = {
1742	.base.cra_name		= "xts(aes)",
1743	.base.cra_driver_name	= "atmel-xts-aes",
1744	.base.cra_blocksize	= AES_BLOCK_SIZE,
1745	.base.cra_ctxsize	= sizeof(struct atmel_aes_xts_ctx),
1746	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
1747
1748	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
1749	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
1750	.ivsize			= AES_BLOCK_SIZE,
1751	.setkey			= atmel_aes_xts_setkey,
1752	.encrypt		= atmel_aes_xts_encrypt,
1753	.decrypt		= atmel_aes_xts_decrypt,
1754	.init			= atmel_aes_xts_init_tfm,
1755	.exit			= atmel_aes_xts_exit_tfm,
1756};
1757
1758#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
1759/* authenc aead functions */
1760
1761static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
1762static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
1763				  bool is_async);
1764static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
1765				      bool is_async);
1766static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd);
1767static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
1768				   bool is_async);
1769
1770static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err)
1771{
1772	struct aead_request *req = aead_request_cast(dd->areq);
1773	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
1774
1775	if (err && (dd->flags & AES_FLAGS_OWN_SHA))
1776		atmel_sha_authenc_abort(&rctx->auth_req);
1777	dd->flags &= ~AES_FLAGS_OWN_SHA;
1778}
1779
1780static int atmel_aes_authenc_start(struct atmel_aes_dev *dd)
1781{
1782	struct aead_request *req = aead_request_cast(dd->areq);
1783	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
1784	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1785	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
1786	int err;
1787
1788	atmel_aes_set_mode(dd, &rctx->base);
1789
1790	err = atmel_aes_hw_init(dd);
1791	if (err)
1792		return atmel_aes_complete(dd, err);
1793
1794	return atmel_sha_authenc_schedule(&rctx->auth_req, ctx->auth,
1795					  atmel_aes_authenc_init, dd);
1796}
1797
1798static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
1799				  bool is_async)
1800{
1801	struct aead_request *req = aead_request_cast(dd->areq);
1802	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
1803
1804	if (is_async)
1805		dd->is_async = true;
1806	if (err)
1807		return atmel_aes_complete(dd, err);
1808
1809	/* If here, we've got the ownership of the SHA device. */
1810	dd->flags |= AES_FLAGS_OWN_SHA;
1811
1812	/* Configure the SHA device. */
1813	return atmel_sha_authenc_init(&rctx->auth_req,
1814				      req->src, req->assoclen,
1815				      rctx->textlen,
1816				      atmel_aes_authenc_transfer, dd);
1817}
1818
1819static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
1820				      bool is_async)
1821{
1822	struct aead_request *req = aead_request_cast(dd->areq);
1823	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
1824	bool enc = atmel_aes_is_encrypt(dd);
1825	struct scatterlist *src, *dst;
1826	__be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
1827	u32 emr;
1828
1829	if (is_async)
1830		dd->is_async = true;
1831	if (err)
1832		return atmel_aes_complete(dd, err);
1833
1834	/* Prepare src and dst scatter-lists to transfer cipher/plain texts. */
1835	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
1836	dst = src;
1837
1838	if (req->src != req->dst)
1839		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
1840
1841	/* Configure the AES device. */
1842	memcpy(iv, req->iv, sizeof(iv));
1843
1844	/*
1845	 * Here we always set the 2nd parameter of atmel_aes_write_ctrl() to
1846	 * 'true' even if the data transfer is actually performed by the CPU (so
1847	 * not by the DMA) because we must force the AES_MR_SMOD bitfield to the
1848	 * value AES_MR_SMOD_IDATAR0. Indeed, both AES_MR_SMOD and SHA_MR_SMOD
1849	 * must be set to *_MR_SMOD_IDATAR0.
1850	 */
1851	atmel_aes_write_ctrl(dd, true, iv);
1852	emr = AES_EMR_PLIPEN;
1853	if (!enc)
1854		emr |= AES_EMR_PLIPD;
1855	atmel_aes_write(dd, AES_EMR, emr);
1856
1857	/* Transfer data. */
1858	return atmel_aes_dma_start(dd, src, dst, rctx->textlen,
1859				   atmel_aes_authenc_digest);
1860}
1861
1862static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd)
1863{
1864	struct aead_request *req = aead_request_cast(dd->areq);
1865	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
1866
1867	/* atmel_sha_authenc_final() releases the SHA device. */
1868	dd->flags &= ~AES_FLAGS_OWN_SHA;
1869	return atmel_sha_authenc_final(&rctx->auth_req,
1870				       rctx->digest, sizeof(rctx->digest),
1871				       atmel_aes_authenc_final, dd);
1872}
1873
1874static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
1875				   bool is_async)
1876{
1877	struct aead_request *req = aead_request_cast(dd->areq);
1878	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
1879	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1880	bool enc = atmel_aes_is_encrypt(dd);
1881	u32 idigest[SHA512_DIGEST_SIZE / sizeof(u32)], *odigest = rctx->digest;
1882	u32 offs, authsize;
1883
1884	if (is_async)
1885		dd->is_async = true;
1886	if (err)
1887		goto complete;
1888
1889	offs = req->assoclen + rctx->textlen;
1890	authsize = crypto_aead_authsize(tfm);
1891	if (enc) {
1892		scatterwalk_map_and_copy(odigest, req->dst, offs, authsize, 1);
1893	} else {
1894		scatterwalk_map_and_copy(idigest, req->src, offs, authsize, 0);
1895		if (crypto_memneq(idigest, odigest, authsize))
1896			err = -EBADMSG;
1897	}
1898
1899complete:
1900	return atmel_aes_complete(dd, err);
1901}
1902
1903static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
1904				    unsigned int keylen)
1905{
1906	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
1907	struct crypto_authenc_keys keys;
1908	int err;
1909
1910	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1911		goto badkey;
1912
1913	if (keys.enckeylen > sizeof(ctx->base.key))
1914		goto badkey;
1915
1916	/* Save auth key. */
1917	err = atmel_sha_authenc_setkey(ctx->auth,
1918				       keys.authkey, keys.authkeylen,
1919				       crypto_aead_get_flags(tfm));
1920	if (err) {
1921		memzero_explicit(&keys, sizeof(keys));
1922		return err;
1923	}
1924
1925	/* Save enc key. */
1926	ctx->base.keylen = keys.enckeylen;
1927	memcpy(ctx->base.key, keys.enckey, keys.enckeylen);
1928
1929	memzero_explicit(&keys, sizeof(keys));
1930	return 0;
1931
1932badkey:
1933	memzero_explicit(&keys, sizeof(keys));
1934	return -EINVAL;
1935}
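
/*
 * A minimal sketch (not part of this driver) of how a caller encodes the
 * combined key blob that crypto_authenc_extractkeys() parses above: an
 * rtattr of type CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian enc key
 * length, followed by the raw auth key and then the enc key. The keybuf,
 * authkey/authkeylen and enckey/enckeylen names are illustrative only.
 *
 *	struct rtattr *rta = (struct rtattr *)keybuf;
 *	struct crypto_authenc_key_param *param = RTA_DATA(rta);
 *
 *	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
 *	rta->rta_len = RTA_LENGTH(sizeof(*param));
 *	param->enckeylen = cpu_to_be32(enckeylen);
 *	memcpy(keybuf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
 *	memcpy(keybuf + RTA_SPACE(sizeof(*param)) + authkeylen,
 *	       enckey, enckeylen);
 */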
1936
1937static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
1938				      unsigned long auth_mode)
1939{
1940	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
1941	unsigned int auth_reqsize = atmel_sha_authenc_get_reqsize();
1942	struct atmel_aes_dev *dd;
1943
1944	dd = atmel_aes_dev_alloc(&ctx->base);
1945	if (!dd)
1946		return -ENODEV;
1947
1948	ctx->auth = atmel_sha_authenc_spawn(auth_mode);
1949	if (IS_ERR(ctx->auth))
1950		return PTR_ERR(ctx->auth);
1951
1952	crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) +
1953				      auth_reqsize));
1954	ctx->base.dd = dd;
1955	ctx->base.start = atmel_aes_authenc_start;
1956
1957	return 0;
1958}
1959
1960static int atmel_aes_authenc_hmac_sha1_init_tfm(struct crypto_aead *tfm)
1961{
1962	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA1);
1963}
1964
1965static int atmel_aes_authenc_hmac_sha224_init_tfm(struct crypto_aead *tfm)
1966{
1967	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA224);
1968}
1969
1970static int atmel_aes_authenc_hmac_sha256_init_tfm(struct crypto_aead *tfm)
1971{
1972	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA256);
1973}
1974
1975static int atmel_aes_authenc_hmac_sha384_init_tfm(struct crypto_aead *tfm)
1976{
1977	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA384);
1978}
1979
1980static int atmel_aes_authenc_hmac_sha512_init_tfm(struct crypto_aead *tfm)
1981{
1982	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA512);
1983}
1984
1985static void atmel_aes_authenc_exit_tfm(struct crypto_aead *tfm)
1986{
1987	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
1988
1989	atmel_sha_authenc_free(ctx->auth);
1990}
1991
1992static int atmel_aes_authenc_crypt(struct aead_request *req,
1993				   unsigned long mode)
1994{
1995	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
1996	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1997	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
1998	u32 authsize = crypto_aead_authsize(tfm);
1999	bool enc = (mode & AES_FLAGS_ENCRYPT);
2000
2001	/* Compute text length. */
2002	if (!enc && req->cryptlen < authsize)
2003		return -EINVAL;
2004	rctx->textlen = req->cryptlen - (enc ? 0 : authsize);
2005
2006	/*
2007	 * Empty messages are not supported yet: the SHA auto-padding
2008	 * works only on non-empty messages, so handling an empty
2009	 * message would require a dedicated special case.
2010	 */
2011	if (!rctx->textlen && !req->assoclen)
2012		return -EINVAL;
2013
2014	rctx->base.mode = mode;
2015	ctx->block_size = AES_BLOCK_SIZE;
2016	ctx->is_aead = true;
2017
2018	return atmel_aes_handle_queue(ctx->dd, &req->base);
2019}
2020
2021static int atmel_aes_authenc_cbc_aes_encrypt(struct aead_request *req)
2022{
2023	return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
2024}
2025
2026static int atmel_aes_authenc_cbc_aes_decrypt(struct aead_request *req)
2027{
2028	return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC);
2029}
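
/*
 * A minimal usage sketch, assuming the "authenc(hmac(sha256),cbc(aes))"
 * instance registered below; key/keylen (the rtattr-encoded blob described
 * at the setkey handler), assoclen, cryptlen, sg and iv are placeholders
 * supplied by the caller:
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	err = crypto_aead_setkey(tfm, key, keylen);
 *	err = crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
 *
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  crypto_req_done, &wait);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, sg, sg, cryptlen, iv);
 *	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
 */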
2030
2031static struct aead_alg aes_authenc_algs[] = {
2032{
2033	.setkey		= atmel_aes_authenc_setkey,
2034	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
2035	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
2036	.init		= atmel_aes_authenc_hmac_sha1_init_tfm,
2037	.exit		= atmel_aes_authenc_exit_tfm,
2038	.ivsize		= AES_BLOCK_SIZE,
2039	.maxauthsize	= SHA1_DIGEST_SIZE,
2040
2041	.base = {
2042		.cra_name		= "authenc(hmac(sha1),cbc(aes))",
2043		.cra_driver_name	= "atmel-authenc-hmac-sha1-cbc-aes",
2044		.cra_blocksize		= AES_BLOCK_SIZE,
2045		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
2046	},
2047},
2048{
2049	.setkey		= atmel_aes_authenc_setkey,
2050	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
2051	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
2052	.init		= atmel_aes_authenc_hmac_sha224_init_tfm,
2053	.exit		= atmel_aes_authenc_exit_tfm,
2054	.ivsize		= AES_BLOCK_SIZE,
2055	.maxauthsize	= SHA224_DIGEST_SIZE,
2056
2057	.base = {
2058		.cra_name		= "authenc(hmac(sha224),cbc(aes))",
2059		.cra_driver_name	= "atmel-authenc-hmac-sha224-cbc-aes",
2060		.cra_blocksize		= AES_BLOCK_SIZE,
2061		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
2062	},
2063},
2064{
2065	.setkey		= atmel_aes_authenc_setkey,
2066	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
2067	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
2068	.init		= atmel_aes_authenc_hmac_sha256_init_tfm,
2069	.exit		= atmel_aes_authenc_exit_tfm,
2070	.ivsize		= AES_BLOCK_SIZE,
2071	.maxauthsize	= SHA256_DIGEST_SIZE,
2072
2073	.base = {
2074		.cra_name		= "authenc(hmac(sha256),cbc(aes))",
2075		.cra_driver_name	= "atmel-authenc-hmac-sha256-cbc-aes",
2076		.cra_blocksize		= AES_BLOCK_SIZE,
2077		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
2078	},
2079},
2080{
2081	.setkey		= atmel_aes_authenc_setkey,
2082	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
2083	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
2084	.init		= atmel_aes_authenc_hmac_sha384_init_tfm,
2085	.exit		= atmel_aes_authenc_exit_tfm,
2086	.ivsize		= AES_BLOCK_SIZE,
2087	.maxauthsize	= SHA384_DIGEST_SIZE,
2088
2089	.base = {
2090		.cra_name		= "authenc(hmac(sha384),cbc(aes))",
2091		.cra_driver_name	= "atmel-authenc-hmac-sha384-cbc-aes",
2092		.cra_blocksize		= AES_BLOCK_SIZE,
2093		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
2094	},
2095},
2096{
2097	.setkey		= atmel_aes_authenc_setkey,
2098	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
2099	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
2100	.init		= atmel_aes_authenc_hmac_sha512_init_tfm,
2101	.exit		= atmel_aes_authenc_exit_tfm,
2102	.ivsize		= AES_BLOCK_SIZE,
2103	.maxauthsize	= SHA512_DIGEST_SIZE,
2104
2105	.base = {
2106		.cra_name		= "authenc(hmac(sha512),cbc(aes))",
2107		.cra_driver_name	= "atmel-authenc-hmac-sha512-cbc-aes",
2108		.cra_blocksize		= AES_BLOCK_SIZE,
2109		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
2110	},
2111},
2112};
2113#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */
2114
2115/* Probe functions */
2116
2117static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
2118{
2119	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
2120	dd->buflen = ATMEL_AES_BUFFER_SIZE;
2121	dd->buflen &= ~(AES_BLOCK_SIZE - 1);
2122
2123	if (!dd->buf) {
2124		dev_err(dd->dev, "unable to alloc pages.\n");
2125		return -ENOMEM;
2126	}
2127
2128	return 0;
2129}
2130
2131static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
2132{
	/* Free all pages of the ATMEL_AES_BUFFER_ORDER allocation, not just one. */
2133	free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
2134}
2135
2136static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
2137{
2138	int ret;
2139
2140	/* Try to grab 2 DMA channels */
2141	dd->src.chan = dma_request_chan(dd->dev, "tx");
2142	if (IS_ERR(dd->src.chan)) {
2143		ret = PTR_ERR(dd->src.chan);
2144		goto err_dma_in;
2145	}
2146
2147	dd->dst.chan = dma_request_chan(dd->dev, "rx");
2148	if (IS_ERR(dd->dst.chan)) {
2149		ret = PTR_ERR(dd->dst.chan);
2150		goto err_dma_out;
2151	}
2152
2153	return 0;
2154
2155err_dma_out:
2156	dma_release_channel(dd->src.chan);
2157err_dma_in:
2158	dev_err(dd->dev, "no DMA channel available\n");
2159	return ret;
2160}
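
/*
 * dma_request_chan() may return ERR_PTR(-EPROBE_DEFER) while the DMA
 * controller is still probing, in which case the dev_err() above logs a
 * spurious error before the retry. A hedged alternative sketch (not what
 * this driver currently does) using dev_err_probe(), which stays silent on
 * probe deferral:
 *
 *	dd->src.chan = dma_request_chan(dd->dev, "tx");
 *	if (IS_ERR(dd->src.chan))
 *		return dev_err_probe(dd->dev, PTR_ERR(dd->src.chan),
 *				     "no tx DMA channel available\n");
 */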
2161
2162static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
2163{
2164	dma_release_channel(dd->dst.chan);
2165	dma_release_channel(dd->src.chan);
2166}
2167
2168static void atmel_aes_queue_task(unsigned long data)
2169{
2170	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
2171
2172	atmel_aes_handle_queue(dd, NULL);
2173}
2174
2175static void atmel_aes_done_task(unsigned long data)
2176{
2177	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
2178
2179	dd->is_async = true;
2180	(void)dd->resume(dd);
2181}
2182
2183static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
2184{
2185	struct atmel_aes_dev *aes_dd = dev_id;
2186	u32 reg;
2187
2188	reg = atmel_aes_read(aes_dd, AES_ISR);
2189	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
2190		atmel_aes_write(aes_dd, AES_IDR, reg);
2191		if (AES_FLAGS_BUSY & aes_dd->flags)
2192			tasklet_schedule(&aes_dd->done_task);
2193		else
2194			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
2195		return IRQ_HANDLED;
2196	}
2197
2198	return IRQ_NONE;
2199}
2200
2201static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
2202{
2203	int i;
2204
2205#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
2206	if (dd->caps.has_authenc)
2207		for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
2208			crypto_unregister_aead(&aes_authenc_algs[i]);
2209#endif
2210
2211	if (dd->caps.has_xts)
2212		crypto_unregister_skcipher(&aes_xts_alg);
2213
2214	if (dd->caps.has_gcm)
2215		crypto_unregister_aead(&aes_gcm_alg);
2216
2217	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
2218		crypto_unregister_skcipher(&aes_algs[i]);
2219}
2220
2221static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
2222{
2223	alg->cra_flags |= CRYPTO_ALG_ASYNC;
2224	alg->cra_alignmask = 0xf;
2225	alg->cra_priority = ATMEL_AES_PRIORITY;
2226	alg->cra_module = THIS_MODULE;
2227}
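
/*
 * cra_alignmask = 0xf tells the crypto API that this driver wants data and
 * IVs aligned to 16 bytes; the generic layer copies requests into aligned
 * buffers when callers provide less-aligned memory. CRYPTO_ALG_ASYNC
 * advertises that completion happens asynchronously, from the DMA callback
 * or the done tasklet, rather than before the entry point returns.
 */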
2228
2229static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
2230{
2231	int err, i, j;
2232
2233	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
2234		atmel_aes_crypto_alg_init(&aes_algs[i].base);
2235
2236		err = crypto_register_skcipher(&aes_algs[i]);
2237		if (err)
2238			goto err_aes_algs;
2239	}
2240
2241	if (dd->caps.has_gcm) {
2242		atmel_aes_crypto_alg_init(&aes_gcm_alg.base);
2243
2244		err = crypto_register_aead(&aes_gcm_alg);
2245		if (err)
2246			goto err_aes_gcm_alg;
2247	}
2248
2249	if (dd->caps.has_xts) {
2250		atmel_aes_crypto_alg_init(&aes_xts_alg.base);
2251
2252		err = crypto_register_skcipher(&aes_xts_alg);
2253		if (err)
2254			goto err_aes_xts_alg;
2255	}
2256
2257#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
2258	if (dd->caps.has_authenc) {
2259		for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
2260			atmel_aes_crypto_alg_init(&aes_authenc_algs[i].base);
2261
2262			err = crypto_register_aead(&aes_authenc_algs[i]);
2263			if (err)
2264				goto err_aes_authenc_alg;
2265		}
2266	}
2267#endif
2268
2269	return 0;
2270
2271#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
2272	/* i = ARRAY_SIZE(aes_authenc_algs); */
2273err_aes_authenc_alg:
2274	for (j = 0; j < i; j++)
2275		crypto_unregister_aead(&aes_authenc_algs[j]);
2276	crypto_unregister_skcipher(&aes_xts_alg);
2277#endif
2278err_aes_xts_alg:
2279	crypto_unregister_aead(&aes_gcm_alg);
2280err_aes_gcm_alg:
2281	i = ARRAY_SIZE(aes_algs);
2282err_aes_algs:
2283	for (j = 0; j < i; j++)
2284		crypto_unregister_skcipher(&aes_algs[j]);
2285
2286	return err;
2287}
2288
2289static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
2290{
2291	dd->caps.has_dualbuff = 0;
2292	dd->caps.has_gcm = 0;
2293	dd->caps.has_xts = 0;
2294	dd->caps.has_authenc = 0;
2295	dd->caps.max_burst_size = 1;
2296
2297	/* keep only major version number */
2298	switch (dd->hw_version & 0xff0) {
2299	case 0x700:
2300	case 0x600:
2301	case 0x500:
2302		dd->caps.has_dualbuff = 1;
2303		dd->caps.has_gcm = 1;
2304		dd->caps.has_xts = 1;
2305		dd->caps.has_authenc = 1;
2306		dd->caps.max_burst_size = 4;
2307		break;
2308	case 0x200:
2309		dd->caps.has_dualbuff = 1;
2310		dd->caps.has_gcm = 1;
2311		dd->caps.max_burst_size = 4;
2312		break;
2313	case 0x130:
2314		dd->caps.has_dualbuff = 1;
2315		dd->caps.max_burst_size = 4;
2316		break;
2317	case 0x120:
2318		break;
2319	default:
2320		dev_warn(dd->dev,
2321			 "Unsupported AES hw version, using minimum capabilities\n");
2322		break;
2323	}
2324}
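
/*
 * Summary of the capability matrix above, keyed on the major IP version:
 *
 *	version             dualbuff  gcm  xts  authenc  max_burst_size
 *	0x500/0x600/0x700      yes    yes  yes    yes          4
 *	0x200                  yes    yes   no     no          4
 *	0x130                  yes     no   no     no          4
 *	0x120 and unknown       no     no   no     no          1
 */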
2325
2326static const struct of_device_id atmel_aes_dt_ids[] = {
2327	{ .compatible = "atmel,at91sam9g46-aes" },
2328	{ /* sentinel */ }
2329};
2330MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
2331
2332static int atmel_aes_probe(struct platform_device *pdev)
2333{
2334	struct atmel_aes_dev *aes_dd;
2335	struct device *dev = &pdev->dev;
2336	struct resource *aes_res;
2337	int err;
2338
2339	aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
2340	if (!aes_dd)
2341		return -ENOMEM;
2342
2343	aes_dd->dev = dev;
2344
2345	platform_set_drvdata(pdev, aes_dd);
2346
2347	INIT_LIST_HEAD(&aes_dd->list);
2348	spin_lock_init(&aes_dd->lock);
2349
2350	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
2351					(unsigned long)aes_dd);
2352	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
2353					(unsigned long)aes_dd);
2354
2355	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
2356
2357	aes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &aes_res);
2358	if (IS_ERR(aes_dd->io_base)) {
2359		err = PTR_ERR(aes_dd->io_base);
2360		goto err_tasklet_kill;
2361	}
2362	aes_dd->phys_base = aes_res->start;
2363
2364	/* Get the IRQ */
2365	aes_dd->irq = platform_get_irq(pdev, 0);
2366	if (aes_dd->irq < 0) {
2367		err = aes_dd->irq;
2368		goto err_tasklet_kill;
2369	}
2370
2371	err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
2372			       IRQF_SHARED, "atmel-aes", aes_dd);
2373	if (err) {
2374		dev_err(dev, "unable to request aes irq.\n");
2375		goto err_tasklet_kill;
2376	}
2377
2378	/* Initialize the clock. */
2379	aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
2380	if (IS_ERR(aes_dd->iclk)) {
2381		dev_err(dev, "clock initialization failed.\n");
2382		err = PTR_ERR(aes_dd->iclk);
2383		goto err_tasklet_kill;
2384	}
2385
2386	err = clk_prepare(aes_dd->iclk);
2387	if (err)
2388		goto err_tasklet_kill;
2389
2390	err = atmel_aes_hw_version_init(aes_dd);
2391	if (err)
2392		goto err_iclk_unprepare;
2393
2394	atmel_aes_get_cap(aes_dd);
2395
2396#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
2397	if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
2398		err = -EPROBE_DEFER;
2399		goto err_iclk_unprepare;
2400	}
2401#endif
2402
2403	err = atmel_aes_buff_init(aes_dd);
2404	if (err)
2405		goto err_iclk_unprepare;
2406
2407	err = atmel_aes_dma_init(aes_dd);
2408	if (err)
2409		goto err_buff_cleanup;
2410
2411	spin_lock(&atmel_aes.lock);
2412	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
2413	spin_unlock(&atmel_aes.lock);
2414
2415	err = atmel_aes_register_algs(aes_dd);
2416	if (err)
2417		goto err_algs;
2418
2419	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
2420			dma_chan_name(aes_dd->src.chan),
2421			dma_chan_name(aes_dd->dst.chan));
2422
2423	return 0;
2424
2425err_algs:
2426	spin_lock(&atmel_aes.lock);
2427	list_del(&aes_dd->list);
2428	spin_unlock(&atmel_aes.lock);
2429	atmel_aes_dma_cleanup(aes_dd);
2430err_buff_cleanup:
2431	atmel_aes_buff_cleanup(aes_dd);
2432err_iclk_unprepare:
2433	clk_unprepare(aes_dd->iclk);
2434err_tasklet_kill:
2435	tasklet_kill(&aes_dd->done_task);
2436	tasklet_kill(&aes_dd->queue_task);
2437
2438	return err;
2439}
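
/*
 * The error labels above unwind in the reverse order of acquisition:
 * algorithm registration, the dev_list entry, DMA channels, the DMA
 * buffer, the prepared clock, then the tasklets. Resources obtained with
 * devm_*() helpers (the MMIO mapping, IRQ and clock handle) are released
 * automatically by the driver core and need no explicit label.
 */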
2440
2441static void atmel_aes_remove(struct platform_device *pdev)
2442{
2443	struct atmel_aes_dev *aes_dd;
2444
2445	aes_dd = platform_get_drvdata(pdev);
2446
2447	spin_lock(&atmel_aes.lock);
2448	list_del(&aes_dd->list);
2449	spin_unlock(&atmel_aes.lock);
2450
2451	atmel_aes_unregister_algs(aes_dd);
2452
2453	tasklet_kill(&aes_dd->done_task);
2454	tasklet_kill(&aes_dd->queue_task);
2455
2456	atmel_aes_dma_cleanup(aes_dd);
2457	atmel_aes_buff_cleanup(aes_dd);
2458
2459	clk_unprepare(aes_dd->iclk);
2460}
2461
2462static struct platform_driver atmel_aes_driver = {
2463	.probe		= atmel_aes_probe,
2464	.remove_new	= atmel_aes_remove,
2465	.driver		= {
2466		.name	= "atmel_aes",
2467		.of_match_table = atmel_aes_dt_ids,
2468	},
2469};
2470
2471module_platform_driver(atmel_aes_driver);
2472
2473MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
2474MODULE_LICENSE("GPL v2");
2475MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
v4.6
 
   1/*
   2 * Cryptographic API.
   3 *
   4 * Support for ATMEL AES HW acceleration.
   5 *
   6 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
   7 * Author: Nicolas Royer <nicolas@eukrea.com>
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as published
  11 * by the Free Software Foundation.
  12 *
  13 * Some ideas are from omap-aes.c driver.
  14 */
  15
  16
  17#include <linux/kernel.h>
  18#include <linux/module.h>
  19#include <linux/slab.h>
  20#include <linux/err.h>
  21#include <linux/clk.h>
  22#include <linux/io.h>
  23#include <linux/hw_random.h>
  24#include <linux/platform_device.h>
  25
  26#include <linux/device.h>
 
  27#include <linux/init.h>
  28#include <linux/errno.h>
  29#include <linux/interrupt.h>
  30#include <linux/irq.h>
  31#include <linux/scatterlist.h>
  32#include <linux/dma-mapping.h>
  33#include <linux/of_device.h>
  34#include <linux/delay.h>
  35#include <linux/crypto.h>
  36#include <crypto/scatterwalk.h>
  37#include <crypto/algapi.h>
  38#include <crypto/aes.h>
 
 
  39#include <crypto/internal/aead.h>
  40#include <linux/platform_data/crypto-atmel.h>
  41#include <dt-bindings/dma/at91.h>
  42#include "atmel-aes-regs.h"
 
  43
  44#define ATMEL_AES_PRIORITY	300
  45
  46#define ATMEL_AES_BUFFER_ORDER	2
  47#define ATMEL_AES_BUFFER_SIZE	(PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)
  48
  49#define CFB8_BLOCK_SIZE		1
  50#define CFB16_BLOCK_SIZE	2
  51#define CFB32_BLOCK_SIZE	4
  52#define CFB64_BLOCK_SIZE	8
  53
  54#define SIZE_IN_WORDS(x)	((x) >> 2)
  55
  56/* AES flags */
  57/* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */
  58#define AES_FLAGS_ENCRYPT	AES_MR_CYPHER_ENC
  59#define AES_FLAGS_GTAGEN	AES_MR_GTAGEN
  60#define AES_FLAGS_OPMODE_MASK	(AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
  61#define AES_FLAGS_ECB		AES_MR_OPMOD_ECB
  62#define AES_FLAGS_CBC		AES_MR_OPMOD_CBC
  63#define AES_FLAGS_OFB		AES_MR_OPMOD_OFB
  64#define AES_FLAGS_CFB128	(AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
  65#define AES_FLAGS_CFB64		(AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
  66#define AES_FLAGS_CFB32		(AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
  67#define AES_FLAGS_CFB16		(AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
  68#define AES_FLAGS_CFB8		(AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
  69#define AES_FLAGS_CTR		AES_MR_OPMOD_CTR
  70#define AES_FLAGS_GCM		AES_MR_OPMOD_GCM
 
  71
  72#define AES_FLAGS_MODE_MASK	(AES_FLAGS_OPMODE_MASK |	\
  73				 AES_FLAGS_ENCRYPT |		\
  74				 AES_FLAGS_GTAGEN)
  75
  76#define AES_FLAGS_INIT		BIT(2)
  77#define AES_FLAGS_BUSY		BIT(3)
  78#define AES_FLAGS_DUMP_REG	BIT(4)
 
  79
  80#define AES_FLAGS_PERSISTENT	(AES_FLAGS_INIT | AES_FLAGS_BUSY)
  81
  82#define ATMEL_AES_QUEUE_LENGTH	50
  83
  84#define ATMEL_AES_DMA_THRESHOLD		256
  85
  86
  87struct atmel_aes_caps {
  88	bool			has_dualbuff;
  89	bool			has_cfb64;
  90	bool			has_ctr32;
  91	bool			has_gcm;
 
 
  92	u32			max_burst_size;
  93};
  94
  95struct atmel_aes_dev;
  96
  97
  98typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);
  99
 100
 101struct atmel_aes_base_ctx {
 102	struct atmel_aes_dev	*dd;
 103	atmel_aes_fn_t		start;
 104	int			keylen;
 105	u32			key[AES_KEYSIZE_256 / sizeof(u32)];
 106	u16			block_size;
 
 107};
 108
 109struct atmel_aes_ctx {
 110	struct atmel_aes_base_ctx	base;
 111};
 112
 113struct atmel_aes_ctr_ctx {
 114	struct atmel_aes_base_ctx	base;
 115
 116	u32			iv[AES_BLOCK_SIZE / sizeof(u32)];
 117	size_t			offset;
 118	struct scatterlist	src[2];
 119	struct scatterlist	dst[2];
 
 120};
 121
 122struct atmel_aes_gcm_ctx {
 123	struct atmel_aes_base_ctx	base;
 124
 125	struct scatterlist	src[2];
 126	struct scatterlist	dst[2];
 127
 128	u32			j0[AES_BLOCK_SIZE / sizeof(u32)];
 129	u32			tag[AES_BLOCK_SIZE / sizeof(u32)];
 130	u32			ghash[AES_BLOCK_SIZE / sizeof(u32)];
 131	size_t			textlen;
 132
 133	const u32		*ghash_in;
 134	u32			*ghash_out;
 135	atmel_aes_fn_t		ghash_resume;
 136};
 137
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 138struct atmel_aes_reqctx {
 139	unsigned long		mode;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 140};
 
 141
 142struct atmel_aes_dma {
 143	struct dma_chan		*chan;
 144	struct scatterlist	*sg;
 145	int			nents;
 146	unsigned int		remainder;
 147	unsigned int		sg_len;
 148};
 149
 150struct atmel_aes_dev {
 151	struct list_head	list;
 152	unsigned long		phys_base;
 153	void __iomem		*io_base;
 154
 155	struct crypto_async_request	*areq;
 156	struct atmel_aes_base_ctx	*ctx;
 157
 158	bool			is_async;
 159	atmel_aes_fn_t		resume;
 160	atmel_aes_fn_t		cpu_transfer_complete;
 161
 162	struct device		*dev;
 163	struct clk		*iclk;
 164	int			irq;
 165
 166	unsigned long		flags;
 167
 168	spinlock_t		lock;
 169	struct crypto_queue	queue;
 170
 171	struct tasklet_struct	done_task;
 172	struct tasklet_struct	queue_task;
 173
 174	size_t			total;
 175	size_t			datalen;
 176	u32			*data;
 177
 178	struct atmel_aes_dma	src;
 179	struct atmel_aes_dma	dst;
 180
 181	size_t			buflen;
 182	void			*buf;
 183	struct scatterlist	aligned_sg;
 184	struct scatterlist	*real_dst;
 185
 186	struct atmel_aes_caps	caps;
 187
 188	u32			hw_version;
 189};
 190
 191struct atmel_aes_drv {
 192	struct list_head	dev_list;
 193	spinlock_t		lock;
 194};
 195
 196static struct atmel_aes_drv atmel_aes = {
 197	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
 198	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
 199};
 200
 201#ifdef VERBOSE_DEBUG
 202static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
 203{
 204	switch (offset) {
 205	case AES_CR:
 206		return "CR";
 207
 208	case AES_MR:
 209		return "MR";
 210
 211	case AES_ISR:
 212		return "ISR";
 213
 214	case AES_IMR:
 215		return "IMR";
 216
 217	case AES_IER:
 218		return "IER";
 219
 220	case AES_IDR:
 221		return "IDR";
 222
 223	case AES_KEYWR(0):
 224	case AES_KEYWR(1):
 225	case AES_KEYWR(2):
 226	case AES_KEYWR(3):
 227	case AES_KEYWR(4):
 228	case AES_KEYWR(5):
 229	case AES_KEYWR(6):
 230	case AES_KEYWR(7):
 231		snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2);
 232		break;
 233
 234	case AES_IDATAR(0):
 235	case AES_IDATAR(1):
 236	case AES_IDATAR(2):
 237	case AES_IDATAR(3):
 238		snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2);
 239		break;
 240
 241	case AES_ODATAR(0):
 242	case AES_ODATAR(1):
 243	case AES_ODATAR(2):
 244	case AES_ODATAR(3):
 245		snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2);
 246		break;
 247
 248	case AES_IVR(0):
 249	case AES_IVR(1):
 250	case AES_IVR(2):
 251	case AES_IVR(3):
 252		snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2);
 253		break;
 254
 255	case AES_AADLENR:
 256		return "AADLENR";
 257
 258	case AES_CLENR:
 259		return "CLENR";
 260
 261	case AES_GHASHR(0):
 262	case AES_GHASHR(1):
 263	case AES_GHASHR(2):
 264	case AES_GHASHR(3):
 265		snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2);
 266		break;
 267
 268	case AES_TAGR(0):
 269	case AES_TAGR(1):
 270	case AES_TAGR(2):
 271	case AES_TAGR(3):
 272		snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2);
 273		break;
 274
 275	case AES_CTRR:
 276		return "CTRR";
 277
 278	case AES_GCMHR(0):
 279	case AES_GCMHR(1):
 280	case AES_GCMHR(2):
 281	case AES_GCMHR(3):
 282		snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
 283		break;
 284
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 285	default:
 286		snprintf(tmp, sz, "0x%02x", offset);
 287		break;
 288	}
 289
 290	return tmp;
 291}
 292#endif /* VERBOSE_DEBUG */
 293
 294/* Shared functions */
 295
 296static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
 297{
 298	u32 value = readl_relaxed(dd->io_base + offset);
 299
 300#ifdef VERBOSE_DEBUG
 301	if (dd->flags & AES_FLAGS_DUMP_REG) {
 302		char tmp[16];
 303
 304		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
 305			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
 306	}
 307#endif /* VERBOSE_DEBUG */
 308
 309	return value;
 310}
 311
 312static inline void atmel_aes_write(struct atmel_aes_dev *dd,
 313					u32 offset, u32 value)
 314{
 315#ifdef VERBOSE_DEBUG
 316	if (dd->flags & AES_FLAGS_DUMP_REG) {
 317		char tmp[16];
 318
 319		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
 320			 atmel_aes_reg_name(offset, tmp));
 321	}
 322#endif /* VERBOSE_DEBUG */
 323
 324	writel_relaxed(value, dd->io_base + offset);
 325}
 326
 327static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
 328					u32 *value, int count)
 329{
 330	for (; count--; value++, offset += 4)
 331		*value = atmel_aes_read(dd, offset);
 332}
 333
 334static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
 335			      const u32 *value, int count)
 336{
 337	for (; count--; value++, offset += 4)
 338		atmel_aes_write(dd, offset, *value);
 339}
 340
 341static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
 342					u32 *value)
 343{
 344	atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
 345}
 346
 347static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
 348					 const u32 *value)
 349{
 350	atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
 351}
 352
 353static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
 354						atmel_aes_fn_t resume)
 355{
 356	u32 isr = atmel_aes_read(dd, AES_ISR);
 357
 358	if (unlikely(isr & AES_INT_DATARDY))
 359		return resume(dd);
 360
 361	dd->resume = resume;
 362	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
 363	return -EINPROGRESS;
 364}
 365
 366static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
 367{
 368	len &= block_size - 1;
 369	return len ? block_size - len : 0;
 370}
 371
 372static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
 373{
 374	struct atmel_aes_dev *aes_dd = NULL;
 375	struct atmel_aes_dev *tmp;
 376
 377	spin_lock_bh(&atmel_aes.lock);
 378	if (!ctx->dd) {
 379		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
 380			aes_dd = tmp;
 381			break;
 382		}
 383		ctx->dd = aes_dd;
 384	} else {
 385		aes_dd = ctx->dd;
 386	}
 387
 388	spin_unlock_bh(&atmel_aes.lock);
 389
 390	return aes_dd;
 391}
 392
 393static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
 394{
 395	int err;
 396
 397	err = clk_enable(dd->iclk);
 398	if (err)
 399		return err;
 400
 401	if (!(dd->flags & AES_FLAGS_INIT)) {
 402		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
 403		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
 404		dd->flags |= AES_FLAGS_INIT;
 405	}
 406
 407	return 0;
 408}
 409
 410static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
 411{
 412	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
 413}
 414
 415static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
 416{
 417	int err;
 418
 419	err = atmel_aes_hw_init(dd);
 420	if (err)
 421		return err;
 422
 423	dd->hw_version = atmel_aes_get_version(dd);
 424
 425	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
 426
 427	clk_disable(dd->iclk);
 428	return 0;
 429}
 430
 431static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
 432				      const struct atmel_aes_reqctx *rctx)
 433{
 434	/* Clear all but persistent flags and set request flags. */
 435	dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
 436}
 437
 438static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
 439{
 440	return (dd->flags & AES_FLAGS_ENCRYPT);
 441}
 442
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 443static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
 444{
 
 
 
 
 
 
 
 
 445	clk_disable(dd->iclk);
 446	dd->flags &= ~AES_FLAGS_BUSY;
 447
 
 
 
 
 
 
 
 
 448	if (dd->is_async)
 449		dd->areq->complete(dd->areq, err);
 450
 451	tasklet_schedule(&dd->queue_task);
 452
 453	return err;
 454}
 455
 456static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
 457				 const u32 *iv)
 458{
 459	u32 valmr = 0;
 460
 461	/* MR register must be set before IV registers */
 462	if (dd->ctx->keylen == AES_KEYSIZE_128)
 463		valmr |= AES_MR_KEYSIZE_128;
 464	else if (dd->ctx->keylen == AES_KEYSIZE_192)
 465		valmr |= AES_MR_KEYSIZE_192;
 466	else
 467		valmr |= AES_MR_KEYSIZE_256;
 468
 469	valmr |= dd->flags & AES_FLAGS_MODE_MASK;
 470
 471	if (use_dma) {
 472		valmr |= AES_MR_SMOD_IDATAR0;
 473		if (dd->caps.has_dualbuff)
 474			valmr |= AES_MR_DUALBUFF;
 475	} else {
 476		valmr |= AES_MR_SMOD_AUTO;
 477	}
 478
 479	atmel_aes_write(dd, AES_MR, valmr);
 480
 481	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
 482			  SIZE_IN_WORDS(dd->ctx->keylen));
 483
 484	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
 485		atmel_aes_write_block(dd, AES_IVR(0), iv);
 486}
 487
 
 
 
 
 
 
 
 488
 489/* CPU transfer */
 490
 491static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
 492{
 493	int err = 0;
 494	u32 isr;
 495
 496	for (;;) {
 497		atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
 498		dd->data += 4;
 499		dd->datalen -= AES_BLOCK_SIZE;
 500
 501		if (dd->datalen < AES_BLOCK_SIZE)
 502			break;
 503
 504		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
 505
 506		isr = atmel_aes_read(dd, AES_ISR);
 507		if (!(isr & AES_INT_DATARDY)) {
 508			dd->resume = atmel_aes_cpu_transfer;
 509			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
 510			return -EINPROGRESS;
 511		}
 512	}
 513
 514	if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
 515				 dd->buf, dd->total))
 516		err = -EINVAL;
 517
 518	if (err)
 519		return atmel_aes_complete(dd, err);
 520
 521	return dd->cpu_transfer_complete(dd);
 522}
 523
 524static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
 525			       struct scatterlist *src,
 526			       struct scatterlist *dst,
 527			       size_t len,
 528			       atmel_aes_fn_t resume)
 529{
 530	size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE);
 531
 532	if (unlikely(len == 0))
 533		return -EINVAL;
 534
 535	sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
 536
 537	dd->total = len;
 538	dd->real_dst = dst;
 539	dd->cpu_transfer_complete = resume;
 540	dd->datalen = len + padlen;
 541	dd->data = (u32 *)dd->buf;
 542	atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
 543	return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
 544}
 545
 546
 547/* DMA transfer */
 548
 549static void atmel_aes_dma_callback(void *data);
 550
 551static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
 552				    struct scatterlist *sg,
 553				    size_t len,
 554				    struct atmel_aes_dma *dma)
 555{
 556	int nents;
 557
 558	if (!IS_ALIGNED(len, dd->ctx->block_size))
 559		return false;
 560
 561	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
 562		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
 563			return false;
 564
 565		if (len <= sg->length) {
 566			if (!IS_ALIGNED(len, dd->ctx->block_size))
 567				return false;
 568
 569			dma->nents = nents+1;
 570			dma->remainder = sg->length - len;
 571			sg->length = len;
 572			return true;
 573		}
 574
 575		if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
 576			return false;
 577
 578		len -= sg->length;
 579	}
 580
 581	return false;
 582}
 583
 584static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
 585{
 586	struct scatterlist *sg = dma->sg;
 587	int nents = dma->nents;
 588
 589	if (!dma->remainder)
 590		return;
 591
 592	while (--nents > 0 && sg)
 593		sg = sg_next(sg);
 594
 595	if (!sg)
 596		return;
 597
 598	sg->length += dma->remainder;
 599}
 600
 601static int atmel_aes_map(struct atmel_aes_dev *dd,
 602			 struct scatterlist *src,
 603			 struct scatterlist *dst,
 604			 size_t len)
 605{
 606	bool src_aligned, dst_aligned;
 607	size_t padlen;
 608
 609	dd->total = len;
 610	dd->src.sg = src;
 611	dd->dst.sg = dst;
 612	dd->real_dst = dst;
 613
 614	src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
 615	if (src == dst)
 616		dst_aligned = src_aligned;
 617	else
 618		dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
 619	if (!src_aligned || !dst_aligned) {
 620		padlen = atmel_aes_padlen(len, dd->ctx->block_size);
 621
 622		if (dd->buflen < len + padlen)
 623			return -ENOMEM;
 624
 625		if (!src_aligned) {
 626			sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
 627			dd->src.sg = &dd->aligned_sg;
 628			dd->src.nents = 1;
 629			dd->src.remainder = 0;
 630		}
 631
 632		if (!dst_aligned) {
 633			dd->dst.sg = &dd->aligned_sg;
 634			dd->dst.nents = 1;
 635			dd->dst.remainder = 0;
 636		}
 637
 638		sg_init_table(&dd->aligned_sg, 1);
 639		sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
 640	}
 641
 642	if (dd->src.sg == dd->dst.sg) {
 643		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
 644					    DMA_BIDIRECTIONAL);
 645		dd->dst.sg_len = dd->src.sg_len;
 646		if (!dd->src.sg_len)
 647			return -EFAULT;
 648	} else {
 649		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
 650					    DMA_TO_DEVICE);
 651		if (!dd->src.sg_len)
 652			return -EFAULT;
 653
 654		dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
 655					    DMA_FROM_DEVICE);
 656		if (!dd->dst.sg_len) {
 657			dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
 658				     DMA_TO_DEVICE);
 659			return -EFAULT;
 660		}
 661	}
 662
 663	return 0;
 664}
 665
 666static void atmel_aes_unmap(struct atmel_aes_dev *dd)
 667{
 668	if (dd->src.sg == dd->dst.sg) {
 669		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
 670			     DMA_BIDIRECTIONAL);
 671
 672		if (dd->src.sg != &dd->aligned_sg)
 673			atmel_aes_restore_sg(&dd->src);
 674	} else {
 675		dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
 676			     DMA_FROM_DEVICE);
 677
 678		if (dd->dst.sg != &dd->aligned_sg)
 679			atmel_aes_restore_sg(&dd->dst);
 680
 681		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
 682			     DMA_TO_DEVICE);
 683
 684		if (dd->src.sg != &dd->aligned_sg)
 685			atmel_aes_restore_sg(&dd->src);
 686	}
 687
 688	if (dd->dst.sg == &dd->aligned_sg)
 689		sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
 690				    dd->buf, dd->total);
 691}
 692
 693static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
 694					enum dma_slave_buswidth addr_width,
 695					enum dma_transfer_direction dir,
 696					u32 maxburst)
 697{
 698	struct dma_async_tx_descriptor *desc;
 699	struct dma_slave_config config;
 700	dma_async_tx_callback callback;
 701	struct atmel_aes_dma *dma;
 702	int err;
 703
 704	memset(&config, 0, sizeof(config));
 705	config.direction = dir;
 706	config.src_addr_width = addr_width;
 707	config.dst_addr_width = addr_width;
 708	config.src_maxburst = maxburst;
 709	config.dst_maxburst = maxburst;
 710
 711	switch (dir) {
 712	case DMA_MEM_TO_DEV:
 713		dma = &dd->src;
 714		callback = NULL;
 715		config.dst_addr = dd->phys_base + AES_IDATAR(0);
 716		break;
 717
 718	case DMA_DEV_TO_MEM:
 719		dma = &dd->dst;
 720		callback = atmel_aes_dma_callback;
 721		config.src_addr = dd->phys_base + AES_ODATAR(0);
 722		break;
 723
 724	default:
 725		return -EINVAL;
 726	}
 727
 728	err = dmaengine_slave_config(dma->chan, &config);
 729	if (err)
 730		return err;
 731
 732	desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
 733				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 734	if (!desc)
 735		return -ENOMEM;
 736
 737	desc->callback = callback;
 738	desc->callback_param = dd;
 739	dmaengine_submit(desc);
 740	dma_async_issue_pending(dma->chan);
 741
 742	return 0;
 743}
 744
 745static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
 746					enum dma_transfer_direction dir)
 747{
 748	struct atmel_aes_dma *dma;
 749
 750	switch (dir) {
 751	case DMA_MEM_TO_DEV:
 752		dma = &dd->src;
 753		break;
 754
 755	case DMA_DEV_TO_MEM:
 756		dma = &dd->dst;
 757		break;
 758
 759	default:
 760		return;
 761	}
 762
 763	dmaengine_terminate_all(dma->chan);
 764}
 765
 766static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
 767			       struct scatterlist *src,
 768			       struct scatterlist *dst,
 769			       size_t len,
 770			       atmel_aes_fn_t resume)
 771{
 772	enum dma_slave_buswidth addr_width;
 773	u32 maxburst;
 774	int err;
 775
 776	switch (dd->ctx->block_size) {
 777	case CFB8_BLOCK_SIZE:
 778		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
 779		maxburst = 1;
 780		break;
 781
 782	case CFB16_BLOCK_SIZE:
 783		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
 784		maxburst = 1;
 785		break;
 786
 787	case CFB32_BLOCK_SIZE:
 788	case CFB64_BLOCK_SIZE:
 789		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 790		maxburst = 1;
 791		break;
 792
 793	case AES_BLOCK_SIZE:
 794		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 795		maxburst = dd->caps.max_burst_size;
 796		break;
 797
 798	default:
 799		err = -EINVAL;
 800		goto exit;
 801	}
 802
 803	err = atmel_aes_map(dd, src, dst, len);
 804	if (err)
 805		goto exit;
 806
 807	dd->resume = resume;
 808
 809	/* Set output DMA transfer first */
 810	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
 811					   maxburst);
 812	if (err)
 813		goto unmap;
 814
 815	/* Then set input DMA transfer */
 816	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
 817					   maxburst);
 818	if (err)
 819		goto output_transfer_stop;
 820
 821	return -EINPROGRESS;
 822
 823output_transfer_stop:
 824	atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
 825unmap:
 826	atmel_aes_unmap(dd);
 827exit:
 828	return atmel_aes_complete(dd, err);
 829}
 830
 831static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
 832{
 833	atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
 834	atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
 835	atmel_aes_unmap(dd);
 836}
 837
 838static void atmel_aes_dma_callback(void *data)
 839{
 840	struct atmel_aes_dev *dd = data;
 841
 842	atmel_aes_dma_stop(dd);
 843	dd->is_async = true;
 844	(void)dd->resume(dd);
 845}
 846
 847static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
 848				  struct crypto_async_request *new_areq)
 849{
 850	struct crypto_async_request *areq, *backlog;
 851	struct atmel_aes_base_ctx *ctx;
 852	unsigned long flags;
 
 853	int err, ret = 0;
 854
 855	spin_lock_irqsave(&dd->lock, flags);
 856	if (new_areq)
 857		ret = crypto_enqueue_request(&dd->queue, new_areq);
 858	if (dd->flags & AES_FLAGS_BUSY) {
 859		spin_unlock_irqrestore(&dd->lock, flags);
 860		return ret;
 861	}
 862	backlog = crypto_get_backlog(&dd->queue);
 863	areq = crypto_dequeue_request(&dd->queue);
 864	if (areq)
 865		dd->flags |= AES_FLAGS_BUSY;
 866	spin_unlock_irqrestore(&dd->lock, flags);
 867
 868	if (!areq)
 869		return ret;
 870
 871	if (backlog)
 872		backlog->complete(backlog, -EINPROGRESS);
 873
 874	ctx = crypto_tfm_ctx(areq->tfm);
 875
 876	dd->areq = areq;
 877	dd->ctx = ctx;
 878	dd->is_async = (areq != new_areq);
 
 879
 
 880	err = ctx->start(dd);
 881	return (dd->is_async) ? ret : err;
 882}
 883
 884
 885/* AES async block ciphers */
 886
 887static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
 888{
 889	return atmel_aes_complete(dd, 0);
 890}
 891
 892static int atmel_aes_start(struct atmel_aes_dev *dd)
 893{
 894	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
 895	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 896	bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD ||
 897			dd->ctx->block_size != AES_BLOCK_SIZE);
 898	int err;
 899
 900	atmel_aes_set_mode(dd, rctx);
 901
 902	err = atmel_aes_hw_init(dd);
 903	if (err)
 904		return atmel_aes_complete(dd, err);
 905
 906	atmel_aes_write_ctrl(dd, use_dma, req->info);
 907	if (use_dma)
 908		return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
 
 909					   atmel_aes_transfer_complete);
 910
 911	return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
 912				   atmel_aes_transfer_complete);
 913}
 914
 915static inline struct atmel_aes_ctr_ctx *
 916atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
 917{
 918	return container_of(ctx, struct atmel_aes_ctr_ctx, base);
 919}
 920
 921static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
 922{
 923	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
 924	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
 925	struct scatterlist *src, *dst;
 926	u32 ctr, blocks;
 927	size_t datalen;
 
 
 928	bool use_dma, fragmented = false;
 929
 930	/* Check for transfer completion. */
 931	ctx->offset += dd->total;
 932	if (ctx->offset >= req->nbytes)
 933		return atmel_aes_transfer_complete(dd);
 934
 935	/* Compute data length. */
 936	datalen = req->nbytes - ctx->offset;
 937	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
 938	ctr = be32_to_cpu(ctx->iv[3]);
 939	if (dd->caps.has_ctr32) {
 940		/* Check 32bit counter overflow. */
 941		u32 start = ctr;
 942		u32 end = start + blocks - 1;
 943
 944		if (end < start) {
 945			ctr |= 0xffffffff;
 946			datalen = AES_BLOCK_SIZE * -start;
 947			fragmented = true;
 948		}
 949	} else {
 950		/* Check 16bit counter overflow. */
 951		u16 start = ctr & 0xffff;
 952		u16 end = start + (u16)blocks - 1;
 953
 954		if (blocks >> 16 || end < start) {
 955			ctr |= 0xffff;
 956			datalen = AES_BLOCK_SIZE * (0x10000-start);
 957			fragmented = true;
 958		}
 959	}
 
 960	use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
 961
 962	/* Jump to offset. */
 963	src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
 964	dst = ((req->src == req->dst) ? src :
 965	       scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));
 966
 967	/* Configure hardware. */
 968	atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
 969	if (unlikely(fragmented)) {
 970		/*
 971		 * Increment the counter manually to cope with the hardware
 972		 * counter overflow.
 973		 */
 974		ctx->iv[3] = cpu_to_be32(ctr);
 975		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
 976	}
 977
 978	if (use_dma)
 979		return atmel_aes_dma_start(dd, src, dst, datalen,
 980					   atmel_aes_ctr_transfer);
 981
 982	return atmel_aes_cpu_start(dd, src, dst, datalen,
 983				   atmel_aes_ctr_transfer);
 984}
 985
 986static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
 987{
 988	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
 989	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
 990	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 991	int err;
 992
 993	atmel_aes_set_mode(dd, rctx);
 994
 995	err = atmel_aes_hw_init(dd);
 996	if (err)
 997		return atmel_aes_complete(dd, err);
 998
 999	memcpy(ctx->iv, req->info, AES_BLOCK_SIZE);
1000	ctx->offset = 0;
1001	dd->total = 0;
1002	return atmel_aes_ctr_transfer(dd);
1003}
1004
1005static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1006{
1007	struct atmel_aes_base_ctx *ctx;
 
1008	struct atmel_aes_reqctx *rctx;
1009	struct atmel_aes_dev *dd;
1010
1011	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
1012	switch (mode & AES_FLAGS_OPMODE_MASK) {
1013	case AES_FLAGS_CFB8:
1014		ctx->block_size = CFB8_BLOCK_SIZE;
1015		break;
 
 
 
1016
1017	case AES_FLAGS_CFB16:
1018		ctx->block_size = CFB16_BLOCK_SIZE;
1019		break;
 
 
 
1020
1021	case AES_FLAGS_CFB32:
1022		ctx->block_size = CFB32_BLOCK_SIZE;
1023		break;
1024
1025	case AES_FLAGS_CFB64:
1026		ctx->block_size = CFB64_BLOCK_SIZE;
1027		break;
1028
1029	default:
1030		ctx->block_size = AES_BLOCK_SIZE;
1031		break;
1032	}
1033
1034	dd = atmel_aes_find_dev(ctx);
1035	if (!dd)
1036		return -ENODEV;
1037
1038	rctx = ablkcipher_request_ctx(req);
1039	rctx->mode = mode;
 
 
 
1040
1041	return atmel_aes_handle_queue(dd, &req->base);
1042}
1043
1044static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
1045			   unsigned int keylen)
1046{
1047	struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
1048
1049	if (keylen != AES_KEYSIZE_128 &&
1050	    keylen != AES_KEYSIZE_192 &&
1051	    keylen != AES_KEYSIZE_256) {
1052		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1053		return -EINVAL;
1054	}
1055
1056	memcpy(ctx->key, key, keylen);
1057	ctx->keylen = keylen;
1058
1059	return 0;
1060}
1061
1062static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
1063{
1064	return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
1065}
1066
1067static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
1068{
1069	return atmel_aes_crypt(req, AES_FLAGS_ECB);
1070}
1071
1072static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
1073{
1074	return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
1075}
1076
1077static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
1078{
1079	return atmel_aes_crypt(req, AES_FLAGS_CBC);
1080}
1081
1082static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
1083{
1084	return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
1085}
1086
1087static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
1088{
1089	return atmel_aes_crypt(req, AES_FLAGS_OFB);
1090}
1091
1092static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
1093{
1094	return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
1095}
1096
1097static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
1098{
1099	return atmel_aes_crypt(req, AES_FLAGS_CFB128);
1100}
1101
1102static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
1103{
1104	return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
1105}
1106
1107static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
1108{
1109	return atmel_aes_crypt(req, AES_FLAGS_CFB64);
1110}
1111
1112static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
1113{
1114	return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
1115}
1116
1117static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
1118{
1119	return atmel_aes_crypt(req, AES_FLAGS_CFB32);
1120}
1121
1122static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
1123{
1124	return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
1125}
1126
1127static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
1128{
1129	return atmel_aes_crypt(req, AES_FLAGS_CFB16);
1130}
1131
1132static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
1133{
1134	return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
1135}
1136
1137static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
1138{
1139	return atmel_aes_crypt(req, AES_FLAGS_CFB8);
1140}
1141
1142static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
1143{
1144	return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
1145}
1146
1147static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
1148{
1149	return atmel_aes_crypt(req, AES_FLAGS_CTR);
1150}
1151
1152static int atmel_aes_cra_init(struct crypto_tfm *tfm)
1153{
1154	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
 
 
 
 
1155
1156	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
 
1157	ctx->base.start = atmel_aes_start;
1158
1159	return 0;
1160}
1161
1162static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
1163{
1164	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
 
 
 
 
1165
1166	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
 
1167	ctx->base.start = atmel_aes_ctr_start;
1168
1169	return 0;
1170}
1171
1172static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
1173{
1174}
1175
1176static struct crypto_alg aes_algs[] = {
1177{
1178	.cra_name		= "ecb(aes)",
1179	.cra_driver_name	= "atmel-ecb-aes",
1180	.cra_priority		= ATMEL_AES_PRIORITY,
1181	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1182	.cra_blocksize		= AES_BLOCK_SIZE,
1183	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
1184	.cra_alignmask		= 0xf,
1185	.cra_type		= &crypto_ablkcipher_type,
1186	.cra_module		= THIS_MODULE,
1187	.cra_init		= atmel_aes_cra_init,
1188	.cra_exit		= atmel_aes_cra_exit,
1189	.cra_u.ablkcipher = {
1190		.min_keysize	= AES_MIN_KEY_SIZE,
1191		.max_keysize	= AES_MAX_KEY_SIZE,
1192		.setkey		= atmel_aes_setkey,
1193		.encrypt	= atmel_aes_ecb_encrypt,
1194		.decrypt	= atmel_aes_ecb_decrypt,
1195	}
1196},
1197{
1198	.cra_name		= "cbc(aes)",
1199	.cra_driver_name	= "atmel-cbc-aes",
1200	.cra_priority		= ATMEL_AES_PRIORITY,
1201	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1202	.cra_blocksize		= AES_BLOCK_SIZE,
1203	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
1204	.cra_alignmask		= 0xf,
1205	.cra_type		= &crypto_ablkcipher_type,
1206	.cra_module		= THIS_MODULE,
1207	.cra_init		= atmel_aes_cra_init,
1208	.cra_exit		= atmel_aes_cra_exit,
1209	.cra_u.ablkcipher = {
1210		.min_keysize	= AES_MIN_KEY_SIZE,
1211		.max_keysize	= AES_MAX_KEY_SIZE,
1212		.ivsize		= AES_BLOCK_SIZE,
1213		.setkey		= atmel_aes_setkey,
1214		.encrypt	= atmel_aes_cbc_encrypt,
1215		.decrypt	= atmel_aes_cbc_decrypt,
1216	}
1217},
1218{
1219	.cra_name		= "ofb(aes)",
1220	.cra_driver_name	= "atmel-ofb-aes",
1221	.cra_priority		= ATMEL_AES_PRIORITY,
1222	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1223	.cra_blocksize		= AES_BLOCK_SIZE,
1224	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
1225	.cra_alignmask		= 0xf,
1226	.cra_type		= &crypto_ablkcipher_type,
1227	.cra_module		= THIS_MODULE,
1228	.cra_init		= atmel_aes_cra_init,
1229	.cra_exit		= atmel_aes_cra_exit,
1230	.cra_u.ablkcipher = {
1231		.min_keysize	= AES_MIN_KEY_SIZE,
1232		.max_keysize	= AES_MAX_KEY_SIZE,
1233		.ivsize		= AES_BLOCK_SIZE,
1234		.setkey		= atmel_aes_setkey,
1235		.encrypt	= atmel_aes_ofb_encrypt,
1236		.decrypt	= atmel_aes_ofb_decrypt,
1237	}
1238},
1239{
1240	.cra_name		= "cfb(aes)",
1241	.cra_driver_name	= "atmel-cfb-aes",
1242	.cra_priority		= ATMEL_AES_PRIORITY,
1243	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1244	.cra_blocksize		= AES_BLOCK_SIZE,
1245	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
1246	.cra_alignmask		= 0xf,
1247	.cra_type		= &crypto_ablkcipher_type,
1248	.cra_module		= THIS_MODULE,
1249	.cra_init		= atmel_aes_cra_init,
1250	.cra_exit		= atmel_aes_cra_exit,
1251	.cra_u.ablkcipher = {
1252		.min_keysize	= AES_MIN_KEY_SIZE,
1253		.max_keysize	= AES_MAX_KEY_SIZE,
1254		.ivsize		= AES_BLOCK_SIZE,
1255		.setkey		= atmel_aes_setkey,
1256		.encrypt	= atmel_aes_cfb_encrypt,
1257		.decrypt	= atmel_aes_cfb_decrypt,
1258	}
1259},
1260{
1261	.cra_name		= "cfb32(aes)",
1262	.cra_driver_name	= "atmel-cfb32-aes",
1263	.cra_priority		= ATMEL_AES_PRIORITY,
1264	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1265	.cra_blocksize		= CFB32_BLOCK_SIZE,
1266	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
1267	.cra_alignmask		= 0x3,
1268	.cra_type		= &crypto_ablkcipher_type,
1269	.cra_module		= THIS_MODULE,
1270	.cra_init		= atmel_aes_cra_init,
1271	.cra_exit		= atmel_aes_cra_exit,
1272	.cra_u.ablkcipher = {
1273		.min_keysize	= AES_MIN_KEY_SIZE,
1274		.max_keysize	= AES_MAX_KEY_SIZE,
1275		.ivsize		= AES_BLOCK_SIZE,
1276		.setkey		= atmel_aes_setkey,
1277		.encrypt	= atmel_aes_cfb32_encrypt,
1278		.decrypt	= atmel_aes_cfb32_decrypt,
1279	}
1280},
1281{
1282	.cra_name		= "cfb16(aes)",
1283	.cra_driver_name	= "atmel-cfb16-aes",
1284	.cra_priority		= ATMEL_AES_PRIORITY,
1285	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1286	.cra_blocksize		= CFB16_BLOCK_SIZE,
1287	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
1288	.cra_alignmask		= 0x1,
1289	.cra_type		= &crypto_ablkcipher_type,
1290	.cra_module		= THIS_MODULE,
1291	.cra_init		= atmel_aes_cra_init,
1292	.cra_exit		= atmel_aes_cra_exit,
1293	.cra_u.ablkcipher = {
1294		.min_keysize	= AES_MIN_KEY_SIZE,
1295		.max_keysize	= AES_MAX_KEY_SIZE,
1296		.ivsize		= AES_BLOCK_SIZE,
1297		.setkey		= atmel_aes_setkey,
1298		.encrypt	= atmel_aes_cfb16_encrypt,
1299		.decrypt	= atmel_aes_cfb16_decrypt,
1300	}
1301},
1302{
1303	.cra_name		= "cfb8(aes)",
1304	.cra_driver_name	= "atmel-cfb8-aes",
1305	.cra_priority		= ATMEL_AES_PRIORITY,
1306	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1307	.cra_blocksize		= CFB8_BLOCK_SIZE,
1308	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
1309	.cra_alignmask		= 0x0,
1310	.cra_type		= &crypto_ablkcipher_type,
1311	.cra_module		= THIS_MODULE,
1312	.cra_init		= atmel_aes_cra_init,
1313	.cra_exit		= atmel_aes_cra_exit,
1314	.cra_u.ablkcipher = {
1315		.min_keysize	= AES_MIN_KEY_SIZE,
1316		.max_keysize	= AES_MAX_KEY_SIZE,
1317		.ivsize		= AES_BLOCK_SIZE,
1318		.setkey		= atmel_aes_setkey,
1319		.encrypt	= atmel_aes_cfb8_encrypt,
1320		.decrypt	= atmel_aes_cfb8_decrypt,
1321	}
1322},
1323{
1324	.cra_name		= "ctr(aes)",
1325	.cra_driver_name	= "atmel-ctr-aes",
1326	.cra_priority		= ATMEL_AES_PRIORITY,
1327	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1328	.cra_blocksize		= 1,
1329	.cra_ctxsize		= sizeof(struct atmel_aes_ctr_ctx),
1330	.cra_alignmask		= 0xf,
1331	.cra_type		= &crypto_ablkcipher_type,
1332	.cra_module		= THIS_MODULE,
1333	.cra_init		= atmel_aes_ctr_cra_init,
1334	.cra_exit		= atmel_aes_cra_exit,
1335	.cra_u.ablkcipher = {
1336		.min_keysize	= AES_MIN_KEY_SIZE,
1337		.max_keysize	= AES_MAX_KEY_SIZE,
1338		.ivsize		= AES_BLOCK_SIZE,
1339		.setkey		= atmel_aes_setkey,
1340		.encrypt	= atmel_aes_ctr_encrypt,
1341		.decrypt	= atmel_aes_ctr_decrypt,
1342	}
1343},
1344};
1345
1346static struct crypto_alg aes_cfb64_alg = {
1347	.cra_name		= "cfb64(aes)",
1348	.cra_driver_name	= "atmel-cfb64-aes",
1349	.cra_priority		= ATMEL_AES_PRIORITY,
1350	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1351	.cra_blocksize		= CFB64_BLOCK_SIZE,
1352	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
1353	.cra_alignmask		= 0x7,
1354	.cra_type		= &crypto_ablkcipher_type,
1355	.cra_module		= THIS_MODULE,
1356	.cra_init		= atmel_aes_cra_init,
1357	.cra_exit		= atmel_aes_cra_exit,
1358	.cra_u.ablkcipher = {
1359		.min_keysize	= AES_MIN_KEY_SIZE,
1360		.max_keysize	= AES_MAX_KEY_SIZE,
1361		.ivsize		= AES_BLOCK_SIZE,
1362		.setkey		= atmel_aes_setkey,
1363		.encrypt	= atmel_aes_cfb64_encrypt,
1364		.decrypt	= atmel_aes_cfb64_decrypt,
1365	}
1366};
1367
1368
1369/* gcm aead functions */
1370
1371static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
1372			       const u32 *data, size_t datalen,
1373			       const u32 *ghash_in, u32 *ghash_out,
1374			       atmel_aes_fn_t resume);
1375static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
1376static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);
1377
1378static int atmel_aes_gcm_start(struct atmel_aes_dev *dd);
1379static int atmel_aes_gcm_process(struct atmel_aes_dev *dd);
1380static int atmel_aes_gcm_length(struct atmel_aes_dev *dd);
1381static int atmel_aes_gcm_data(struct atmel_aes_dev *dd);
1382static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd);
1383static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd);
1384static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd);
1385
1386static inline struct atmel_aes_gcm_ctx *
1387atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
1388{
1389	return container_of(ctx, struct atmel_aes_gcm_ctx, base);
1390}
1391
1392static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
1393			       const u32 *data, size_t datalen,
1394			       const u32 *ghash_in, u32 *ghash_out,
1395			       atmel_aes_fn_t resume)
1396{
1397	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1398
1399	dd->data = (u32 *)data;
1400	dd->datalen = datalen;
1401	ctx->ghash_in = ghash_in;
1402	ctx->ghash_out = ghash_out;
1403	ctx->ghash_resume = resume;
1404
1405	atmel_aes_write_ctrl(dd, false, NULL);
1406	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init);
1407}
1408
1409static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd)
1410{
1411	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1412
1413	/* Set the data length. */
1414	atmel_aes_write(dd, AES_AADLENR, dd->total);
1415	atmel_aes_write(dd, AES_CLENR, 0);
1416
1417	/* If needed, overwrite the GCM Intermediate Hash Word Registers */
1418	if (ctx->ghash_in)
1419		atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in);
1420
1421	return atmel_aes_gcm_ghash_finalize(dd);
1422}
1423
1424static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd)
1425{
1426	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1427	u32 isr;
1428
1429	/* Write data into the Input Data Registers. */
1430	while (dd->datalen > 0) {
1431		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
1432		dd->data += 4;
1433		dd->datalen -= AES_BLOCK_SIZE;
1434
1435		isr = atmel_aes_read(dd, AES_ISR);
1436		if (!(isr & AES_INT_DATARDY)) {
1437			dd->resume = atmel_aes_gcm_ghash_finalize;
1438			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
1439			return -EINPROGRESS;
1440		}
1441	}
1442
1443	/* Read the computed hash from GHASHRx. */
1444	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out);
1445
1446	return ctx->ghash_resume(dd);
1447}
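/*
 * The two helpers above follow the driver's generic PIO handshake:
 * feed one block, test DATARDY once, and if the engine is still busy,
 * park a resume callback and let the interrupt handler re-enter. A
 * minimal sketch of the pattern (illustrative only; my_resume_fn is a
 * hypothetical callback, not part of this driver):
 *
 *	atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
 *	if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_DATARDY)) {
 *		dd->resume = my_resume_fn;
 *		atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
 *		return -EINPROGRESS;	// done_task re-enters via dd->resume
 *	}
 */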
1448
1449
1450static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
1451{
1452	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1453	struct aead_request *req = aead_request_cast(dd->areq);
1454	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1455	struct atmel_aes_reqctx *rctx = aead_request_ctx(req);
1456	size_t ivsize = crypto_aead_ivsize(tfm);
1457	size_t datalen, padlen;
1458	const void *iv = req->iv;
1459	u8 *data = dd->buf;
1460	int err;
1461
1462	atmel_aes_set_mode(dd, rctx);
1463
1464	err = atmel_aes_hw_init(dd);
1465	if (err)
1466		return atmel_aes_complete(dd, err);
1467
1468	if (likely(ivsize == 12)) {
1469		memcpy(ctx->j0, iv, ivsize);
1470		ctx->j0[3] = cpu_to_be32(1);
1471		return atmel_aes_gcm_process(dd);
1472	}
1473
1474	padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE);
1475	datalen = ivsize + padlen + AES_BLOCK_SIZE;
1476	if (datalen > dd->buflen)
1477		return atmel_aes_complete(dd, -EINVAL);
1478
1479	memcpy(data, iv, ivsize);
1480	memset(data + ivsize, 0, padlen + sizeof(u64));
1481	((u64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
1482
1483	return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
1484				   NULL, ctx->j0, atmel_aes_gcm_process);
1485}
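/*
 * J0 is derived as specified in NIST SP 800-38D, section 7.1:
 *
 *	if len(IV) == 96 bits:
 *		J0 = IV || 0^31 || 1
 *	else:
 *		s  = 128 * ceil(len(IV) / 128) - len(IV)
 *		J0 = GHASH_H(IV || 0^(s+64) || [len(IV)]_64)
 *
 * The fast path above covers the common 12-byte IV; the slow path
 * zero-pads the IV into dd->buf, appends the 64-bit IV bit length and
 * lets the hardware GHASH unit produce J0 before resuming in
 * atmel_aes_gcm_process().
 */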
1486
1487static int atmel_aes_gcm_process(struct atmel_aes_dev *dd)
1488{
1489	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1490	struct aead_request *req = aead_request_cast(dd->areq);
1491	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1492	bool enc = atmel_aes_is_encrypt(dd);
1493	u32 authsize;
1494
1495	/* Compute text length. */
1496	authsize = crypto_aead_authsize(tfm);
1497	ctx->textlen = req->cryptlen - (enc ? 0 : authsize);
1498
1499	/*
1500	 * According to the tcrypt test suite, the GCM Automatic Tag Generation
1501	 * fails when both the message and its associated data are empty.
1502	 */
1503	if (likely(req->assoclen != 0 || ctx->textlen != 0))
1504		dd->flags |= AES_FLAGS_GTAGEN;
1505
1506	atmel_aes_write_ctrl(dd, false, NULL);
1507	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length);
1508}
1509
1510static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
1511{
1512	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1513	struct aead_request *req = aead_request_cast(dd->areq);
1514	u32 j0_lsw, *j0 = ctx->j0;
1515	size_t padlen;
1516
1517	/* Write incr32(J0) into IV. */
1518	j0_lsw = j0[3];
1519	j0[3] = cpu_to_be32(be32_to_cpu(j0[3]) + 1);
1520	atmel_aes_write_block(dd, AES_IVR(0), j0);
1521	j0[3] = j0_lsw;
1522
1523	/* Set aad and text lengths. */
1524	atmel_aes_write(dd, AES_AADLENR, req->assoclen);
1525	atmel_aes_write(dd, AES_CLENR, ctx->textlen);
1526
1527	/* Check whether AAD is present. */
1528	if (unlikely(req->assoclen == 0)) {
1529		dd->datalen = 0;
1530		return atmel_aes_gcm_data(dd);
1531	}
1532
1533	/* Copy assoc data and add padding. */
1534	padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE);
1535	if (unlikely(req->assoclen + padlen > dd->buflen))
1536		return atmel_aes_complete(dd, -EINVAL);
1537	sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);
1538
1539	/* Write assoc data into the Input Data register. */
1540	dd->data = (u32 *)dd->buf;
1541	dd->datalen = req->assoclen + padlen;
1542	return atmel_aes_gcm_data(dd);
1543}
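/*
 * The "incr32(J0)" step above is inc_32() from NIST SP 800-38D: only
 * the least significant 32 bits of the counter block are incremented,
 * modulo 2^32, while the upper 96 bits are preserved. The original
 * word is restored afterwards because J0 itself is still needed later
 * to turn the final GHASH value into the authentication tag.
 */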
1544
1545static int atmel_aes_gcm_data(struct atmel_aes_dev *dd)
1546{
1547	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1548	struct aead_request *req = aead_request_cast(dd->areq);
1549	bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD);
1550	struct scatterlist *src, *dst;
1551	u32 isr, mr;
1552
1553	/* Write AAD first. */
1554	while (dd->datalen > 0) {
1555		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
1556		dd->data += 4;
1557		dd->datalen -= AES_BLOCK_SIZE;
1558
1559		isr = atmel_aes_read(dd, AES_ISR);
1560		if (!(isr & AES_INT_DATARDY)) {
1561			dd->resume = atmel_aes_gcm_data;
1562			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
1563			return -EINPROGRESS;
1564		}
1565	}
1566
1567	/* GMAC only. */
1568	if (unlikely(ctx->textlen == 0))
1569		return atmel_aes_gcm_tag_init(dd);
1570
1571	/* Prepare src and dst scatter lists to transfer cipher/plain texts */
1572	src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen);
1573	dst = ((req->src == req->dst) ? src :
1574	       scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen));
1575
1576	if (use_dma) {
1577		/* Update the Mode Register for DMA transfers. */
1578		mr = atmel_aes_read(dd, AES_MR);
1579		mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF);
1580		mr |= AES_MR_SMOD_IDATAR0;
1581		if (dd->caps.has_dualbuff)
1582			mr |= AES_MR_DUALBUFF;
1583		atmel_aes_write(dd, AES_MR, mr);
1584
1585		return atmel_aes_dma_start(dd, src, dst, ctx->textlen,
1586					   atmel_aes_gcm_tag_init);
1587	}
1588
1589	return atmel_aes_cpu_start(dd, src, dst, ctx->textlen,
1590				   atmel_aes_gcm_tag_init);
1591}
1592
1593static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
1594{
1595	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1596	struct aead_request *req = aead_request_cast(dd->areq);
1597	u64 *data = dd->buf;
1598
1599	if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
1600		if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
1601			dd->resume = atmel_aes_gcm_tag_init;
1602			atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY);
1603			return -EINPROGRESS;
1604		}
1605
1606		return atmel_aes_gcm_finalize(dd);
1607	}
1608
1609	/* Read the GCM Intermediate Hash Word Registers. */
1610	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash);
1611
1612	data[0] = cpu_to_be64(req->assoclen * 8);
1613	data[1] = cpu_to_be64(ctx->textlen * 8);
1614
1615	return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE,
1616				   ctx->ghash, ctx->ghash, atmel_aes_gcm_tag);
1617}
1618
1619static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd)
1620{
1621	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1622	unsigned long flags;
1623
1624	/*
1625	 * Change mode to CTR to complete the tag generation.
1626	 * Use J0 as Initialization Vector.
1627	 */
1628	flags = dd->flags;
1629	dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN);
1630	dd->flags |= AES_FLAGS_CTR;
1631	atmel_aes_write_ctrl(dd, false, ctx->j0);
1632	dd->flags = flags;
1633
1634	atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash);
1635	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize);
1636}
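/*
 * This implements T = GCTR_K(J0, S) from SP 800-38D: the tag is the
 * GHASH digest S encrypted in CTR mode with J0 as the initial counter
 * block. Temporarily switching the opmode to CTR with IV = J0 and
 * pushing S through IDATAR yields E_K(J0) xor S for the final block.
 */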
1637
1638static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd)
1639{
1640	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1641	struct aead_request *req = aead_request_cast(dd->areq);
1642	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1643	bool enc = atmel_aes_is_encrypt(dd);
1644	u32 offset, authsize, itag[4], *otag = ctx->tag;
1645	int err;
1646
1647	/* Read the computed tag. */
1648	if (likely(dd->flags & AES_FLAGS_GTAGEN))
1649		atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag);
1650	else
1651		atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag);
1652
1653	offset = req->assoclen + ctx->textlen;
1654	authsize = crypto_aead_authsize(tfm);
1655	if (enc) {
1656		scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1);
1657		err = 0;
1658	} else {
1659		scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0);
1660		err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0;
1661	}
1662
1663	return atmel_aes_complete(dd, err);
1664}
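/*
 * On decryption the expected tag is compared with crypto_memneq(),
 * which runs in constant time regardless of where the first mismatch
 * occurs, so the comparison leaks no timing information about the
 * tag. A plain memcmp() would be unsafe here.
 */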
1665
1666static int atmel_aes_gcm_crypt(struct aead_request *req,
1667			       unsigned long mode)
1668{
1669	struct atmel_aes_base_ctx *ctx;
1670	struct atmel_aes_reqctx *rctx;
1671	struct atmel_aes_dev *dd;
1672
1673	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
1674	ctx->block_size = AES_BLOCK_SIZE;
1675
1676	dd = atmel_aes_find_dev(ctx);
1677	if (!dd)
1678		return -ENODEV;
1679
1680	rctx = aead_request_ctx(req);
1681	rctx->mode = AES_FLAGS_GCM | mode;
1682
1683	return atmel_aes_handle_queue(dd, &req->base);
1684}
1685
1686static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
1687				unsigned int keylen)
1688{
1689	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
1690
1691	if (keylen != AES_KEYSIZE_256 &&
1692	    keylen != AES_KEYSIZE_192 &&
1693	    keylen != AES_KEYSIZE_128) {
1694		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1695		return -EINVAL;
1696	}
1697
1698	memcpy(ctx->key, key, keylen);
1699	ctx->keylen = keylen;
1700
1701	return 0;
1702}
1703
1704static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
1705				     unsigned int authsize)
1706{
1707	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
1708	switch (authsize) {
1709	case 4:
1710	case 8:
1711	case 12:
1712	case 13:
1713	case 14:
1714	case 15:
1715	case 16:
1716		break;
1717	default:
1718		return -EINVAL;
1719	}
1720
1721	return 0;
1722}
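/*
 * The switch above accepts the GCM tag lengths permitted by
 * SP 800-38D, mirroring crypto_gcm_authsize() in crypto/gcm.c:
 * 4, 8 and 12 to 16 bytes. A typical caller simply selects the full
 * 16-byte tag (illustrative only):
 *
 *	err = crypto_aead_setauthsize(tfm, 16);
 */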
1723
1724static int atmel_aes_gcm_encrypt(struct aead_request *req)
1725{
1726	return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
1727}
1728
1729static int atmel_aes_gcm_decrypt(struct aead_request *req)
1730{
1731	return atmel_aes_gcm_crypt(req, 0);
1732}
1733
1734static int atmel_aes_gcm_init(struct crypto_aead *tfm)
1735{
1736	struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
1737
1738	crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
1739	ctx->base.start = atmel_aes_gcm_start;
1740
1741	return 0;
1742}
1743
1744static void atmel_aes_gcm_exit(struct crypto_aead *tfm)
1745{
1746
1747}
1748
1749static struct aead_alg aes_gcm_alg = {
1750	.setkey		= atmel_aes_gcm_setkey,
1751	.setauthsize	= atmel_aes_gcm_setauthsize,
1752	.encrypt	= atmel_aes_gcm_encrypt,
1753	.decrypt	= atmel_aes_gcm_decrypt,
1754	.init		= atmel_aes_gcm_init,
1755	.exit		= atmel_aes_gcm_exit,
1756	.ivsize		= 12,
1757	.maxauthsize	= AES_BLOCK_SIZE,
1758
1759	.base = {
1760		.cra_name		= "gcm(aes)",
1761		.cra_driver_name	= "atmel-gcm-aes",
1762		.cra_priority		= ATMEL_AES_PRIORITY,
1763		.cra_flags		= CRYPTO_ALG_ASYNC,
1764		.cra_blocksize		= 1,
1765		.cra_ctxsize		= sizeof(struct atmel_aes_gcm_ctx),
1766		.cra_alignmask		= 0xf,
1767		.cra_module		= THIS_MODULE,
1768	},
1769};
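/*
 * A minimal sketch of how a kernel client might exercise this
 * "gcm(aes)" implementation through the generic AEAD API. Everything
 * below (my_gcm_once(), the key/IV values, buffer sizes) is
 * hypothetical and for illustration only; error unwinding is
 * abbreviated, and real callers should use heap-allocated, DMA-safe
 * buffers rather than the stack:
 *
 *	#include <crypto/aead.h>
 *	#include <linux/scatterlist.h>
 *
 *	static int my_gcm_once(void)
 *	{
 *		u8 key[AES_KEYSIZE_128] = { 0 }, iv[12] = { 0 };
 *		u8 buf[AES_BLOCK_SIZE + 16];	// one block + 16-byte tag
 *		struct scatterlist sg;
 *		struct crypto_aead *tfm;
 *		struct aead_request *req;
 *		DECLARE_CRYPTO_WAIT(wait);
 *		int err;
 *
 *		tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		err = crypto_aead_setkey(tfm, key, sizeof(key));
 *		if (!err)
 *			err = crypto_aead_setauthsize(tfm, 16);
 *
 *		req = aead_request_alloc(tfm, GFP_KERNEL);
 *		sg_init_one(&sg, buf, sizeof(buf));
 *		aead_request_set_callback(req, 0, crypto_req_done, &wait);
 *		aead_request_set_ad(req, 0);	// no associated data
 *		aead_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);
 *		err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
 *
 *		aead_request_free(req);
 *		crypto_free_aead(tfm);
 *		return err;
 *	}
 */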
1770
1771
1772/* Probe functions */
1773
1774static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
1775{
1776	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
1777	dd->buflen = ATMEL_AES_BUFFER_SIZE;
1778	dd->buflen &= ~(AES_BLOCK_SIZE - 1);
1779
1780	if (!dd->buf) {
1781		dev_err(dd->dev, "unable to alloc pages.\n");
1782		return -ENOMEM;
1783	}
1784
1785	return 0;
1786}
1787
1788static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
1789{
1790	free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
1791}
1792
1793static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
1794{
1795	struct at_dma_slave	*sl = slave;
1796
1797	if (sl && sl->dma_dev == chan->device->dev) {
1798		chan->private = sl;
1799		return true;
1800	} else {
1801		return false;
1802	}
1803}
1804
1805static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
1806			      struct crypto_platform_data *pdata)
1807{
1808	struct at_dma_slave *slave;
1809	int err = -ENOMEM;
1810	dma_cap_mask_t mask;
1811
1812	dma_cap_zero(mask);
1813	dma_cap_set(DMA_SLAVE, mask);
1814
1815	/* Try to grab 2 DMA channels */
1816	slave = &pdata->dma_slave->rxdata;
1817	dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
1818							slave, dd->dev, "tx");
1819	if (!dd->src.chan)
1820		goto err_dma_in;
1821
1822	slave = &pdata->dma_slave->txdata;
1823	dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
1824							slave, dd->dev, "rx");
1825	if (!dd->dst.chan)
1826		goto err_dma_out;
1827
1828	return 0;
1829
1830err_dma_out:
1831	dma_release_channel(dd->src.chan);
1832err_dma_in:
1833	dev_warn(dd->dev, "no DMA channel available\n");
1834	return err;
1835}
1836
1837static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
1838{
1839	dma_release_channel(dd->dst.chan);
1840	dma_release_channel(dd->src.chan);
1841}
1842
1843static void atmel_aes_queue_task(unsigned long data)
1844{
1845	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
1846
1847	atmel_aes_handle_queue(dd, NULL);
1848}
1849
1850static void atmel_aes_done_task(unsigned long data)
1851{
1852	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
1853
1854	dd->is_async = true;
1855	(void)dd->resume(dd);
1856}
1857
1858static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
1859{
1860	struct atmel_aes_dev *aes_dd = dev_id;
1861	u32 reg;
1862
1863	reg = atmel_aes_read(aes_dd, AES_ISR);
1864	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
1865		atmel_aes_write(aes_dd, AES_IDR, reg);
1866		if (AES_FLAGS_BUSY & aes_dd->flags)
1867			tasklet_schedule(&aes_dd->done_task);
1868		else
1869			dev_warn(aes_dd->dev, "AES interrupt received with no active request.\n");
1870		return IRQ_HANDLED;
1871	}
1872
1873	return IRQ_NONE;
1874}
1875
1876static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
1877{
1878	int i;
1879
1880	if (dd->caps.has_gcm)
1881		crypto_unregister_aead(&aes_gcm_alg);
1882
1883	if (dd->caps.has_cfb64)
1884		crypto_unregister_alg(&aes_cfb64_alg);
1885
1886	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1887		crypto_unregister_alg(&aes_algs[i]);
1888}
1889
1890static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
1891{
1892	int err, i, j;
1893
1894	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1895		err = crypto_register_alg(&aes_algs[i]);
1896		if (err)
1897			goto err_aes_algs;
1898	}
1899
1900	if (dd->caps.has_cfb64) {
1901		err = crypto_register_alg(&aes_cfb64_alg);
1902		if (err)
1903			goto err_aes_cfb64_alg;
1904	}
1905
1906	if (dd->caps.has_gcm) {
1907		err = crypto_register_aead(&aes_gcm_alg);
1908		if (err)
1909			goto err_aes_gcm_alg;
1910	}
1911
1912	return 0;
1913
1914err_aes_gcm_alg:
1915	crypto_unregister_alg(&aes_cfb64_alg);
1916err_aes_cfb64_alg:
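	/*
	 * Reaching err_aes_cfb64_alg means every aes_algs[] entry was
	 * registered, so set i past the end and fall through to the
	 * unwind loop below.
	 */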
1917	i = ARRAY_SIZE(aes_algs);
1918err_aes_algs:
1919	for (j = 0; j < i; j++)
1920		crypto_unregister_alg(&aes_algs[j]);
1921
1922	return err;
1923}
1924
1925static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
1926{
1927	dd->caps.has_dualbuff = 0;
1928	dd->caps.has_cfb64 = 0;
1929	dd->caps.has_ctr32 = 0;
1930	dd->caps.has_gcm = 0;
1931	dd->caps.max_burst_size = 1;
1932
1933	/* keep only major version number */
1934	switch (dd->hw_version & 0xff0) {
1935	case 0x500:
1936		dd->caps.has_dualbuff = 1;
1937		dd->caps.has_cfb64 = 1;
1938		dd->caps.has_ctr32 = 1;
1939		dd->caps.has_gcm = 1;
1940		dd->caps.max_burst_size = 4;
1941		break;
1942	case 0x200:
1943		dd->caps.has_dualbuff = 1;
1944		dd->caps.has_cfb64 = 1;
1945		dd->caps.has_ctr32 = 1;
1946		dd->caps.has_gcm = 1;
1947		dd->caps.max_burst_size = 4;
1948		break;
1949	case 0x130:
1950		dd->caps.has_dualbuff = 1;
1951		dd->caps.has_cfb64 = 1;
1952		dd->caps.max_burst_size = 4;
1953		break;
1954	case 0x120:
1955		break;
1956	default:
1957		dev_warn(dd->dev,
1958				"Unmanaged AES version, setting minimum capabilities\n");
1959		break;
1960	}
1961}
1962
1963#if defined(CONFIG_OF)
1964static const struct of_device_id atmel_aes_dt_ids[] = {
1965	{ .compatible = "atmel,at91sam9g46-aes" },
1966	{ /* sentinel */ }
1967};
1968MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
1969
1970static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
1971{
1972	struct device_node *np = pdev->dev.of_node;
1973	struct crypto_platform_data *pdata;
1974
1975	if (!np) {
1976		dev_err(&pdev->dev, "device node not found\n");
1977		return ERR_PTR(-EINVAL);
1978	}
1979
1980	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1981	if (!pdata) {
1982		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
1983		return ERR_PTR(-ENOMEM);
1984	}
1985
1986	pdata->dma_slave = devm_kzalloc(&pdev->dev,
1987					sizeof(*(pdata->dma_slave)),
1988					GFP_KERNEL);
1989	if (!pdata->dma_slave) {
1990		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
1991		devm_kfree(&pdev->dev, pdata);
1992		return ERR_PTR(-ENOMEM);
1993	}
1994
1995	return pdata;
1996}
1997#else
1998static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
1999{
2000	return ERR_PTR(-EINVAL);
2001}
2002#endif
2003
2004static int atmel_aes_probe(struct platform_device *pdev)
2005{
2006	struct atmel_aes_dev *aes_dd;
2007	struct crypto_platform_data *pdata;
2008	struct device *dev = &pdev->dev;
2009	struct resource *aes_res;
2010	int err;
2011
2012	pdata = pdev->dev.platform_data;
2013	if (!pdata) {
2014		pdata = atmel_aes_of_init(pdev);
2015		if (IS_ERR(pdata)) {
2016			err = PTR_ERR(pdata);
2017			goto aes_dd_err;
2018		}
2019	}
2020
2021	if (!pdata->dma_slave) {
2022		err = -ENXIO;
2023		goto aes_dd_err;
2024	}
2025
2026	aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
2027	if (aes_dd == NULL) {
2028		dev_err(dev, "unable to alloc data struct.\n");
2029		err = -ENOMEM;
2030		goto aes_dd_err;
2031	}
2032
2033	aes_dd->dev = dev;
2034
2035	platform_set_drvdata(pdev, aes_dd);
2036
2037	INIT_LIST_HEAD(&aes_dd->list);
2038	spin_lock_init(&aes_dd->lock);
2039
2040	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
2041					(unsigned long)aes_dd);
2042	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
2043					(unsigned long)aes_dd);
2044
2045	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
2046
2047	aes_dd->irq = -1;
2048
2049	/* Get the base address */
2050	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2051	if (!aes_res) {
2052		dev_err(dev, "no MEM resource info\n");
2053		err = -ENODEV;
2054		goto res_err;
2055	}
2056	aes_dd->phys_base = aes_res->start;
2057
2058	/* Get the IRQ */
2059	aes_dd->irq = platform_get_irq(pdev, 0);
2060	if (aes_dd->irq < 0) {
2061		dev_err(dev, "no IRQ resource info\n");
2062		err = aes_dd->irq;
2063		goto res_err;
2064	}
2065
2066	err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
2067			       IRQF_SHARED, "atmel-aes", aes_dd);
2068	if (err) {
2069		dev_err(dev, "unable to request aes irq.\n");
2070		goto res_err;
2071	}
2072
2073	/* Initializing the clock */
2074	aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
2075	if (IS_ERR(aes_dd->iclk)) {
2076		dev_err(dev, "clock initialization failed.\n");
2077		err = PTR_ERR(aes_dd->iclk);
2078		goto res_err;
2079	}
2080
2081	aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
2082	if (IS_ERR(aes_dd->io_base)) {
2083		dev_err(dev, "can't ioremap\n");
2084		err = PTR_ERR(aes_dd->io_base);
2085		goto res_err;
2086	}
2087
2088	err = clk_prepare(aes_dd->iclk);
2089	if (err)
2090		goto res_err;
2091
2092	err = atmel_aes_hw_version_init(aes_dd);
2093	if (err)
2094		goto iclk_unprepare;
2095
2096	atmel_aes_get_cap(aes_dd);
2097
2098	err = atmel_aes_buff_init(aes_dd);
2099	if (err)
2100		goto err_aes_buff;
2101
2102	err = atmel_aes_dma_init(aes_dd, pdata);
2103	if (err)
2104		goto err_aes_dma;
2105
2106	spin_lock(&atmel_aes.lock);
2107	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
2108	spin_unlock(&atmel_aes.lock);
2109
2110	err = atmel_aes_register_algs(aes_dd);
2111	if (err)
2112		goto err_algs;
2113
2114	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
2115			dma_chan_name(aes_dd->src.chan),
2116			dma_chan_name(aes_dd->dst.chan));
2117
2118	return 0;
2119
2120err_algs:
2121	spin_lock(&atmel_aes.lock);
2122	list_del(&aes_dd->list);
2123	spin_unlock(&atmel_aes.lock);
2124	atmel_aes_dma_cleanup(aes_dd);
2125err_aes_dma:
2126	atmel_aes_buff_cleanup(aes_dd);
2127err_aes_buff:
2128iclk_unprepare:
2129	clk_unprepare(aes_dd->iclk);
2130res_err:
2131	tasklet_kill(&aes_dd->done_task);
2132	tasklet_kill(&aes_dd->queue_task);
2133aes_dd_err:
2134	dev_err(dev, "initialization failed.\n");
2135
2136	return err;
2137}
2138
2139static int atmel_aes_remove(struct platform_device *pdev)
2140{
2141	struct atmel_aes_dev *aes_dd;
2142
2143	aes_dd = platform_get_drvdata(pdev);
2144	if (!aes_dd)
2145		return -ENODEV;
2146	spin_lock(&atmel_aes.lock);
2147	list_del(&aes_dd->list);
2148	spin_unlock(&atmel_aes.lock);
2149
2150	atmel_aes_unregister_algs(aes_dd);
2151
2152	tasklet_kill(&aes_dd->done_task);
2153	tasklet_kill(&aes_dd->queue_task);
2154
2155	atmel_aes_dma_cleanup(aes_dd);
2156	atmel_aes_buff_cleanup(aes_dd);
2157
2158	clk_unprepare(aes_dd->iclk);
2159
2160	return 0;
2161}
2162
2163static struct platform_driver atmel_aes_driver = {
2164	.probe		= atmel_aes_probe,
2165	.remove		= atmel_aes_remove,
2166	.driver		= {
2167		.name	= "atmel_aes",
2168		.of_match_table = of_match_ptr(atmel_aes_dt_ids),
2169	},
2170};
2171
2172module_platform_driver(atmel_aes_driver);
2173
2174MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
2175MODULE_LICENSE("GPL v2");
2176MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");