Linux Audio

Check our new training course

Loading...
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Cryptographic API.
   4 *
   5 * Support for ATMEL DES/TDES HW acceleration.
   6 *
   7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
   8 * Author: Nicolas Royer <nicolas@eukrea.com>
   9 *
  10 * Some ideas are from omap-aes.c drivers.
  11 */
  12
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/slab.h>
  17#include <linux/err.h>
  18#include <linux/clk.h>
  19#include <linux/io.h>
  20#include <linux/hw_random.h>
  21#include <linux/platform_device.h>
  22
  23#include <linux/device.h>
  24#include <linux/dmaengine.h>
  25#include <linux/init.h>
  26#include <linux/errno.h>
  27#include <linux/interrupt.h>
  28#include <linux/irq.h>
  29#include <linux/scatterlist.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/of_device.h>
  32#include <linux/delay.h>
  33#include <linux/crypto.h>
  34#include <crypto/scatterwalk.h>
  35#include <crypto/algapi.h>
  36#include <crypto/internal/des.h>
  37#include <crypto/internal/skcipher.h>
  38#include "atmel-tdes-regs.h"
  39
#define ATMEL_TDES_PRIORITY	300

/* TDES flags  */
/*
 * Reserve bits [17:16], [13:12], [2:0] for the TDES Mode Register: the
 * flags below alias TDES_MR_* field values so they can be OR-ed straight
 * into MR.  (Comment previously said "AES Mode Register" — copy/paste
 * from the atmel-aes driver.)
 */
#define TDES_FLAGS_ENCRYPT	TDES_MR_CYPHER_ENC
#define TDES_FLAGS_OPMODE_MASK	(TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
#define TDES_FLAGS_ECB		TDES_MR_OPMOD_ECB
#define TDES_FLAGS_CBC		TDES_MR_OPMOD_CBC
#define TDES_FLAGS_OFB		TDES_MR_OPMOD_OFB
#define TDES_FLAGS_CFB64	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
#define TDES_FLAGS_CFB32	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
#define TDES_FLAGS_CFB16	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
#define TDES_FLAGS_CFB8		(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)

/* Bits carried per request: operation mode plus the encrypt/decrypt bit. */
#define TDES_FLAGS_MODE_MASK	(TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)

/* Driver-state bits kept in atmel_tdes_dev.flags (outside the MR fields). */
#define TDES_FLAGS_INIT		BIT(3)
#define TDES_FLAGS_FAST		BIT(4)
#define TDES_FLAGS_BUSY		BIT(5)
#define TDES_FLAGS_DMA		BIT(6)

#define ATMEL_TDES_QUEUE_LENGTH	50

/* Block granularity of the narrow CFB variants, in bytes. */
#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4
  66
/* Hardware capabilities, derived from the IP version in atmel_tdes_get_cap(). */
struct atmel_tdes_caps {
	bool	has_dma;	/* true: use dmaengine; false: use the built-in PDC */
	u32		has_cfb_3keys;	/* CFB supports 3-key TDES (u32 but used as a bool) */
};
  71
struct atmel_tdes_dev;

/* Per-transform (tfm) context: key material and the bound device. */
struct atmel_tdes_ctx {
	struct atmel_tdes_dev *dd;	/* device servicing this tfm */

	int		keylen;		/* key length in bytes (DES or 2/3-key TDES) */
	u32		key[DES3_EDE_KEY_SIZE / sizeof(u32)];
	unsigned long	flags;

	u16		block_size;	/* block granularity of the selected mode (1/2/4/8) */
};
  83
/* Per-request context. */
struct atmel_tdes_reqctx {
	unsigned long mode;		/* TDES_FLAGS_* opmode + encrypt bit for this request */
	u8 lastc[DES_BLOCK_SIZE];	/* saved last ciphertext block (in-place decrypt) */
};
  88
/* One dmaengine channel together with its slave configuration. */
struct atmel_tdes_dma {
	struct dma_chan			*chan;
	struct dma_slave_config dma_conf;
};
  93
/* Per-device (per-IP-instance) state. */
struct atmel_tdes_dev {
	struct list_head	list;		/* entry in atmel_tdes.dev_list */
	unsigned long		phys_base;	/* physical base (used for DMA slave addresses) */
	void __iomem		*io_base;

	struct atmel_tdes_ctx	*ctx;		/* ctx of the request in flight */
	struct device		*dev;
	struct clk			*iclk;
	int					irq;

	unsigned long		flags;		/* TDES_FLAGS_* driver state + current mode bits */

	spinlock_t		lock;		/* protects queue and BUSY flag */
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;	/* transfer-complete bottom half */
	struct tasklet_struct	queue_task;	/* re-pump the request queue */

	struct skcipher_request	*req;		/* request in flight */
	size_t				total;		/* bytes still to be processed */

	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	size_t				in_offset;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;
	size_t				out_offset;

	size_t	buflen;		/* usable size of each bounce buffer */
	size_t	dma_size;	/* size of the transfer currently in flight */

	void	*buf_in;	/* bounce buffer for unaligned input */
	int		dma_in;
	dma_addr_t	dma_addr_in;
	struct atmel_tdes_dma	dma_lch_in;

	void	*buf_out;	/* bounce buffer for unaligned output */
	int		dma_out;
	dma_addr_t	dma_addr_out;
	struct atmel_tdes_dma	dma_lch_out;

	struct atmel_tdes_caps	caps;

	u32	hw_version;
};
 139
/* Global driver state: the list of probed TDES devices. */
struct atmel_tdes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;	/* protects dev_list */
};

static struct atmel_tdes_drv atmel_tdes = {
	.dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
};
 149
 150static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
 151			void *buf, size_t buflen, size_t total, int out)
 152{
 153	size_t count, off = 0;
 154
 155	while (buflen && total) {
 156		count = min((*sg)->length - *offset, total);
 157		count = min(count, buflen);
 158
 159		if (!count)
 160			return off;
 161
 162		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
 163
 164		off += count;
 165		buflen -= count;
 166		*offset += count;
 167		total -= count;
 168
 169		if (*offset == (*sg)->length) {
 170			*sg = sg_next(*sg);
 171			if (*sg)
 172				*offset = 0;
 173			else
 174				total = 0;
 175		}
 176	}
 177
 178	return off;
 179}
 180
/* Read a 32-bit TDES register at byte offset 'offset' from the IP base. */
static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}
 185
/* Write a 32-bit TDES register at byte offset 'offset' from the IP base. */
static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}
 191
 192static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
 193			       const u32 *value, int count)
 194{
 195	for (; count--; value++, offset += 4)
 196		atmel_tdes_write(dd, offset, *value);
 197}
 198
 199static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx)
 200{
 201	struct atmel_tdes_dev *tdes_dd = NULL;
 202	struct atmel_tdes_dev *tmp;
 203
 204	spin_lock_bh(&atmel_tdes.lock);
 205	if (!ctx->dd) {
 206		list_for_each_entry(tmp, &atmel_tdes.dev_list, list) {
 207			tdes_dd = tmp;
 208			break;
 209		}
 210		ctx->dd = tdes_dd;
 211	} else {
 212		tdes_dd = ctx->dd;
 213	}
 214	spin_unlock_bh(&atmel_tdes.lock);
 215
 216	return tdes_dd;
 217}
 218
 219static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
 220{
 221	int err;
 222
 223	err = clk_prepare_enable(dd->iclk);
 224	if (err)
 225		return err;
 226
 227	if (!(dd->flags & TDES_FLAGS_INIT)) {
 228		atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
 229		dd->flags |= TDES_FLAGS_INIT;
 230	}
 231
 232	return 0;
 233}
 234
/* Return the 12-bit hardware version field from the VERSION register. */
static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
{
	return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
}
 239
 240static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
 241{
 242	int err;
 243
 244	err = atmel_tdes_hw_init(dd);
 245	if (err)
 246		return err;
 247
 248	dd->hw_version = atmel_tdes_get_version(dd);
 249
 250	dev_info(dd->dev,
 251			"version: 0x%x\n", dd->hw_version);
 252
 253	clk_disable_unprepare(dd->iclk);
 254
 255	return 0;
 256}
 257
/*
 * dmaengine completion callback for the output (rx) channel; defers the
 * actual post-processing to the done tasklet.
 */
static void atmel_tdes_dma_callback(void *data)
{
	struct atmel_tdes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}
 265
/*
 * Program the hardware for the current request: mode register, key and
 * (for non-ECB modes) IV.  Leaves the clock enabled via
 * atmel_tdes_hw_init().
 */
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
	int err;
	u32 valmr = TDES_MR_SMOD_PDC;

	err = atmel_tdes_hw_init(dd);

	if (err)
		return err;

	/* Without dmaengine support the built-in PDC is used: quiesce it. */
	if (!dd->caps.has_dma)
		atmel_tdes_write(dd, TDES_PTCR,
			TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);

	/* MR register must be set before IV registers */
	/* Pick DES vs TDES and 2-key vs 3-key from the key length. */
	if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
		valmr |= TDES_MR_KEYMOD_3KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else if (dd->ctx->keylen > DES_KEY_SIZE) {
		valmr |= TDES_MR_KEYMOD_2KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else {
		valmr |= TDES_MR_TDESMOD_DES;
	}

	/* Merge in the request's opmode + encrypt/decrypt bits. */
	valmr |= dd->flags & TDES_FLAGS_MODE_MASK;

	atmel_tdes_write(dd, TDES_MR, valmr);

	atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
						dd->ctx->keylen >> 2);

	/* IV is two 32-bit words and only meaningful outside ECB. */
	if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
		atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);

	return 0;
}
 303
/*
 * Tear down after a PDC transfer: stop both PDC channels and either
 * unmap the scatterlists (fast path) or copy the bounce buffer back
 * into the output scatterlist (slow path).
 */
static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
{
	int err = 0;
	size_t count;

	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);

	if (dd->flags & TDES_FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy data */
		count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			pr_err("not all data converted: %zu\n", count);
		}
	}

	return err;
}
 329
/*
 * Allocate and DMA-map one page per direction as bounce buffers for
 * requests whose scatterlists are not suitably aligned for direct DMA.
 * Counterpart of atmel_tdes_buff_cleanup().
 */
static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
{
	int err = -ENOMEM;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
	/* Round the usable length down to a whole number of DES blocks. */
	dd->buflen = PAGE_SIZE;
	dd->buflen &= ~(DES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
					dd->buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
					dd->buflen, DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	return 0;

err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
err_map_in:
err_alloc:
	/* free_page() ignores a NULL/0 address, so both calls are safe. */
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
	if (err)
		pr_err("error: %d\n", err);
	return err;
}
 374
 375static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
 376{
 377	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
 378			 DMA_FROM_DEVICE);
 379	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
 380		DMA_TO_DEVICE);
 381	free_page((unsigned long)dd->buf_out);
 382	free_page((unsigned long)dd->buf_in);
 383}
 384
/*
 * Start one transfer of 'length' bytes through the built-in PDC.
 * Completion is signalled by the ENDRX interrupt, which schedules the
 * done tasklet.
 */
static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
	int len32;

	dd->dma_size = length;

	/* Slow path uses the bounce buffer: push CPU writes to the device. */
	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	/*
	 * PDC counters are expressed in transfer units, whose width depends
	 * on the CFB variant (1, 2 or 4 bytes).
	 */
	switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		len32 = DIV_ROUND_UP(length, sizeof(u8));
		break;

	case TDES_FLAGS_CFB16:
		len32 = DIV_ROUND_UP(length, sizeof(u16));
		break;

	default:
		len32 = DIV_ROUND_UP(length, sizeof(u32));
		break;
	}

	/* Program both PDC channels while they are disabled. */
	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
	atmel_tdes_write(dd, TDES_TCR, len32);
	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
	atmel_tdes_write(dd, TDES_RCR, len32);

	/* Enable Interrupt */
	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);

	/* Start DMA transfer */
	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);

	return 0;
}
 427
/*
 * Start one transfer of 'length' bytes through two dmaengine slave
 * channels (mem->device for input, device->mem for output).  Completion
 * of the output channel triggers atmel_tdes_dma_callback().
 */
static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor	*in_desc, *out_desc;
	enum dma_slave_buswidth addr_width;

	dd->dma_size = length;

	/* Slow path uses the bounce buffer: push CPU writes to the device. */
	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	/* The FIFO access width must match the CFB variant in use. */
	switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;

	case TDES_FLAGS_CFB16:
		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;

	default:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
	dd->dma_lch_out.dma_conf.src_addr_width = addr_width;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= TDES_FLAGS_DMA;

	/* Single-entry scatterlists wrapping the already-mapped addresses. */
	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
				1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT  |  DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
				1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	/* Only the output side signals completion of the whole transfer. */
	out_desc->callback = atmel_tdes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}
 497
 498static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
 499{
 500	int err, fast = 0, in, out;
 501	size_t count;
 502	dma_addr_t addr_in, addr_out;
 503
 504	if ((!dd->in_offset) && (!dd->out_offset)) {
 505		/* check for alignment */
 506		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
 507			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
 508		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
 509			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
 510		fast = in && out;
 511
 512		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
 513			fast = 0;
 514	}
 515
 516
 517	if (fast)  {
 518		count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
 519		count = min_t(size_t, count, sg_dma_len(dd->out_sg));
 520
 521		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 522		if (!err) {
 523			dev_err(dd->dev, "dma_map_sg() error\n");
 524			return -EINVAL;
 525		}
 526
 527		err = dma_map_sg(dd->dev, dd->out_sg, 1,
 528				DMA_FROM_DEVICE);
 529		if (!err) {
 530			dev_err(dd->dev, "dma_map_sg() error\n");
 531			dma_unmap_sg(dd->dev, dd->in_sg, 1,
 532				DMA_TO_DEVICE);
 533			return -EINVAL;
 534		}
 535
 536		addr_in = sg_dma_address(dd->in_sg);
 537		addr_out = sg_dma_address(dd->out_sg);
 538
 539		dd->flags |= TDES_FLAGS_FAST;
 540
 541	} else {
 542		/* use cache buffers */
 543		count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
 544				dd->buf_in, dd->buflen, dd->total, 0);
 545
 546		addr_in = dd->dma_addr_in;
 547		addr_out = dd->dma_addr_out;
 548
 549		dd->flags &= ~TDES_FLAGS_FAST;
 550	}
 551
 552	dd->total -= count;
 553
 554	if (dd->caps.has_dma)
 555		err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
 556	else
 557		err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);
 558
 559	if (err && (dd->flags & TDES_FLAGS_FAST)) {
 560		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 561		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
 562	}
 563
 564	return err;
 565}
 566
 567static void
 568atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
 569{
 570	struct skcipher_request *req = dd->req;
 571	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
 572	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 573	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 574
 575	if (req->cryptlen < ivsize)
 576		return;
 577
 578	if (rctx->mode & TDES_FLAGS_ENCRYPT) {
 579		scatterwalk_map_and_copy(req->iv, req->dst,
 580					 req->cryptlen - ivsize, ivsize, 0);
 581	} else {
 582		if (req->src == req->dst)
 583			memcpy(req->iv, rctx->lastc, ivsize);
 584		else
 585			scatterwalk_map_and_copy(req->iv, req->src,
 586						 req->cryptlen - ivsize,
 587						 ivsize, 0);
 588	}
 589}
 590
/*
 * Complete the current request: release the clock, clear BUSY, update
 * the chaining IV on success, then notify the crypto API caller.
 */
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
	struct skcipher_request *req = dd->req;
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);

	clk_disable_unprepare(dd->iclk);

	dd->flags &= ~TDES_FLAGS_BUSY;

	if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
		atmel_tdes_set_iv_as_last_ciphertext_block(dd);

	req->base.complete(&req->base, err);
}
 605
/*
 * Enqueue 'req' (may be NULL to just pump the queue) and, if the device
 * is idle, dequeue the next request and start it.  Returns the
 * crypto_enqueue_request() status for 'req' (e.g. -EINPROGRESS or
 * -EBUSY), or 0 when called without a request.
 */
static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
			       struct skcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_tdes_ctx *ctx;
	struct atmel_tdes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = crypto_enqueue_request(&dd->queue, &req->base);
	/* A request is already in flight: it will re-pump when done. */
	if (dd->flags & TDES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= TDES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = skcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->cryptlen;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = skcipher_request_ctx(req);
	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	/* Fold the request's mode bits into the device flags. */
	rctx->mode &= TDES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;
	ctx->dd = dd;

	err = atmel_tdes_write_ctrl(dd);
	if (!err)
		err = atmel_tdes_crypt_start(dd);
	if (err) {
		/* des_task will not finish it, so do it here */
		atmel_tdes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}
 662
/*
 * Tear down after a dmaengine transfer: unmap the scatterlists (fast
 * path) or copy the bounce buffer back to the output scatterlist (slow
 * path).  Returns -EINVAL if no DMA transfer was actually in flight.
 */
static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
{
	int err = -EINVAL;
	size_t count;

	if (dd->flags & TDES_FLAGS_DMA) {
		err = 0;
		if  (dd->flags & TDES_FLAGS_FAST) {
			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		} else {
			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
				dd->dma_size, DMA_FROM_DEVICE);

			/* copy data */
			count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);
			if (count != dd->dma_size) {
				err = -EINVAL;
				pr_err("not all data converted: %zu\n", count);
			}
		}
	}
	return err;
}
 688
/*
 * Common entry point for all mode wrappers: validate the request length
 * against the mode's block granularity, record the mode in the request
 * context, and queue the request on the device.
 */
static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);

	switch (mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB8 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB8_BLOCK_SIZE;
		break;

	case TDES_FLAGS_CFB16:
		if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB16 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB16_BLOCK_SIZE;
		break;

	case TDES_FLAGS_CFB32:
		if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB32 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB32_BLOCK_SIZE;
		break;

	default:
		if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of DES blocks\n");
			return -EINVAL;
		}
		ctx->block_size = DES_BLOCK_SIZE;
		break;
	}

	rctx->mode = mode;

	/*
	 * For in-place decryption in a chaining mode, save the last
	 * ciphertext block now: it becomes the next IV but would otherwise
	 * be overwritten by the plaintext result.
	 */
	if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
	    !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

		if (req->cryptlen >= ivsize)
			scatterwalk_map_and_copy(rctx->lastc, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
	}

	return atmel_tdes_handle_queue(ctx->dd, req);
}
 743
/*
 * Acquire the "tx" (mem->IP) and "rx" (IP->mem) dmaengine channels and
 * pre-fill their slave configurations.  The address widths may later be
 * narrowed per-transfer by atmel_tdes_crypt_dma() for CFB8/CFB16.
 */
static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
{
	int ret;

	/* Try to grab 2 DMA channels */
	dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->dma_lch_in.chan)) {
		ret = PTR_ERR(dd->dma_lch_in.chan);
		goto err_dma_in;
	}

	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		TDES_IDATA1R;
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
	if (IS_ERR(dd->dma_lch_out.chan)) {
		ret = PTR_ERR(dd->dma_lch_out.chan);
		goto err_dma_out;
	}

	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
		TDES_ODATA1R;
	dd->dma_lch_out.dma_conf.src_maxburst = 1;
	dd->dma_lch_out.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.dst_maxburst = 1;
	dd->dma_lch_out.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.device_fc = false;

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	dev_err(dd->dev, "no DMA channel available\n");
	return ret;
}
 789
 790static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
 791{
 792	dma_release_channel(dd->dma_lch_in.chan);
 793	dma_release_channel(dd->dma_lch_out.chan);
 794}
 795
 796static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
 797			   unsigned int keylen)
 798{
 799	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 800	int err;
 801
 802	err = verify_skcipher_des_key(tfm, key);
 803	if (err)
 804		return err;
 805
 806	memcpy(ctx->key, key, keylen);
 807	ctx->keylen = keylen;
 808
 809	return 0;
 810}
 811
 812static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 813			   unsigned int keylen)
 814{
 815	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 816	int err;
 817
 818	err = verify_skcipher_des3_key(tfm, key);
 819	if (err)
 820		return err;
 821
 822	memcpy(ctx->key, key, keylen);
 823	ctx->keylen = keylen;
 824
 825	return 0;
 826}
 827
/* ECB encryption entry point (DES and TDES). */
static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
}
 832
/* ECB decryption entry point (DES and TDES). */
static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
}
 837
/* CBC encryption entry point (DES and TDES). */
static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
}
 842
/* CBC decryption entry point (DES and TDES). */
static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
}
/* CFB64 encryption entry point. */
static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
}
 851
/* CFB64 decryption entry point. */
static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
}
 856
/* CFB8 encryption entry point. */
static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
}
 861
/* CFB8 decryption entry point. */
static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
}
 866
/* CFB16 encryption entry point. */
static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
}
 871
/* CFB16 decryption entry point. */
static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
}
 876
/* CFB32 encryption entry point. */
static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
}
 881
/* CFB32 decryption entry point. */
static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
}
 886
/* OFB encryption entry point (DES and TDES). */
static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
}
 891
/* OFB decryption entry point (DES and TDES). */
static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
}
 896
 897static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
 898{
 899	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 900	struct atmel_tdes_dev *dd;
 901
 902	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
 903
 904	dd = atmel_tdes_find_dev(ctx);
 905	if (!dd)
 906		return -ENODEV;
 907
 
 
 908	return 0;
 909}
 910
 911static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
 912{
 913	alg->base.cra_priority = ATMEL_TDES_PRIORITY;
 914	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
 915	alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
 916	alg->base.cra_module = THIS_MODULE;
 917
 918	alg->init = atmel_tdes_init_tfm;
 919}
 920
/*
 * Algorithm table.  Common fields (priority, flags, ctxsize, module,
 * init) are filled in by atmel_tdes_skcipher_alg_init() at registration
 * time.  Entries 0-6 are single-DES modes, 7-9 are triple-DES modes.
 */
static struct skcipher_alg tdes_algs[] = {
/* Single-DES modes */
{
	.base.cra_name		= "ecb(des)",
	.base.cra_driver_name	= "atmel-ecb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des)",
	.base.cra_driver_name	= "atmel-cbc-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
},
{
	.base.cra_name		= "cfb(des)",
	.base.cra_driver_name	= "atmel-cfb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb_encrypt,
	.decrypt		= atmel_tdes_cfb_decrypt,
},
{
	.base.cra_name		= "cfb8(des)",
	.base.cra_driver_name	= "atmel-cfb8-des",
	.base.cra_blocksize	= CFB8_BLOCK_SIZE,
	.base.cra_alignmask	= 0,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb8_encrypt,
	.decrypt		= atmel_tdes_cfb8_decrypt,
},
{
	.base.cra_name		= "cfb16(des)",
	.base.cra_driver_name	= "atmel-cfb16-des",
	.base.cra_blocksize	= CFB16_BLOCK_SIZE,
	.base.cra_alignmask	= 0x1,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb16_encrypt,
	.decrypt		= atmel_tdes_cfb16_decrypt,
},
{
	.base.cra_name		= "cfb32(des)",
	.base.cra_driver_name	= "atmel-cfb32-des",
	.base.cra_blocksize	= CFB32_BLOCK_SIZE,
	.base.cra_alignmask	= 0x3,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb32_encrypt,
	.decrypt		= atmel_tdes_cfb32_decrypt,
},
{
	.base.cra_name		= "ofb(des)",
	.base.cra_driver_name	= "atmel-ofb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_ofb_encrypt,
	.decrypt		= atmel_tdes_ofb_decrypt,
},
/* Triple-DES modes */
{
	.base.cra_name		= "ecb(des3_ede)",
	.base.cra_driver_name	= "atmel-ecb-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des3_ede)",
	.base.cra_driver_name	= "atmel-cbc-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
	.ivsize			= DES_BLOCK_SIZE,
},
{
	.base.cra_name		= "ofb(des3_ede)",
	.base.cra_driver_name	= "atmel-ofb-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_ofb_encrypt,
	.decrypt		= atmel_tdes_ofb_decrypt,
	.ivsize			= DES_BLOCK_SIZE,
},
};
1051
/* Tasklet: pump the request queue when the device becomes idle. */
static void atmel_tdes_queue_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;

	atmel_tdes_handle_queue(dd, NULL);
}
1058
/*
 * Tasklet run when a transfer completes: collect the output, start the
 * next chunk if data remains, otherwise finish the request and pump the
 * queue.
 */
static void atmel_tdes_done_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
	int err;

	if (!(dd->flags & TDES_FLAGS_DMA))
		err = atmel_tdes_crypt_pdc_stop(dd);
	else
		err = atmel_tdes_crypt_dma_stop(dd);

	if (dd->total && !err) {
		if (dd->flags & TDES_FLAGS_FAST) {
			/* Fast path consumes one sg entry per side per chunk. */
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_tdes_crypt_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

	atmel_tdes_finish_req(dd, err);
	atmel_tdes_handle_queue(dd, NULL);
}
1085
/*
 * Interrupt handler (PDC path): acknowledge/disable the raised sources
 * and defer post-processing to the done tasklet.
 */
static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
{
	struct atmel_tdes_dev *tdes_dd = dev_id;
	u32 reg;

	reg = atmel_tdes_read(tdes_dd, TDES_ISR);
	if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
		atmel_tdes_write(tdes_dd, TDES_IDR, reg);
		if (TDES_FLAGS_BUSY & tdes_dd->flags)
			tasklet_schedule(&tdes_dd->done_task);
		else
			dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
1103
/* Unregister every algorithm in tdes_algs[]. */
static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
		crypto_unregister_skcipher(&tdes_algs[i]);
}
1111
/*
 * Register every algorithm in tdes_algs[]; on failure, roll back the
 * ones already registered.
 */
static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
		atmel_tdes_skcipher_alg_init(&tdes_algs[i]);

		err = crypto_register_skcipher(&tdes_algs[i]);
		if (err)
			goto err_tdes_algs;
	}

	return 0;

err_tdes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(&tdes_algs[j]);

	return err;
}
1132
1133static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
1134{
1135
1136	dd->caps.has_dma = 0;
1137	dd->caps.has_cfb_3keys = 0;
1138
1139	/* keep only major version number */
1140	switch (dd->hw_version & 0xf00) {
 
1141	case 0x700:
1142		dd->caps.has_dma = 1;
1143		dd->caps.has_cfb_3keys = 1;
1144		break;
1145	case 0x600:
1146		break;
1147	default:
1148		dev_warn(dd->dev,
1149				"Unmanaged tdes version, set minimum capabilities\n");
1150		break;
1151	}
1152}
1153
#if defined(CONFIG_OF)
/* Device-tree match table. */
static const struct of_device_id atmel_tdes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-tdes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
#endif
1161
1162static int atmel_tdes_probe(struct platform_device *pdev)
1163{
1164	struct atmel_tdes_dev *tdes_dd;
1165	struct device *dev = &pdev->dev;
1166	struct resource *tdes_res;
1167	int err;
1168
1169	tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
1170	if (!tdes_dd)
1171		return -ENOMEM;
1172
1173	tdes_dd->dev = dev;
1174
1175	platform_set_drvdata(pdev, tdes_dd);
1176
1177	INIT_LIST_HEAD(&tdes_dd->list);
1178	spin_lock_init(&tdes_dd->lock);
1179
1180	tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
1181					(unsigned long)tdes_dd);
1182	tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
1183					(unsigned long)tdes_dd);
1184
1185	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
1186
1187	/* Get the base address */
1188	tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1189	if (!tdes_res) {
1190		dev_err(dev, "no MEM resource info\n");
1191		err = -ENODEV;
1192		goto err_tasklet_kill;
1193	}
1194	tdes_dd->phys_base = tdes_res->start;
1195
1196	/* Get the IRQ */
1197	tdes_dd->irq = platform_get_irq(pdev,  0);
1198	if (tdes_dd->irq < 0) {
1199		err = tdes_dd->irq;
1200		goto err_tasklet_kill;
1201	}
1202
1203	err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
1204			       IRQF_SHARED, "atmel-tdes", tdes_dd);
1205	if (err) {
1206		dev_err(dev, "unable to request tdes irq.\n");
1207		goto err_tasklet_kill;
1208	}
1209
1210	/* Initializing the clock */
1211	tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
1212	if (IS_ERR(tdes_dd->iclk)) {
1213		dev_err(dev, "clock initialization failed.\n");
1214		err = PTR_ERR(tdes_dd->iclk);
1215		goto err_tasklet_kill;
1216	}
1217
1218	tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
1219	if (IS_ERR(tdes_dd->io_base)) {
1220		dev_err(dev, "can't ioremap\n");
1221		err = PTR_ERR(tdes_dd->io_base);
1222		goto err_tasklet_kill;
1223	}
1224
1225	err = atmel_tdes_hw_version_init(tdes_dd);
1226	if (err)
1227		goto err_tasklet_kill;
1228
1229	atmel_tdes_get_cap(tdes_dd);
1230
1231	err = atmel_tdes_buff_init(tdes_dd);
1232	if (err)
1233		goto err_tasklet_kill;
1234
1235	if (tdes_dd->caps.has_dma) {
1236		err = atmel_tdes_dma_init(tdes_dd);
1237		if (err)
1238			goto err_buff_cleanup;
1239
1240		dev_info(dev, "using %s, %s for DMA transfers\n",
1241				dma_chan_name(tdes_dd->dma_lch_in.chan),
1242				dma_chan_name(tdes_dd->dma_lch_out.chan));
1243	}
1244
1245	spin_lock(&atmel_tdes.lock);
1246	list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
1247	spin_unlock(&atmel_tdes.lock);
1248
1249	err = atmel_tdes_register_algs(tdes_dd);
1250	if (err)
1251		goto err_algs;
1252
1253	dev_info(dev, "Atmel DES/TDES\n");
1254
1255	return 0;
1256
1257err_algs:
1258	spin_lock(&atmel_tdes.lock);
1259	list_del(&tdes_dd->list);
1260	spin_unlock(&atmel_tdes.lock);
1261	if (tdes_dd->caps.has_dma)
1262		atmel_tdes_dma_cleanup(tdes_dd);
1263err_buff_cleanup:
1264	atmel_tdes_buff_cleanup(tdes_dd);
1265err_tasklet_kill:
1266	tasklet_kill(&tdes_dd->done_task);
1267	tasklet_kill(&tdes_dd->queue_task);
1268
1269	return err;
1270}
1271
/*
 * Remove: take the device off the driver list first so new tfms cannot
 * find it, then unregister the algorithms, stop the bottom halves and
 * release DMA and bounce-buffer resources.
 */
static int atmel_tdes_remove(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd;

	tdes_dd = platform_get_drvdata(pdev);
	if (!tdes_dd)
		return -ENODEV;
	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);

	atmel_tdes_unregister_algs(tdes_dd);

	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);

	atmel_tdes_buff_cleanup(tdes_dd);

	return 0;
}
1295
/* Platform glue: bound via DT (see atmel_tdes_dt_ids) or platform name. */
static struct platform_driver atmel_tdes_driver = {
	.probe		= atmel_tdes_probe,
	.remove		= atmel_tdes_remove,
	.driver		= {
		.name	= "atmel_tdes",
		.of_match_table = of_match_ptr(atmel_tdes_dt_ids),
	},
};

module_platform_driver(atmel_tdes_driver);

MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Cryptographic API.
   4 *
   5 * Support for ATMEL DES/TDES HW acceleration.
   6 *
   7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
   8 * Author: Nicolas Royer <nicolas@eukrea.com>
   9 *
  10 * Some ideas are from omap-aes.c drivers.
  11 */
  12
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/slab.h>
  17#include <linux/err.h>
  18#include <linux/clk.h>
  19#include <linux/io.h>
  20#include <linux/hw_random.h>
  21#include <linux/platform_device.h>
  22
  23#include <linux/device.h>
  24#include <linux/dmaengine.h>
  25#include <linux/init.h>
  26#include <linux/errno.h>
  27#include <linux/interrupt.h>
  28#include <linux/irq.h>
  29#include <linux/scatterlist.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/of_device.h>
  32#include <linux/delay.h>
  33#include <linux/crypto.h>
  34#include <crypto/scatterwalk.h>
  35#include <crypto/algapi.h>
  36#include <crypto/internal/des.h>
  37#include <crypto/internal/skcipher.h>
  38#include "atmel-tdes-regs.h"
  39
  40#define ATMEL_TDES_PRIORITY	300
  41
  42/* TDES flags  */
  43/* Reserve bits [17:16], [13:12], [2:0] for AES Mode Register */
  44#define TDES_FLAGS_ENCRYPT	TDES_MR_CYPHER_ENC
  45#define TDES_FLAGS_OPMODE_MASK	(TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
  46#define TDES_FLAGS_ECB		TDES_MR_OPMOD_ECB
  47#define TDES_FLAGS_CBC		TDES_MR_OPMOD_CBC
  48#define TDES_FLAGS_OFB		TDES_MR_OPMOD_OFB
  49#define TDES_FLAGS_CFB64	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
  50#define TDES_FLAGS_CFB32	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
  51#define TDES_FLAGS_CFB16	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
  52#define TDES_FLAGS_CFB8		(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)
  53
  54#define TDES_FLAGS_MODE_MASK	(TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)
  55
  56#define TDES_FLAGS_INIT		BIT(3)
  57#define TDES_FLAGS_FAST		BIT(4)
  58#define TDES_FLAGS_BUSY		BIT(5)
  59#define TDES_FLAGS_DMA		BIT(6)
  60
  61#define ATMEL_TDES_QUEUE_LENGTH	50
  62
  63#define CFB8_BLOCK_SIZE		1
  64#define CFB16_BLOCK_SIZE	2
  65#define CFB32_BLOCK_SIZE	4
  66
/* Feature set of the TDES IP, derived from its hardware version. */
struct atmel_tdes_caps {
	bool	has_dma;		/* can use the dmaengine (else PDC only) */
	u32		has_cfb_3keys;	/* CFB with three-key TDES supported */
};
  71
struct atmel_tdes_dev;

/* Per-transform (tfm) context. */
struct atmel_tdes_ctx {
	struct atmel_tdes_dev *dd;	/* device bound at tfm init time */

	int		keylen;		/* key length in bytes */
	u32		key[DES3_EDE_KEY_SIZE / sizeof(u32)];	/* DES or 2/3-key TDES key */
	unsigned long	flags;

	u16		block_size;	/* block size of the selected mode (1/2/4/8) */
};
  83
/* Per-request context. */
struct atmel_tdes_reqctx {
	unsigned long mode;		/* TDES_FLAGS_* opmode and direction */
	u8 lastc[DES_BLOCK_SIZE];	/* last ciphertext block saved for in-place decrypt IV update */
};
  88
/* One dmaengine channel plus its slave configuration. */
struct atmel_tdes_dma {
	struct dma_chan			*chan;
	struct dma_slave_config dma_conf;
};
  93
/* State of one TDES IP instance. */
struct atmel_tdes_dev {
	struct list_head	list;		/* node in atmel_tdes.dev_list */
	unsigned long		phys_base;	/* physical base, used for PDC/DMA addresses */
	void __iomem		*io_base;	/* mapped registers */

	struct atmel_tdes_ctx	*ctx;	/* context of the request in flight */
	struct device		*dev;
	struct clk			*iclk;	/* peripheral clock */
	int					irq;

	unsigned long		flags;	/* TDES_FLAGS_* state bits */

	spinlock_t		lock;		/* protects queue and BUSY flag */
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;	/* transfer-complete bottom half */
	struct tasklet_struct	queue_task;	/* re-kicks the request queue */

	struct skcipher_request	*req;	/* request in flight */
	size_t				total;	/* bytes of the request still to process */

	struct scatterlist	*in_sg;		/* current input sg entry */
	unsigned int		nb_in_sg;
	size_t				in_offset;	/* offset into *in_sg */
	struct scatterlist	*out_sg;	/* current output sg entry */
	unsigned int		nb_out_sg;
	size_t				out_offset;	/* offset into *out_sg */

	size_t	buflen;		/* usable size of each bounce buffer */
	size_t	dma_size;	/* length of the transfer in flight */

	void	*buf_in;	/* bounce page for input (slow path) */
	int		dma_in;
	dma_addr_t	dma_addr_in;
	struct atmel_tdes_dma	dma_lch_in;

	void	*buf_out;	/* bounce page for output (slow path) */
	int		dma_out;
	dma_addr_t	dma_addr_out;
	struct atmel_tdes_dma	dma_lch_out;

	struct atmel_tdes_caps	caps;	/* capabilities for this IP version */

	u32	hw_version;	/* raw TDES_HW_VERSION value */
};
 139
/* Driver-wide state: the list of probed devices, guarded by .lock. */
struct atmel_tdes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_tdes_drv atmel_tdes = {
	.dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
};
 149
/*
 * Copy up to @buflen bytes (bounded by @total) between the linear buffer
 * @buf and the scatterlist *@sg starting at *@offset. @out selects the
 * direction, as in scatterwalk_map_and_copy(): non-zero copies buf -> sg,
 * zero copies sg -> buf. *@sg and *@offset are advanced past the data
 * consumed, so repeated calls walk the list incrementally.
 * Returns the number of bytes copied.
 */
static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
			void *buf, size_t buflen, size_t total, int out)
{
	size_t count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		/* Entry exhausted: step to the next one, or stop at list end. */
		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}
 180
/* Relaxed MMIO read of a TDES register. */
static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}
 185
/* Relaxed MMIO write of a TDES register. */
static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}
 191
 192static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
 193			       const u32 *value, int count)
 194{
 195	for (; count--; value++, offset += 4)
 196		atmel_tdes_write(dd, offset, *value);
 197}
 198
 199static struct atmel_tdes_dev *atmel_tdes_dev_alloc(void)
 200{
 201	struct atmel_tdes_dev *tdes_dd;
 
 202
 203	spin_lock_bh(&atmel_tdes.lock);
 204	/* One TDES IP per SoC. */
 205	tdes_dd = list_first_entry_or_null(&atmel_tdes.dev_list,
 206					   struct atmel_tdes_dev, list);
 
 
 
 
 
 
 207	spin_unlock_bh(&atmel_tdes.lock);
 
 208	return tdes_dd;
 209}
 210
/*
 * Enable the peripheral clock and, on first use only, soft-reset the IP.
 * TDES_FLAGS_INIT makes the reset one-shot for the device's lifetime.
 */
static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
{
	int err;

	err = clk_prepare_enable(dd->iclk);
	if (err)
		return err;

	if (!(dd->flags & TDES_FLAGS_INIT)) {
		atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
		dd->flags |= TDES_FLAGS_INIT;
	}

	return 0;
}
 226
/* Read the 12-bit hardware version from TDES_HW_VERSION. */
static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
{
	return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
}
 231
/*
 * Probe-time helper: power the IP up once to latch its version into
 * dd->hw_version, then release the clock again.
 */
static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
{
	int err;

	err = atmel_tdes_hw_init(dd);
	if (err)
		return err;

	dd->hw_version = atmel_tdes_get_version(dd);

	dev_info(dd->dev,
			"version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);

	return 0;
}
 249
/* dmaengine completion callback for the output channel: defer to tasklet. */
static void atmel_tdes_dma_callback(void *data)
{
	struct atmel_tdes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}
 257
/*
 * Program the mode, key and IV registers for the current request.
 * The key length selects DES vs 2-key vs 3-key TDES; the opmode and
 * direction come from dd->flags.
 */
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
	int err;
	u32 valmr = TDES_MR_SMOD_PDC;

	err = atmel_tdes_hw_init(dd);

	if (err)
		return err;

	/* Without a DMA engine the PDC is used; make sure it is stopped. */
	if (!dd->caps.has_dma)
		atmel_tdes_write(dd, TDES_PTCR,
			TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
		valmr |= TDES_MR_KEYMOD_3KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else if (dd->ctx->keylen > DES_KEY_SIZE) {
		valmr |= TDES_MR_KEYMOD_2KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else {
		valmr |= TDES_MR_TDESMOD_DES;
	}

	valmr |= dd->flags & TDES_FLAGS_MODE_MASK;

	atmel_tdes_write(dd, TDES_MR, valmr);

	atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
						dd->ctx->keylen >> 2);

	/* ECB takes no IV; all other modes load two IV words. */
	if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
		atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);

	return 0;
}
 295
/*
 * Tear down a finished PDC transfer: disable both PDC directions, then
 * either unmap the directly-mapped sg entries (fast path) or copy the
 * result out of the bounce buffer (slow path).
 */
static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
{
	int err = 0;
	size_t count;

	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);

	if (dd->flags & TDES_FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		/*
		 * NOTE(review): the CPU is about to read buf_out, so a
		 * dma_sync_single_for_cpu() would be expected here rather
		 * than _for_device() — confirm against the DMA API rules.
		 */
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy data */
		count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			dev_dbg(dd->dev, "not all data converted: %zu\n", count);
		}
	}

	return err;
}
 321
/*
 * Allocate one page per direction as bounce buffers for the slow
 * (unaligned) path and DMA-map them once for the device's lifetime.
 * buflen is rounded down to a whole number of DES blocks.
 */
static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
{
	int err = -ENOMEM;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buflen = PAGE_SIZE;
	dd->buflen &= ~(DES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_dbg(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
					dd->buflen, DMA_TO_DEVICE);
	err = dma_mapping_error(dd->dev, dd->dma_addr_in);
	if (err) {
		dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
					dd->buflen, DMA_FROM_DEVICE);
	err = dma_mapping_error(dd->dev, dd->dma_addr_out);
	if (err) {
		dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
		goto err_map_out;
	}

	return 0;

	/* Unwind in reverse order; free_page() ignores a NULL buffer. */
err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
err_map_in:
err_alloc:
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);

	return err;
}
 364
/* Undo atmel_tdes_buff_init(): unmap then free both bounce buffers. */
static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
}
 374
/*
 * Start one transfer through the built-in PDC: program the transmit and
 * receive pointer/counter pairs, enable the end-of-receive interrupt and
 * start both directions. The PDC counter unit follows the CFB data size
 * (bytes for CFB8, halfwords for CFB16, 32-bit words otherwise).
 */
static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
	int len32;

	dd->dma_size = length;

	/* Slow path: flush the freshly filled bounce buffer to the device. */
	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		len32 = DIV_ROUND_UP(length, sizeof(u8));
		break;

	case TDES_FLAGS_CFB16:
		len32 = DIV_ROUND_UP(length, sizeof(u16));
		break;

	default:
		len32 = DIV_ROUND_UP(length, sizeof(u32));
		break;
	}

	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
	atmel_tdes_write(dd, TDES_TCR, len32);
	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
	atmel_tdes_write(dd, TDES_RCR, len32);

	/* Enable Interrupt */
	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);

	/* Start DMA transfer */
	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);

	return 0;
}
 417
/*
 * Start one transfer through the dmaengine: one memory-to-device
 * descriptor feeding the data register and one device-to-memory
 * descriptor draining the result. Completion is signalled by the output
 * channel's callback. The bus width follows the CFB data size.
 */
static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor	*in_desc, *out_desc;
	enum dma_slave_buswidth addr_width;

	dd->dma_size = length;

	/* Slow path: flush the freshly filled bounce buffer to the device. */
	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;

	case TDES_FLAGS_CFB16:
		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;

	default:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
	dd->dma_lch_out.dma_conf.src_addr_width = addr_width;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= TDES_FLAGS_DMA;

	/* Single-entry scatterlists wrapping the two DMA addresses. */
	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
				1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT  |  DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	/*
	 * NOTE(review): if this second prep fails, in_desc is not explicitly
	 * released; it was prepared with DMA_CTRL_ACK so the engine may
	 * reclaim it — confirm against the dmaengine API.
	 */
	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
				1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	out_desc->callback = atmel_tdes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}
 487
 488static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
 489{
 490	int err, fast = 0, in, out;
 491	size_t count;
 492	dma_addr_t addr_in, addr_out;
 493
 494	if ((!dd->in_offset) && (!dd->out_offset)) {
 495		/* check for alignment */
 496		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
 497			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
 498		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
 499			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
 500		fast = in && out;
 501
 502		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
 503			fast = 0;
 504	}
 505
 506
 507	if (fast)  {
 508		count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
 509		count = min_t(size_t, count, sg_dma_len(dd->out_sg));
 510
 511		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 512		if (!err) {
 513			dev_dbg(dd->dev, "dma_map_sg() error\n");
 514			return -EINVAL;
 515		}
 516
 517		err = dma_map_sg(dd->dev, dd->out_sg, 1,
 518				DMA_FROM_DEVICE);
 519		if (!err) {
 520			dev_dbg(dd->dev, "dma_map_sg() error\n");
 521			dma_unmap_sg(dd->dev, dd->in_sg, 1,
 522				DMA_TO_DEVICE);
 523			return -EINVAL;
 524		}
 525
 526		addr_in = sg_dma_address(dd->in_sg);
 527		addr_out = sg_dma_address(dd->out_sg);
 528
 529		dd->flags |= TDES_FLAGS_FAST;
 530
 531	} else {
 532		/* use cache buffers */
 533		count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
 534				dd->buf_in, dd->buflen, dd->total, 0);
 535
 536		addr_in = dd->dma_addr_in;
 537		addr_out = dd->dma_addr_out;
 538
 539		dd->flags &= ~TDES_FLAGS_FAST;
 540	}
 541
 542	dd->total -= count;
 543
 544	if (dd->caps.has_dma)
 545		err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
 546	else
 547		err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);
 548
 549	if (err && (dd->flags & TDES_FLAGS_FAST)) {
 550		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 551		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
 552	}
 553
 554	return err;
 555}
 556
/*
 * Update req->iv with the last ciphertext block so a follow-up request
 * can chain correctly. On encryption the block is read from the
 * destination; on in-place decryption it was saved to rctx->lastc before
 * the source was overwritten, otherwise it is read from the source.
 */
static void
atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
{
	struct skcipher_request *req = dd->req;
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	if (req->cryptlen < ivsize)
		return;

	if (rctx->mode & TDES_FLAGS_ENCRYPT) {
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - ivsize, ivsize, 0);
	} else {
		if (req->src == req->dst)
			memcpy(req->iv, rctx->lastc, ivsize);
		else
			scatterwalk_map_and_copy(req->iv, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
	}
}
 580
/*
 * Complete the request in flight: release the clock, clear BUSY,
 * propagate the chained IV for non-ECB modes on success, and invoke the
 * caller's completion.
 */
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
	struct skcipher_request *req = dd->req;
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);

	clk_disable_unprepare(dd->iclk);

	dd->flags &= ~TDES_FLAGS_BUSY;

	if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
		atmel_tdes_set_iv_as_last_ciphertext_block(dd);

	req->base.complete(&req->base, err);
}
 595
/*
 * Enqueue @req (may be NULL to merely kick the queue) and, if the device
 * is idle, dequeue the next request and start it. Returns the
 * crypto_enqueue_request() status for @req (0 when only kicking).
 * dd->lock serializes queue access and the BUSY flag.
 */
static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
			       struct skcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_tdes_ctx *ctx;
	struct atmel_tdes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = crypto_enqueue_request(&dd->queue, &req->base);
	if (dd->flags & TDES_FLAGS_BUSY) {
		/* A request is already running; it will re-kick the queue. */
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= TDES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = skcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->cryptlen;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = skcipher_request_ctx(req);
	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	rctx->mode &= TDES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;

	err = atmel_tdes_write_ctrl(dd);
	if (!err)
		err = atmel_tdes_crypt_start(dd);
	if (err) {
		/* des_task will not finish it, so do it here */
		atmel_tdes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}
 651
/*
 * Tear down a finished dmaengine transfer: unmap the directly-mapped sg
 * entries (fast path) or copy the result out of the bounce buffer (slow
 * path). Returns -EINVAL if called with no DMA transfer in flight.
 */
static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
{
	int err = -EINVAL;
	size_t count;

	if (dd->flags & TDES_FLAGS_DMA) {
		err = 0;
		if  (dd->flags & TDES_FLAGS_FAST) {
			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		} else {
			/* NOTE(review): _for_cpu() would be expected before the CPU reads — confirm. */
			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
				dd->dma_size, DMA_FROM_DEVICE);

			/* copy data */
			count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);
			if (count != dd->dma_size) {
				err = -EINVAL;
				dev_dbg(dd->dev, "not all data converted: %zu\n", count);
			}
		}
	}
	return err;
}
 677
 678static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
 679{
 680	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 681	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
 682	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
 683	struct device *dev = ctx->dd->dev;
 684
 685	if (!req->cryptlen)
 686		return 0;
 687
 688	switch (mode & TDES_FLAGS_OPMODE_MASK) {
 689	case TDES_FLAGS_CFB8:
 690		if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
 691			dev_dbg(dev, "request size is not exact amount of CFB8 blocks\n");
 692			return -EINVAL;
 693		}
 694		ctx->block_size = CFB8_BLOCK_SIZE;
 695		break;
 696
 697	case TDES_FLAGS_CFB16:
 698		if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
 699			dev_dbg(dev, "request size is not exact amount of CFB16 blocks\n");
 700			return -EINVAL;
 701		}
 702		ctx->block_size = CFB16_BLOCK_SIZE;
 703		break;
 704
 705	case TDES_FLAGS_CFB32:
 706		if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
 707			dev_dbg(dev, "request size is not exact amount of CFB32 blocks\n");
 708			return -EINVAL;
 709		}
 710		ctx->block_size = CFB32_BLOCK_SIZE;
 711		break;
 712
 713	default:
 714		if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
 715			dev_dbg(dev, "request size is not exact amount of DES blocks\n");
 716			return -EINVAL;
 717		}
 718		ctx->block_size = DES_BLOCK_SIZE;
 719		break;
 720	}
 721
 722	rctx->mode = mode;
 723
 724	if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
 725	    !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
 726		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 727
 728		if (req->cryptlen >= ivsize)
 729			scatterwalk_map_and_copy(rctx->lastc, req->src,
 730						 req->cryptlen - ivsize,
 731						 ivsize, 0);
 732	}
 733
 734	return atmel_tdes_handle_queue(ctx->dd, req);
 735}
 736
/*
 * Acquire the "tx" (memory-to-device) and "rx" (device-to-memory) DMA
 * channels and preset their slave configurations. The 4-byte bus widths
 * set here are overridden per-request for narrow CFB modes in
 * atmel_tdes_crypt_dma().
 */
static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
{
	int ret;

	/* Try to grab 2 DMA channels */
	dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->dma_lch_in.chan)) {
		ret = PTR_ERR(dd->dma_lch_in.chan);
		goto err_dma_in;
	}

	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		TDES_IDATA1R;
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
	if (IS_ERR(dd->dma_lch_out.chan)) {
		ret = PTR_ERR(dd->dma_lch_out.chan);
		goto err_dma_out;
	}

	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
		TDES_ODATA1R;
	dd->dma_lch_out.dma_conf.src_maxburst = 1;
	dd->dma_lch_out.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.dst_maxburst = 1;
	dd->dma_lch_out.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.device_fc = false;

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	dev_err(dd->dev, "no DMA channel available\n");
	return ret;
}
 782
/* Release both DMA channels acquired by atmel_tdes_dma_init(). */
static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}
 788
 789static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
 790			   unsigned int keylen)
 791{
 792	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 793	int err;
 794
 795	err = verify_skcipher_des_key(tfm, key);
 796	if (err)
 797		return err;
 798
 799	memcpy(ctx->key, key, keylen);
 800	ctx->keylen = keylen;
 801
 802	return 0;
 803}
 804
 805static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 806			   unsigned int keylen)
 807{
 808	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 809	int err;
 810
 811	err = verify_skcipher_des3_key(tfm, key);
 812	if (err)
 813		return err;
 814
 815	memcpy(ctx->key, key, keylen);
 816	ctx->keylen = keylen;
 817
 818	return 0;
 819}
 820
/*
 * Per-mode entry points: thin wrappers that select the TDES_FLAGS_*
 * opmode (and direction) and hand off to atmel_tdes_crypt().
 */
static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
}

static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
}
static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
}

static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
}

static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
}

static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
}

static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
}
 889
 890static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
 891{
 892	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 893
 894	ctx->dd = atmel_tdes_dev_alloc();
 895	if (!ctx->dd)
 
 
 896		return -ENODEV;
 897
 898	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
 899
 900	return 0;
 901}
 902
 903static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
 904{
 905	alg->base.cra_priority = ATMEL_TDES_PRIORITY;
 906	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
 907	alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
 908	alg->base.cra_module = THIS_MODULE;
 909
 910	alg->init = atmel_tdes_init_tfm;
 911}
 912
/*
 * Algorithm table: DES (single-key) and TDES (three-key) variants in the
 * block/stream modes the hardware supports.  Common fields (priority,
 * flags, ctxsize, module, init) are filled in by
 * atmel_tdes_skcipher_alg_init() at registration time.
 *
 * cra_alignmask mirrors each mode's data-unit size minus one (e.g. 0x7
 * for 8-byte blocks); CFB8 needs no alignment.
 */
static struct skcipher_alg tdes_algs[] = {
/* Single-key DES modes. */
{
	.base.cra_name		= "ecb(des)",
	.base.cra_driver_name	= "atmel-ecb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des)",
	.base.cra_driver_name	= "atmel-cbc-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
},
{
	.base.cra_name		= "cfb(des)",
	.base.cra_driver_name	= "atmel-cfb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb_encrypt,
	.decrypt		= atmel_tdes_cfb_decrypt,
},
{
	.base.cra_name		= "cfb8(des)",
	.base.cra_driver_name	= "atmel-cfb8-des",
	.base.cra_blocksize	= CFB8_BLOCK_SIZE,
	.base.cra_alignmask	= 0,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb8_encrypt,
	.decrypt		= atmel_tdes_cfb8_decrypt,
},
{
	.base.cra_name		= "cfb16(des)",
	.base.cra_driver_name	= "atmel-cfb16-des",
	.base.cra_blocksize	= CFB16_BLOCK_SIZE,
	.base.cra_alignmask	= 0x1,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb16_encrypt,
	.decrypt		= atmel_tdes_cfb16_decrypt,
},
{
	.base.cra_name		= "cfb32(des)",
	.base.cra_driver_name	= "atmel-cfb32-des",
	.base.cra_blocksize	= CFB32_BLOCK_SIZE,
	.base.cra_alignmask	= 0x3,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb32_encrypt,
	.decrypt		= atmel_tdes_cfb32_decrypt,
},
{
	.base.cra_name		= "ofb(des)",
	.base.cra_driver_name	= "atmel-ofb-des",
	.base.cra_blocksize	= 1,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_ofb_encrypt,
	.decrypt		= atmel_tdes_ofb_decrypt,
},
/* Three-key triple-DES modes (same block handlers, TDES setkey). */
{
	.base.cra_name		= "ecb(des3_ede)",
	.base.cra_driver_name	= "atmel-ecb-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des3_ede)",
	.base.cra_driver_name	= "atmel-cbc-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
	.ivsize			= DES_BLOCK_SIZE,
},
{
	.base.cra_name		= "ofb(des3_ede)",
	.base.cra_driver_name	= "atmel-ofb-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_ofb_encrypt,
	.decrypt		= atmel_tdes_ofb_decrypt,
	.ivsize			= DES_BLOCK_SIZE,
},
};
1043
1044static void atmel_tdes_queue_task(unsigned long data)
1045{
1046	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;
1047
1048	atmel_tdes_handle_queue(dd, NULL);
1049}
1050
/*
 * Completion tasklet, scheduled from the interrupt handler: stop the
 * transfer that just ended, then either start a transfer for the
 * remaining data or finish the request and restart the queue.
 */
static void atmel_tdes_done_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
	int err;

	/* Tear down whichever transfer mechanism (PDC or DMA) was used. */
	if (!(dd->flags & TDES_FLAGS_DMA))
		err = atmel_tdes_crypt_pdc_stop(dd);
	else
		err = atmel_tdes_crypt_dma_stop(dd);

	if (dd->total && !err) {
		if (dd->flags & TDES_FLAGS_FAST) {
			/*
			 * NOTE(review): the fast path appears to consume one
			 * sg entry per transfer; advance both lists here.
			 * Running out of entries while dd->total is non-zero
			 * is treated as an error.
			 */
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_tdes_crypt_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

	atmel_tdes_finish_req(dd, err);
	atmel_tdes_handle_queue(dd, NULL);
}
1077
1078static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
1079{
1080	struct atmel_tdes_dev *tdes_dd = dev_id;
1081	u32 reg;
1082
1083	reg = atmel_tdes_read(tdes_dd, TDES_ISR);
1084	if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
1085		atmel_tdes_write(tdes_dd, TDES_IDR, reg);
1086		if (TDES_FLAGS_BUSY & tdes_dd->flags)
1087			tasklet_schedule(&tdes_dd->done_task);
1088		else
1089			dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
1090		return IRQ_HANDLED;
1091	}
1092
1093	return IRQ_NONE;
1094}
1095
1096static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
1097{
1098	int i;
1099
1100	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
1101		crypto_unregister_skcipher(&tdes_algs[i]);
1102}
1103
1104static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
1105{
1106	int err, i, j;
1107
1108	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
1109		atmel_tdes_skcipher_alg_init(&tdes_algs[i]);
1110
1111		err = crypto_register_skcipher(&tdes_algs[i]);
1112		if (err)
1113			goto err_tdes_algs;
1114	}
1115
1116	return 0;
1117
1118err_tdes_algs:
1119	for (j = 0; j < i; j++)
1120		crypto_unregister_skcipher(&tdes_algs[j]);
1121
1122	return err;
1123}
1124
1125static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
1126{
1127
1128	dd->caps.has_dma = 0;
1129	dd->caps.has_cfb_3keys = 0;
1130
1131	/* keep only major version number */
1132	switch (dd->hw_version & 0xf00) {
1133	case 0x800:
1134	case 0x700:
1135		dd->caps.has_dma = 1;
1136		dd->caps.has_cfb_3keys = 1;
1137		break;
1138	case 0x600:
1139		break;
1140	default:
1141		dev_warn(dd->dev,
1142				"Unmanaged tdes version, set minimum capabilities\n");
1143		break;
1144	}
1145}
1146
#if defined(CONFIG_OF)
/* Device-tree match table; exported for module autoloading. */
static const struct of_device_id atmel_tdes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-tdes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
#endif
1154
1155static int atmel_tdes_probe(struct platform_device *pdev)
1156{
1157	struct atmel_tdes_dev *tdes_dd;
1158	struct device *dev = &pdev->dev;
1159	struct resource *tdes_res;
1160	int err;
1161
1162	tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
1163	if (!tdes_dd)
1164		return -ENOMEM;
1165
1166	tdes_dd->dev = dev;
1167
1168	platform_set_drvdata(pdev, tdes_dd);
1169
1170	INIT_LIST_HEAD(&tdes_dd->list);
1171	spin_lock_init(&tdes_dd->lock);
1172
1173	tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
1174					(unsigned long)tdes_dd);
1175	tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
1176					(unsigned long)tdes_dd);
1177
1178	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
1179
1180	/* Get the base address */
1181	tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1182	if (!tdes_res) {
1183		dev_err(dev, "no MEM resource info\n");
1184		err = -ENODEV;
1185		goto err_tasklet_kill;
1186	}
1187	tdes_dd->phys_base = tdes_res->start;
1188
1189	/* Get the IRQ */
1190	tdes_dd->irq = platform_get_irq(pdev,  0);
1191	if (tdes_dd->irq < 0) {
1192		err = tdes_dd->irq;
1193		goto err_tasklet_kill;
1194	}
1195
1196	err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
1197			       IRQF_SHARED, "atmel-tdes", tdes_dd);
1198	if (err) {
1199		dev_err(dev, "unable to request tdes irq.\n");
1200		goto err_tasklet_kill;
1201	}
1202
1203	/* Initializing the clock */
1204	tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
1205	if (IS_ERR(tdes_dd->iclk)) {
1206		dev_err(dev, "clock initialization failed.\n");
1207		err = PTR_ERR(tdes_dd->iclk);
1208		goto err_tasklet_kill;
1209	}
1210
1211	tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
1212	if (IS_ERR(tdes_dd->io_base)) {
 
1213		err = PTR_ERR(tdes_dd->io_base);
1214		goto err_tasklet_kill;
1215	}
1216
1217	err = atmel_tdes_hw_version_init(tdes_dd);
1218	if (err)
1219		goto err_tasklet_kill;
1220
1221	atmel_tdes_get_cap(tdes_dd);
1222
1223	err = atmel_tdes_buff_init(tdes_dd);
1224	if (err)
1225		goto err_tasklet_kill;
1226
1227	if (tdes_dd->caps.has_dma) {
1228		err = atmel_tdes_dma_init(tdes_dd);
1229		if (err)
1230			goto err_buff_cleanup;
1231
1232		dev_info(dev, "using %s, %s for DMA transfers\n",
1233				dma_chan_name(tdes_dd->dma_lch_in.chan),
1234				dma_chan_name(tdes_dd->dma_lch_out.chan));
1235	}
1236
1237	spin_lock(&atmel_tdes.lock);
1238	list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
1239	spin_unlock(&atmel_tdes.lock);
1240
1241	err = atmel_tdes_register_algs(tdes_dd);
1242	if (err)
1243		goto err_algs;
1244
1245	dev_info(dev, "Atmel DES/TDES\n");
1246
1247	return 0;
1248
1249err_algs:
1250	spin_lock(&atmel_tdes.lock);
1251	list_del(&tdes_dd->list);
1252	spin_unlock(&atmel_tdes.lock);
1253	if (tdes_dd->caps.has_dma)
1254		atmel_tdes_dma_cleanup(tdes_dd);
1255err_buff_cleanup:
1256	atmel_tdes_buff_cleanup(tdes_dd);
1257err_tasklet_kill:
1258	tasklet_kill(&tdes_dd->done_task);
1259	tasklet_kill(&tdes_dd->queue_task);
1260
1261	return err;
1262}
1263
/*
 * Remove: unpublish the device, unregister its algorithms and release the
 * resources acquired in probe (devm-managed ones are freed automatically).
 */
static int atmel_tdes_remove(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd = platform_get_drvdata(pdev);

	/* Take the device off the global list so no new user can find it. */
	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);

	atmel_tdes_unregister_algs(tdes_dd);

	/* Ensure the tasklets are idle before tearing down DMA/buffers. */
	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);

	atmel_tdes_buff_cleanup(tdes_dd);

	return 0;
}
1284
/* Platform driver glue; of_match_ptr() compiles to NULL without CONFIG_OF. */
static struct platform_driver atmel_tdes_driver = {
	.probe		= atmel_tdes_probe,
	.remove		= atmel_tdes_remove,
	.driver		= {
		.name	= "atmel_tdes",
		.of_match_table = of_match_ptr(atmel_tdes_dt_ids),
	},
};

module_platform_driver(atmel_tdes_driver);

MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");