Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Cryptographic API.
   4 *
   5 * Support for ATMEL DES/TDES HW acceleration.
   6 *
   7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
   8 * Author: Nicolas Royer <nicolas@eukrea.com>
   9 *
  10 * Some ideas are from omap-aes.c drivers.
  11 */
  12
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/slab.h>
  17#include <linux/err.h>
  18#include <linux/clk.h>
  19#include <linux/io.h>
  20#include <linux/hw_random.h>
  21#include <linux/platform_device.h>
  22
  23#include <linux/device.h>
  24#include <linux/dmaengine.h>
  25#include <linux/init.h>
  26#include <linux/errno.h>
  27#include <linux/interrupt.h>
  28#include <linux/irq.h>
  29#include <linux/scatterlist.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/mod_devicetable.h>
  32#include <linux/delay.h>
  33#include <linux/crypto.h>
  34#include <crypto/scatterwalk.h>
  35#include <crypto/algapi.h>
  36#include <crypto/internal/des.h>
  37#include <crypto/internal/skcipher.h>
  38#include "atmel-tdes-regs.h"
  39
#define ATMEL_TDES_PRIORITY	300

/* TDES flags  */
/* Reserve bits [17:16], [13:12], [2:0] for the TDES Mode Register fields */
#define TDES_FLAGS_ENCRYPT	TDES_MR_CYPHER_ENC
#define TDES_FLAGS_OPMODE_MASK	(TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
#define TDES_FLAGS_ECB		TDES_MR_OPMOD_ECB
#define TDES_FLAGS_CBC		TDES_MR_OPMOD_CBC

#define TDES_FLAGS_MODE_MASK	(TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)

/* Driver-internal state bits, kept outside the MR bit ranges above. */
#define TDES_FLAGS_INIT		BIT(3)
#define TDES_FLAGS_FAST		BIT(4)
#define TDES_FLAGS_BUSY		BIT(5)
#define TDES_FLAGS_DMA		BIT(6)

#define ATMEL_TDES_QUEUE_LENGTH	50
  57
 
 
 
 
/* Per-IP-revision capabilities, filled in by atmel_tdes_get_cap(). */
struct atmel_tdes_caps {
	bool	has_dma;	/* true: dmaengine channels; false: built-in PDC */
};
  61
  62struct atmel_tdes_dev;
  63
/* Per-transform (tfm) context: key material and device binding. */
struct atmel_tdes_ctx {
	struct atmel_tdes_dev *dd;	/* device this tfm is bound to */

	int		keylen;		/* key length in bytes (DES or 2/3-key TDES) */
	u32		key[DES3_EDE_KEY_SIZE / sizeof(u32)];
	unsigned long	flags;

	u16		block_size;	/* block size used for alignment checks */
};
  73
/* Per-request context. */
struct atmel_tdes_reqctx {
	unsigned long mode;		/* TDES_FLAGS_* mode bits for this request */
	u8 lastc[DES_BLOCK_SIZE];	/* last ciphertext block saved for IV chaining */
};
  78
/* One dmaengine channel plus its slave configuration. */
struct atmel_tdes_dma {
	struct dma_chan			*chan;
	struct dma_slave_config dma_conf;
};
  83
/* Per-device state for one TDES IP instance (one per SoC). */
struct atmel_tdes_dev {
	struct list_head	list;		/* entry in atmel_tdes.dev_list */
	unsigned long		phys_base;	/* physical base, used for DMA FIFO addresses */
	void __iomem		*io_base;	/* mapped register window */

	struct atmel_tdes_ctx	*ctx;		/* context of the request in flight */
	struct device		*dev;
	struct clk			*iclk;	/* peripheral clock */
	int					irq;

	unsigned long		flags;		/* TDES_FLAGS_* state bits */

	spinlock_t		lock;		/* protects queue and BUSY flag */
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;	/* transfer-complete bottom half */
	struct tasklet_struct	queue_task;	/* queue-pump bottom half */

	struct skcipher_request	*req;		/* request currently being processed */
	size_t				total;	/* bytes still to process for req */

	struct scatterlist	*in_sg;		/* current input sg entry */
	unsigned int		nb_in_sg;
	size_t				in_offset;	/* byte offset into in_sg */
	struct scatterlist	*out_sg;	/* current output sg entry */
	unsigned int		nb_out_sg;
	size_t				out_offset;	/* byte offset into out_sg */

	size_t	buflen;		/* usable size of each bounce buffer */
	size_t	dma_size;	/* length of the transfer in flight */

	void	*buf_in;	/* bounce buffer for unaligned input */
	int		dma_in;
	dma_addr_t	dma_addr_in;
	struct atmel_tdes_dma	dma_lch_in;

	void	*buf_out;	/* bounce buffer for unaligned output */
	int		dma_out;
	dma_addr_t	dma_addr_out;
	struct atmel_tdes_dma	dma_lch_out;

	struct atmel_tdes_caps	caps;	/* derived from hw_version */

	u32	hw_version;
};
 129
/* Global driver state: the list of probed TDES devices. */
struct atmel_tdes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;		/* protects dev_list */
};

static struct atmel_tdes_drv atmel_tdes = {
	.dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
};
 139
/*
 * Copy up to min(@buflen, @total) bytes between @buf and the scatterlist,
 * advancing the caller's *sg / *offset cursors as entries are consumed.
 * @out selects direction: 0 = scatterlist -> buf, 1 = buf -> scatterlist
 * (it is passed straight to scatterwalk_map_and_copy()).
 * Returns the number of bytes actually copied.
 */
static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
			void *buf, size_t buflen, size_t total, int out)
{
	size_t count, off = 0;

	while (buflen && total) {
		/* Limit the chunk to what remains in the current sg entry. */
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		/* Entry exhausted: advance to the next one, if any. */
		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;	/* end of list: stop looping */
		}
	}

	return off;
}
 170
 171static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
 172{
 173	return readl_relaxed(dd->io_base + offset);
 174}
 175
 176static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
 177					u32 offset, u32 value)
 178{
 179	writel_relaxed(value, dd->io_base + offset);
 180}
 181
 182static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
 183			       const u32 *value, int count)
 184{
 185	for (; count--; value++, offset += 4)
 186		atmel_tdes_write(dd, offset, *value);
 187}
 188
 189static struct atmel_tdes_dev *atmel_tdes_dev_alloc(void)
 190{
 191	struct atmel_tdes_dev *tdes_dd;
 192
 193	spin_lock_bh(&atmel_tdes.lock);
 194	/* One TDES IP per SoC. */
 195	tdes_dd = list_first_entry_or_null(&atmel_tdes.dev_list,
 196					   struct atmel_tdes_dev, list);
 197	spin_unlock_bh(&atmel_tdes.lock);
 198	return tdes_dd;
 199}
 200
 201static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
 202{
 203	int err;
 204
 205	err = clk_prepare_enable(dd->iclk);
 206	if (err)
 207		return err;
 208
 209	if (!(dd->flags & TDES_FLAGS_INIT)) {
 210		atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
 211		dd->flags |= TDES_FLAGS_INIT;
 212	}
 213
 214	return 0;
 215}
 216
 217static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
 218{
 219	return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
 220}
 221
 222static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
 223{
 224	int err;
 225
 226	err = atmel_tdes_hw_init(dd);
 227	if (err)
 228		return err;
 229
 230	dd->hw_version = atmel_tdes_get_version(dd);
 231
 232	dev_info(dd->dev,
 233			"version: 0x%x\n", dd->hw_version);
 234
 235	clk_disable_unprepare(dd->iclk);
 236
 237	return 0;
 238}
 239
 240static void atmel_tdes_dma_callback(void *data)
 241{
 242	struct atmel_tdes_dev *dd = data;
 243
 244	/* dma_lch_out - completed */
 245	tasklet_schedule(&dd->done_task);
 246}
 247
/*
 * Program the engine for the current request: enable the clock, pick
 * DES vs 2/3-key TDES from the key length, merge in the mode/direction
 * bits, then load the key and (for chaining modes) the IV.
 * Returns 0 or a negative errno from atmel_tdes_hw_init().
 */
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
	int err;
	u32 valmr = TDES_MR_SMOD_PDC;

	err = atmel_tdes_hw_init(dd);

	if (err)
		return err;

	/* Without dmaengine support, make sure the PDC is disabled first. */
	if (!dd->caps.has_dma)
		atmel_tdes_write(dd, TDES_PTCR,
			TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
		valmr |= TDES_MR_KEYMOD_3KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else if (dd->ctx->keylen > DES_KEY_SIZE) {
		valmr |= TDES_MR_KEYMOD_2KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else {
		valmr |= TDES_MR_TDESMOD_DES;
	}

	/* Merge the opmode/encrypt bits recorded for this request. */
	valmr |= dd->flags & TDES_FLAGS_MODE_MASK;

	atmel_tdes_write(dd, TDES_MR, valmr);

	/* Load keylen/4 32-bit key words starting at KEY1W1R. */
	atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
						dd->ctx->keylen >> 2);

	/* Two 32-bit IV words for the 64-bit block chaining modes. */
	if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
		atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);

	return 0;
}
 285
/*
 * Finish a PDC transfer: stop both PDC directions, then either unmap
 * the directly-mapped scatterlists (fast path) or copy the result out
 * of the bounce buffer into the destination scatterlist.
 */
static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
{
	int err = 0;
	size_t count;

	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);

	if (dd->flags & TDES_FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		/*
		 * NOTE(review): syncing for_device before the CPU reads
		 * buf_out looks inverted (dma_sync_single_for_cpu would be
		 * expected here) — kept as-is; confirm against the DMA API.
		 */
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy data */
		count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			dev_dbg(dd->dev, "not all data converted: %zu\n", count);
		}
	}

	return err;
}
 311
/*
 * Allocate one page each for the input/output bounce buffers and map
 * them for DMA.  buflen is rounded down to a whole number of DES
 * blocks.  Returns 0 or a negative errno with everything undone.
 */
static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
{
	int err = -ENOMEM;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buflen = PAGE_SIZE;
	dd->buflen &= ~(DES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_dbg(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
					dd->buflen, DMA_TO_DEVICE);
	err = dma_mapping_error(dd->dev, dd->dma_addr_in);
	if (err) {
		dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
					dd->buflen, DMA_FROM_DEVICE);
	err = dma_mapping_error(dd->dev, dd->dma_addr_out);
	if (err) {
		dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
		goto err_map_out;
	}

	return 0;

err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
err_map_in:
err_alloc:
	/* free_page() tolerates a 0 address, so both calls are safe here. */
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
	return err;
}
 354
/* Unmap and free the bounce buffers set up by atmel_tdes_buff_init(). */
static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
}
 364
/*
 * Start one transfer using the built-in PDC: program source/destination
 * pointers and word counts, enable the end-of-receive interrupt, then
 * enable both PDC directions to kick the transfer off.  Always returns 0.
 */
static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	int len32;

	dd->dma_size = length;

	/* Flush the bounce buffer to memory when not using direct mapping. */
	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	/* The PDC counters are in 32-bit words, rounded up. */
	len32 = DIV_ROUND_UP(length, sizeof(u32));

	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
	atmel_tdes_write(dd, TDES_TCR, len32);
	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
	atmel_tdes_write(dd, TDES_RCR, len32);

	/* Enable Interrupt */
	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);

	/* Start DMA transfer */
	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);

	return 0;
}
 394
/*
 * Start one transfer through the two dmaengine channels: set 32-bit bus
 * widths, build single-entry scatterlists around the DMA addresses, and
 * submit the rx descriptor (which carries the completion callback)
 * before the tx one.  Returns 0 or -EINVAL if descriptor prep fails.
 */
static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor	*in_desc, *out_desc;
	enum dma_slave_buswidth addr_width;

	dd->dma_size = length;

	/* Flush the bounce buffer to memory when not using direct mapping. */
	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
	dd->dma_lch_out.dma_conf.src_addr_width = addr_width;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= TDES_FLAGS_DMA;

	/* sg[0]: input (mem -> dev); sg[1]: output (dev -> mem). */
	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
				1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT  |  DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
				1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	/* Completion fires when the last output data has landed. */
	out_desc->callback = atmel_tdes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}
 451
 452static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
 453{
 454	int err, fast = 0, in, out;
 455	size_t count;
 456	dma_addr_t addr_in, addr_out;
 457
 458	if ((!dd->in_offset) && (!dd->out_offset)) {
 459		/* check for alignment */
 460		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
 461			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
 462		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
 463			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
 464		fast = in && out;
 465
 466		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
 467			fast = 0;
 468	}
 469
 470
 471	if (fast)  {
 472		count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
 473		count = min_t(size_t, count, sg_dma_len(dd->out_sg));
 474
 475		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 476		if (!err) {
 477			dev_dbg(dd->dev, "dma_map_sg() error\n");
 478			return -EINVAL;
 479		}
 480
 481		err = dma_map_sg(dd->dev, dd->out_sg, 1,
 482				DMA_FROM_DEVICE);
 483		if (!err) {
 484			dev_dbg(dd->dev, "dma_map_sg() error\n");
 485			dma_unmap_sg(dd->dev, dd->in_sg, 1,
 486				DMA_TO_DEVICE);
 487			return -EINVAL;
 488		}
 489
 490		addr_in = sg_dma_address(dd->in_sg);
 491		addr_out = sg_dma_address(dd->out_sg);
 492
 493		dd->flags |= TDES_FLAGS_FAST;
 494
 495	} else {
 496		/* use cache buffers */
 497		count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
 498				dd->buf_in, dd->buflen, dd->total, 0);
 499
 500		addr_in = dd->dma_addr_in;
 501		addr_out = dd->dma_addr_out;
 502
 503		dd->flags &= ~TDES_FLAGS_FAST;
 504	}
 505
 506	dd->total -= count;
 507
 508	if (dd->caps.has_dma)
 509		err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
 510	else
 511		err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);
 512
 513	if (err && (dd->flags & TDES_FLAGS_FAST)) {
 514		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 515		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
 516	}
 517
 518	return err;
 519}
 520
 521static void
 522atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
 523{
 524	struct skcipher_request *req = dd->req;
 525	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
 526	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 527	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 528
 529	if (req->cryptlen < ivsize)
 530		return;
 531
 532	if (rctx->mode & TDES_FLAGS_ENCRYPT)
 533		scatterwalk_map_and_copy(req->iv, req->dst,
 534					 req->cryptlen - ivsize, ivsize, 0);
 535	else
 536		memcpy(req->iv, rctx->lastc, ivsize);
 537
 
 
 
 
 
 538}
 539
 540static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
 541{
 542	struct skcipher_request *req = dd->req;
 543	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
 544
 545	clk_disable_unprepare(dd->iclk);
 546
 547	dd->flags &= ~TDES_FLAGS_BUSY;
 548
 549	if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
 550		atmel_tdes_set_iv_as_last_ciphertext_block(dd);
 551
 552	skcipher_request_complete(req, err);
 553}
 554
/*
 * Enqueue @req (may be NULL to just pump the queue) and, when the
 * device is idle, dequeue the next request and start processing it.
 * Called from submission context and from the queue tasklet.
 * Returns the crypto_enqueue_request() status for the submitted request.
 */
static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
			       struct skcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_tdes_ctx *ctx;
	struct atmel_tdes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = crypto_enqueue_request(&dd->queue, &req->base);
	/* A request is in flight; its completion path pumps the queue. */
	if (dd->flags & TDES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= TDES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	/* Tell a backlogged submitter its request is now in progress. */
	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	req = skcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->cryptlen;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = skcipher_request_ctx(req);
	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	/* Install this request's mode bits into the device flags. */
	rctx->mode &= TDES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;

	err = atmel_tdes_write_ctrl(dd);
	if (!err)
		err = atmel_tdes_crypt_start(dd);
	if (err) {
		/* des_task will not finish it, so do it here */
		atmel_tdes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}
 610
/*
 * Finish a dmaengine transfer: unmap the fast-path scatterlists or copy
 * the result back out of the bounce buffer.  Returns -EINVAL when no
 * DMA was in flight or the copy-out came up short, 0 otherwise.
 */
static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
{
	int err = -EINVAL;
	size_t count;

	if (dd->flags & TDES_FLAGS_DMA) {
		err = 0;
		if  (dd->flags & TDES_FLAGS_FAST) {
			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		} else {
			/*
			 * NOTE(review): syncing for_device before the CPU
			 * reads buf_out looks inverted (for_cpu expected);
			 * kept as-is — confirm against the DMA API docs.
			 */
			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
				dd->dma_size, DMA_FROM_DEVICE);

			/* copy data */
			count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);
			if (count != dd->dma_size) {
				err = -EINVAL;
				dev_dbg(dd->dev, "not all data converted: %zu\n", count);
			}
		}
	}
	return err;
}
 636
 637static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
 638{
 639	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 640	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
 641	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
 642	struct device *dev = ctx->dd->dev;
 643
 644	if (!req->cryptlen)
 645		return 0;
 646
 647	if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
 648		dev_dbg(dev, "request size is not exact amount of DES blocks\n");
 649		return -EINVAL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 650	}
 651	ctx->block_size = DES_BLOCK_SIZE;
 652
 653	rctx->mode = mode;
 654
 655	if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
 656	    !(mode & TDES_FLAGS_ENCRYPT)) {
 657		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 658
 659		if (req->cryptlen >= ivsize)
 660			scatterwalk_map_and_copy(rctx->lastc, req->src,
 661						 req->cryptlen - ivsize,
 662						 ivsize, 0);
 663	}
 664
 665	return atmel_tdes_handle_queue(ctx->dd, req);
 666}
 667
 668static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
 669{
 670	int ret;
 671
 672	/* Try to grab 2 DMA channels */
 673	dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
 674	if (IS_ERR(dd->dma_lch_in.chan)) {
 675		ret = PTR_ERR(dd->dma_lch_in.chan);
 676		goto err_dma_in;
 677	}
 678
 679	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
 680		TDES_IDATA1R;
 681	dd->dma_lch_in.dma_conf.src_maxburst = 1;
 682	dd->dma_lch_in.dma_conf.src_addr_width =
 683		DMA_SLAVE_BUSWIDTH_4_BYTES;
 684	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
 685	dd->dma_lch_in.dma_conf.dst_addr_width =
 686		DMA_SLAVE_BUSWIDTH_4_BYTES;
 687	dd->dma_lch_in.dma_conf.device_fc = false;
 688
 689	dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
 690	if (IS_ERR(dd->dma_lch_out.chan)) {
 691		ret = PTR_ERR(dd->dma_lch_out.chan);
 692		goto err_dma_out;
 693	}
 694
 695	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
 696		TDES_ODATA1R;
 697	dd->dma_lch_out.dma_conf.src_maxburst = 1;
 698	dd->dma_lch_out.dma_conf.src_addr_width =
 699		DMA_SLAVE_BUSWIDTH_4_BYTES;
 700	dd->dma_lch_out.dma_conf.dst_maxburst = 1;
 701	dd->dma_lch_out.dma_conf.dst_addr_width =
 702		DMA_SLAVE_BUSWIDTH_4_BYTES;
 703	dd->dma_lch_out.dma_conf.device_fc = false;
 704
 705	return 0;
 706
 707err_dma_out:
 708	dma_release_channel(dd->dma_lch_in.chan);
 709err_dma_in:
 710	dev_err(dd->dev, "no DMA channel available\n");
 711	return ret;
 712}
 713
/* Release the two dmaengine channels acquired by atmel_tdes_dma_init(). */
static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}
 719
 720static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
 721			   unsigned int keylen)
 722{
 723	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 724	int err;
 725
 726	err = verify_skcipher_des_key(tfm, key);
 727	if (err)
 728		return err;
 729
 730	memcpy(ctx->key, key, keylen);
 731	ctx->keylen = keylen;
 732
 733	return 0;
 734}
 735
 736static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 737			   unsigned int keylen)
 738{
 739	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 740	int err;
 741
 742	err = verify_skcipher_des3_key(tfm, key);
 743	if (err)
 744		return err;
 745
 746	memcpy(ctx->key, key, keylen);
 747	ctx->keylen = keylen;
 748
 749	return 0;
 750}
 751
/* ECB mode, encrypt direction. */
static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
}

/* ECB mode, decrypt direction (no TDES_FLAGS_ENCRYPT bit). */
static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
}

/* CBC mode, encrypt direction. */
static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
}

/* CBC mode, decrypt direction. */
static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 771
 772static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
 773{
 774	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 775
 776	ctx->dd = atmel_tdes_dev_alloc();
 777	if (!ctx->dd)
 778		return -ENODEV;
 779
 780	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
 781
 782	return 0;
 783}
 784
 785static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
 786{
 787	alg->base.cra_priority = ATMEL_TDES_PRIORITY;
 788	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
 789	alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
 790	alg->base.cra_module = THIS_MODULE;
 791
 792	alg->init = atmel_tdes_init_tfm;
 793}
 794
/*
 * Algorithm table: ECB/CBC for single DES and three-key triple DES.
 * Common fields (priority, flags, ctxsize, module, init) are filled in
 * at registration time by atmel_tdes_skcipher_alg_init().
 */
static struct skcipher_alg tdes_algs[] = {
{
	.base.cra_name		= "ecb(des)",
	.base.cra_driver_name	= "atmel-ecb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des)",
	.base.cra_driver_name	= "atmel-cbc-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
},
{
	.base.cra_name		= "ecb(des3_ede)",
	.base.cra_driver_name	= "atmel-ecb-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des3_ede)",
	.base.cra_driver_name	= "atmel-cbc-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
	.ivsize			= DES_BLOCK_SIZE,
},
};
 847
/* Tasklet: start the next queued request, if any and the device is idle. */
static void atmel_tdes_queue_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;

	atmel_tdes_handle_queue(dd, NULL);
}
 854
/*
 * Bottom half run after a transfer completes: tear down the finished
 * PDC/DMA transfer, then either start the next chunk of the same
 * request or finish the request and pump the queue.
 */
static void atmel_tdes_done_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
	int err;

	if (!(dd->flags & TDES_FLAGS_DMA))
		err = atmel_tdes_crypt_pdc_stop(dd);
	else
		err = atmel_tdes_crypt_dma_stop(dd);

	if (dd->total && !err) {
		if (dd->flags & TDES_FLAGS_FAST) {
			/* Fast path consumed whole sg entries: advance both. */
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_tdes_crypt_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

	atmel_tdes_finish_req(dd, err);
	atmel_tdes_handle_queue(dd, NULL);
}
 881
/*
 * Interrupt handler: disable the signalled interrupt sources and defer
 * the actual completion work to the done tasklet.
 */
static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
{
	struct atmel_tdes_dev *tdes_dd = dev_id;
	u32 reg;

	reg = atmel_tdes_read(tdes_dd, TDES_ISR);
	if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
		/* Mask the sources we just observed. */
		atmel_tdes_write(tdes_dd, TDES_IDR, reg);
		if (TDES_FLAGS_BUSY & tdes_dd->flags)
			tasklet_schedule(&tdes_dd->done_task);
		else
			dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
 899
 900static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
 901{
 902	int i;
 903
 904	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
 905		crypto_unregister_skcipher(&tdes_algs[i]);
 906}
 907
 908static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
 909{
 910	int err, i, j;
 911
 912	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
 913		atmel_tdes_skcipher_alg_init(&tdes_algs[i]);
 914
 915		err = crypto_register_skcipher(&tdes_algs[i]);
 916		if (err)
 917			goto err_tdes_algs;
 918	}
 919
 920	return 0;
 921
 922err_tdes_algs:
 923	for (j = 0; j < i; j++)
 924		crypto_unregister_skcipher(&tdes_algs[j]);
 925
 926	return err;
 927}
 928
 929static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
 930{
 931
 932	dd->caps.has_dma = 0;
 
 933
 934	/* keep only major version number */
 935	switch (dd->hw_version & 0xf00) {
 936	case 0x800:
 937	case 0x700:
 938		dd->caps.has_dma = 1;
 
 939		break;
 940	case 0x600:
 941		break;
 942	default:
 943		dev_warn(dd->dev,
 944				"Unmanaged tdes version, set minimum capabilities\n");
 945		break;
 946	}
 947}
 948
 
/* Device-tree match table. */
static const struct of_device_id atmel_tdes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-tdes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
 
 954
/*
 * Probe: map the register window, wire up the IRQ and clock, read the
 * hardware version, set up bounce buffers and (when available) DMA
 * channels, publish the device and register the algorithms.  IRQ, clock
 * and MMIO are devm-managed; everything else unwinds via the goto ladder.
 */
static int atmel_tdes_probe(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd;
	struct device *dev = &pdev->dev;
	struct resource *tdes_res;
	int err;

	tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
	if (!tdes_dd)
		return -ENOMEM;

	tdes_dd->dev = dev;

	platform_set_drvdata(pdev, tdes_dd);

	INIT_LIST_HEAD(&tdes_dd->list);
	spin_lock_init(&tdes_dd->lock);

	tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
					(unsigned long)tdes_dd);
	tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
					(unsigned long)tdes_dd);

	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);

	tdes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &tdes_res);
	if (IS_ERR(tdes_dd->io_base)) {
		err = PTR_ERR(tdes_dd->io_base);
		goto err_tasklet_kill;
	}
	/* Physical base is needed later for the DMA FIFO addresses. */
	tdes_dd->phys_base = tdes_res->start;

	/* Get the IRQ */
	tdes_dd->irq = platform_get_irq(pdev,  0);
	if (tdes_dd->irq < 0) {
		err = tdes_dd->irq;
		goto err_tasklet_kill;
	}

	err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
			       IRQF_SHARED, "atmel-tdes", tdes_dd);
	if (err) {
		dev_err(dev, "unable to request tdes irq.\n");
		goto err_tasklet_kill;
	}

	/* Initializing the clock */
	tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
	if (IS_ERR(tdes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(tdes_dd->iclk);
		goto err_tasklet_kill;
	}

	err = atmel_tdes_hw_version_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;

	atmel_tdes_get_cap(tdes_dd);

	err = atmel_tdes_buff_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;

	if (tdes_dd->caps.has_dma) {
		err = atmel_tdes_dma_init(tdes_dd);
		if (err)
			goto err_buff_cleanup;

		dev_info(dev, "using %s, %s for DMA transfers\n",
				dma_chan_name(tdes_dd->dma_lch_in.chan),
				dma_chan_name(tdes_dd->dma_lch_out.chan));
	}

	/* Publish the device before registering algorithms that use it. */
	spin_lock(&atmel_tdes.lock);
	list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
	spin_unlock(&atmel_tdes.lock);

	err = atmel_tdes_register_algs(tdes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel DES/TDES\n");

	return 0;

err_algs:
	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);
	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);
err_buff_cleanup:
	atmel_tdes_buff_cleanup(tdes_dd);
err_tasklet_kill:
	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	return err;
}
1055
/*
 * Remove: unpublish the device, unregister algorithms, kill the
 * tasklets, then release DMA channels and bounce buffers.  IRQ, clock
 * and MMIO are devm-managed and released by the driver core.
 */
static void atmel_tdes_remove(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd = platform_get_drvdata(pdev);

	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);

	atmel_tdes_unregister_algs(tdes_dd);

	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);

	atmel_tdes_buff_cleanup(tdes_dd);
}
1074
/* Platform driver glue and module metadata. */
static struct platform_driver atmel_tdes_driver = {
	.probe		= atmel_tdes_probe,
	.remove_new	= atmel_tdes_remove,
	.driver		= {
		.name	= "atmel_tdes",
		.of_match_table = atmel_tdes_dt_ids,
	},
};

module_platform_driver(atmel_tdes_driver);

MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Cryptographic API.
   4 *
   5 * Support for ATMEL DES/TDES HW acceleration.
   6 *
   7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
   8 * Author: Nicolas Royer <nicolas@eukrea.com>
   9 *
  10 * Some ideas are from omap-aes.c drivers.
  11 */
  12
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/slab.h>
  17#include <linux/err.h>
  18#include <linux/clk.h>
  19#include <linux/io.h>
  20#include <linux/hw_random.h>
  21#include <linux/platform_device.h>
  22
  23#include <linux/device.h>
  24#include <linux/dmaengine.h>
  25#include <linux/init.h>
  26#include <linux/errno.h>
  27#include <linux/interrupt.h>
  28#include <linux/irq.h>
  29#include <linux/scatterlist.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/of_device.h>
  32#include <linux/delay.h>
  33#include <linux/crypto.h>
  34#include <crypto/scatterwalk.h>
  35#include <crypto/algapi.h>
  36#include <crypto/internal/des.h>
  37#include <crypto/internal/skcipher.h>
  38#include "atmel-tdes-regs.h"
  39
  40#define ATMEL_TDES_PRIORITY	300
  41
  42/* TDES flags  */
  43/* Reserve bits [17:16], [13:12], [2:0] for AES Mode Register */
  44#define TDES_FLAGS_ENCRYPT	TDES_MR_CYPHER_ENC
  45#define TDES_FLAGS_OPMODE_MASK	(TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
  46#define TDES_FLAGS_ECB		TDES_MR_OPMOD_ECB
  47#define TDES_FLAGS_CBC		TDES_MR_OPMOD_CBC
  48#define TDES_FLAGS_OFB		TDES_MR_OPMOD_OFB
  49#define TDES_FLAGS_CFB64	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
  50#define TDES_FLAGS_CFB32	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
  51#define TDES_FLAGS_CFB16	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
  52#define TDES_FLAGS_CFB8		(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)
  53
  54#define TDES_FLAGS_MODE_MASK	(TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)
  55
  56#define TDES_FLAGS_INIT		BIT(3)
  57#define TDES_FLAGS_FAST		BIT(4)
  58#define TDES_FLAGS_BUSY		BIT(5)
  59#define TDES_FLAGS_DMA		BIT(6)
  60
  61#define ATMEL_TDES_QUEUE_LENGTH	50
  62
  63#define CFB8_BLOCK_SIZE		1
  64#define CFB16_BLOCK_SIZE	2
  65#define CFB32_BLOCK_SIZE	4
  66
/* Capabilities derived from the IP revision (see atmel_tdes_get_cap()). */
struct atmel_tdes_caps {
	bool	has_dma;		/* true: dmaengine; false: PDC transfers */
	u32		has_cfb_3keys;	/* CFB usable with three-key TDES */
};

struct atmel_tdes_dev;

/* Per-transform (tfm) context: key material and mode block size. */
struct atmel_tdes_ctx {
	struct atmel_tdes_dev *dd;	/* device servicing this tfm */

	int		keylen;		/* bytes: DES, 2-key or 3-key TDES */
	u32		key[DES3_EDE_KEY_SIZE / sizeof(u32)];
	unsigned long	flags;

	u16		block_size;	/* block size of the selected opmode */
};

/* Per-request context. */
struct atmel_tdes_reqctx {
	unsigned long mode;		/* TDES_FLAGS_* bits for this request */
	u8 lastc[DES_BLOCK_SIZE];	/* saved last ciphertext block for
					 * in-place decryption IV chaining */
};

struct atmel_tdes_dma {
	struct dma_chan			*chan;
	struct dma_slave_config dma_conf;
};

/* Device state: one instance per TDES IP. */
struct atmel_tdes_dev {
	struct list_head	list;		/* entry in atmel_tdes.dev_list */
	unsigned long		phys_base;
	void __iomem		*io_base;

	struct atmel_tdes_ctx	*ctx;		/* ctx of the request in flight */
	struct device		*dev;
	struct clk			*iclk;
	int					irq;

	unsigned long		flags;		/* TDES_FLAGS_{INIT,FAST,BUSY,DMA} + mode */

	spinlock_t		lock;		/* protects queue and flags */
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct skcipher_request	*req;		/* request in flight */
	size_t				total;	/* bytes still to process */

	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	size_t				in_offset;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;
	size_t				out_offset;

	size_t	buflen;		/* usable size of each bounce buffer */
	size_t	dma_size;	/* bytes in the current DMA transfer */

	/* bounce buffers used when the request SGs are not DMA friendly */
	void	*buf_in;
	int		dma_in;
	dma_addr_t	dma_addr_in;
	struct atmel_tdes_dma	dma_lch_in;

	void	*buf_out;
	int		dma_out;
	dma_addr_t	dma_addr_out;
	struct atmel_tdes_dma	dma_lch_out;

	struct atmel_tdes_caps	caps;

	u32	hw_version;
};

/* Driver-global registry of probed devices. */
struct atmel_tdes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;		/* protects dev_list */
};

static struct atmel_tdes_drv atmel_tdes = {
	.dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
};
 149
/*
 * Copy up to min(@buflen, @total) bytes between the linear buffer @buf and
 * the scatterlist *@sg (direction given by @out, as for
 * scatterwalk_map_and_copy()), advancing *@sg/*@offset as entries are
 * consumed.  Returns the number of bytes actually copied.
 */
static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
			void *buf, size_t buflen, size_t total, int out)
{
	size_t count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		/* Current entry exhausted: step to the next one, if any. */
		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}
 180
/* MMIO accessors for the TDES register window. */
static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

/* Write @count consecutive 32-bit words starting at register @offset. */
static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
			       const u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_tdes_write(dd, offset, *value);
}
 198
 199static struct atmel_tdes_dev *atmel_tdes_dev_alloc(void)
 200{
 201	struct atmel_tdes_dev *tdes_dd;
 202
 203	spin_lock_bh(&atmel_tdes.lock);
 204	/* One TDES IP per SoC. */
 205	tdes_dd = list_first_entry_or_null(&atmel_tdes.dev_list,
 206					   struct atmel_tdes_dev, list);
 207	spin_unlock_bh(&atmel_tdes.lock);
 208	return tdes_dd;
 209}
 210
 211static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
 212{
 213	int err;
 214
 215	err = clk_prepare_enable(dd->iclk);
 216	if (err)
 217		return err;
 218
 219	if (!(dd->flags & TDES_FLAGS_INIT)) {
 220		atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
 221		dd->flags |= TDES_FLAGS_INIT;
 222	}
 223
 224	return 0;
 225}
 226
 227static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
 228{
 229	return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
 230}
 231
 232static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
 233{
 234	int err;
 235
 236	err = atmel_tdes_hw_init(dd);
 237	if (err)
 238		return err;
 239
 240	dd->hw_version = atmel_tdes_get_version(dd);
 241
 242	dev_info(dd->dev,
 243			"version: 0x%x\n", dd->hw_version);
 244
 245	clk_disable_unprepare(dd->iclk);
 246
 247	return 0;
 248}
 249
 250static void atmel_tdes_dma_callback(void *data)
 251{
 252	struct atmel_tdes_dev *dd = data;
 253
 254	/* dma_lch_out - completed */
 255	tasklet_schedule(&dd->done_task);
 256}
 257
/*
 * Program the TDES core for dd->req: mode register (key mode, DES vs TDES,
 * opmode/direction bits from dd->flags), then key words, then - for non-ECB
 * modes - the IV.  Register order is mandated by the hardware.
 */
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
	int err;
	u32 valmr = TDES_MR_SMOD_PDC;

	err = atmel_tdes_hw_init(dd);

	if (err)
		return err;

	/* No PDC on this variant: make sure both PDC channels are disabled. */
	if (!dd->caps.has_dma)
		atmel_tdes_write(dd, TDES_PTCR,
			TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
		valmr |= TDES_MR_KEYMOD_3KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else if (dd->ctx->keylen > DES_KEY_SIZE) {
		valmr |= TDES_MR_KEYMOD_2KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else {
		valmr |= TDES_MR_TDESMOD_DES;
	}

	valmr |= dd->flags & TDES_FLAGS_MODE_MASK;

	atmel_tdes_write(dd, TDES_MR, valmr);

	atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
						dd->ctx->keylen >> 2);

	/* The IV is one DES block, i.e. two 32-bit words. */
	if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
		atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);

	return 0;
}
 295
 296static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
 297{
 298	int err = 0;
 299	size_t count;
 300
 301	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
 302
 303	if (dd->flags & TDES_FLAGS_FAST) {
 304		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
 305		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 306	} else {
 307		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
 308					   dd->dma_size, DMA_FROM_DEVICE);
 309
 310		/* copy data */
 311		count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
 312				dd->buf_out, dd->buflen, dd->dma_size, 1);
 313		if (count != dd->dma_size) {
 314			err = -EINVAL;
 315			dev_dbg(dd->dev, "not all data converted: %zu\n", count);
 316		}
 317	}
 318
 319	return err;
 320}
 321
/*
 * Allocate one page each for the input and output bounce buffers and set up
 * persistent streaming DMA mappings for them.  buflen is rounded down to a
 * multiple of the DES block size.
 */
static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
{
	int err = -ENOMEM;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buflen = PAGE_SIZE;
	dd->buflen &= ~(DES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_dbg(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
					dd->buflen, DMA_TO_DEVICE);
	err = dma_mapping_error(dd->dev, dd->dma_addr_in);
	if (err) {
		dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
					dd->buflen, DMA_FROM_DEVICE);
	err = dma_mapping_error(dd->dev, dd->dma_addr_out);
	if (err) {
		dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
		goto err_map_out;
	}

	return 0;

err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
err_map_in:
err_alloc:
	/* free_page() on a NULL buffer is a no-op, so no NULL checks here. */
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
	return err;
}
 364
/* Undo atmel_tdes_buff_init(): unmap and free both bounce buffers. */
static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
}
 374
/*
 * Start a PDC transfer of @length bytes through the TDES core.  The PDC
 * counter registers count transfer units, whose width follows the CFB
 * segment size, hence the per-mode length division below.
 */
static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
	int len32;

	dd->dma_size = length;

	/* Bounce-buffer path: flush the CPU-written input to memory. */
	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		len32 = DIV_ROUND_UP(length, sizeof(u8));
		break;

	case TDES_FLAGS_CFB16:
		len32 = DIV_ROUND_UP(length, sizeof(u16));
		break;

	default:
		len32 = DIV_ROUND_UP(length, sizeof(u32));
		break;
	}

	/* Program both PDC channels: TX feeds the core, RX drains it. */
	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
	atmel_tdes_write(dd, TDES_TCR, len32);
	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
	atmel_tdes_write(dd, TDES_RCR, len32);

	/* Enable Interrupt */
	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);

	/* Start DMA transfer */
	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);

	return 0;
}
 417
/*
 * Start a dmaengine transfer of @length bytes: one mem-to-dev descriptor
 * feeding IDATA, one dev-to-mem descriptor draining ODATA.  Completion is
 * signalled via the out channel's callback.
 */
static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor	*in_desc, *out_desc;
	enum dma_slave_buswidth addr_width;

	dd->dma_size = length;

	/* Bounce-buffer path: flush the CPU-written input to memory. */
	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	/* The slave bus width must match the CFB segment size. */
	switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;

	case TDES_FLAGS_CFB16:
		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;

	default:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
	dd->dma_lch_out.dma_conf.src_addr_width = addr_width;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= TDES_FLAGS_DMA;

	/* One single-entry scatterlist per direction. */
	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
				1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT  |  DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	/*
	 * NOTE(review): if this prep fails, in_desc is never submitted;
	 * reclaim presumably relies on DMA_CTRL_ACK - confirm with the
	 * dmaengine provider in use.
	 */
	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
				1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	/* Completion is tracked on the out (dev-to-mem) channel only. */
	out_desc->callback = atmel_tdes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}
 487
 488static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
 489{
 490	int err, fast = 0, in, out;
 491	size_t count;
 492	dma_addr_t addr_in, addr_out;
 493
 494	if ((!dd->in_offset) && (!dd->out_offset)) {
 495		/* check for alignment */
 496		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
 497			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
 498		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
 499			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
 500		fast = in && out;
 501
 502		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
 503			fast = 0;
 504	}
 505
 506
 507	if (fast)  {
 508		count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
 509		count = min_t(size_t, count, sg_dma_len(dd->out_sg));
 510
 511		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 512		if (!err) {
 513			dev_dbg(dd->dev, "dma_map_sg() error\n");
 514			return -EINVAL;
 515		}
 516
 517		err = dma_map_sg(dd->dev, dd->out_sg, 1,
 518				DMA_FROM_DEVICE);
 519		if (!err) {
 520			dev_dbg(dd->dev, "dma_map_sg() error\n");
 521			dma_unmap_sg(dd->dev, dd->in_sg, 1,
 522				DMA_TO_DEVICE);
 523			return -EINVAL;
 524		}
 525
 526		addr_in = sg_dma_address(dd->in_sg);
 527		addr_out = sg_dma_address(dd->out_sg);
 528
 529		dd->flags |= TDES_FLAGS_FAST;
 530
 531	} else {
 532		/* use cache buffers */
 533		count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
 534				dd->buf_in, dd->buflen, dd->total, 0);
 535
 536		addr_in = dd->dma_addr_in;
 537		addr_out = dd->dma_addr_out;
 538
 539		dd->flags &= ~TDES_FLAGS_FAST;
 540	}
 541
 542	dd->total -= count;
 543
 544	if (dd->caps.has_dma)
 545		err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
 546	else
 547		err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);
 548
 549	if (err && (dd->flags & TDES_FLAGS_FAST)) {
 550		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 551		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
 552	}
 553
 554	return err;
 555}
 556
/*
 * For chaining modes, the next request's IV must be the last ciphertext
 * block of this one.  On encryption it is read from the destination; on
 * in-place decryption it was saved in rctx->lastc before the source was
 * overwritten (see atmel_tdes_crypt()).
 */
static void
atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
{
	struct skcipher_request *req = dd->req;
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	if (req->cryptlen < ivsize)
		return;

	if (rctx->mode & TDES_FLAGS_ENCRYPT) {
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - ivsize, ivsize, 0);
	} else {
		if (req->src == req->dst)
			memcpy(req->iv, rctx->lastc, ivsize);
		else
			scatterwalk_map_and_copy(req->iv, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
	}
}

/* Complete the in-flight request: gate the clock, clear BUSY, update IV. */
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
	struct skcipher_request *req = dd->req;
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);

	clk_disable_unprepare(dd->iclk);

	dd->flags &= ~TDES_FLAGS_BUSY;

	if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
		atmel_tdes_set_iv_as_last_ciphertext_block(dd);

	req->base.complete(&req->base, err);
}
 595
/*
 * Enqueue @req (may be NULL to simply pump the queue) and, if the device
 * is idle, dequeue the next request and start it on the hardware.
 * Returns the enqueue status (-EINPROGRESS/-EBUSY) when @req was queued,
 * 0 otherwise.
 */
static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
			       struct skcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_tdes_ctx *ctx;
	struct atmel_tdes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = crypto_enqueue_request(&dd->queue, &req->base);
	/* A request is already running; it will re-pump the queue when done. */
	if (dd->flags & TDES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= TDES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = skcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->cryptlen;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = skcipher_request_ctx(req);
	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	rctx->mode &= TDES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;

	err = atmel_tdes_write_ctrl(dd);
	if (!err)
		err = atmel_tdes_crypt_start(dd);
	if (err) {
		/* des_task will not finish it, so do it here */
		atmel_tdes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}
 651
 652static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
 653{
 654	int err = -EINVAL;
 655	size_t count;
 656
 657	if (dd->flags & TDES_FLAGS_DMA) {
 658		err = 0;
 659		if  (dd->flags & TDES_FLAGS_FAST) {
 660			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
 661			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 662		} else {
 663			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
 664				dd->dma_size, DMA_FROM_DEVICE);
 665
 666			/* copy data */
 667			count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
 668				dd->buf_out, dd->buflen, dd->dma_size, 1);
 669			if (count != dd->dma_size) {
 670				err = -EINVAL;
 671				dev_dbg(dd->dev, "not all data converted: %zu\n", count);
 672			}
 673		}
 674	}
 675	return err;
 676}
 677
/*
 * Common entry point for all modes.  Validates that cryptlen is a multiple
 * of the mode's block size, records the mode in the request context, saves
 * the last ciphertext block for in-place decryption (needed later for IV
 * chaining), then queues the request.
 */
static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
	struct device *dev = ctx->dd->dev;

	if (!req->cryptlen)
		return 0;

	switch (mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
			dev_dbg(dev, "request size is not exact amount of CFB8 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB8_BLOCK_SIZE;
		break;

	case TDES_FLAGS_CFB16:
		if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
			dev_dbg(dev, "request size is not exact amount of CFB16 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB16_BLOCK_SIZE;
		break;

	case TDES_FLAGS_CFB32:
		if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
			dev_dbg(dev, "request size is not exact amount of CFB32 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB32_BLOCK_SIZE;
		break;

	default:
		/* ECB/CBC/OFB/CFB64 all operate on full DES blocks. */
		if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
			dev_dbg(dev, "request size is not exact amount of DES blocks\n");
			return -EINVAL;
		}
		ctx->block_size = DES_BLOCK_SIZE;
		break;
	}

	rctx->mode = mode;

	/*
	 * In-place decryption overwrites the source, so stash the last
	 * ciphertext block now; finish_req() uses it as the chained IV.
	 */
	if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
	    !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

		if (req->cryptlen >= ivsize)
			scatterwalk_map_and_copy(rctx->lastc, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
	}

	return atmel_tdes_handle_queue(ctx->dd, req);
}
 736
/*
 * Acquire the "tx" and "rx" dmaengine channels and preset their slave
 * configuration (peripheral FIFO addresses, one-word bursts).  The address
 * widths set here are overridden per-request in atmel_tdes_crypt_dma().
 */
static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
{
	int ret;

	/* Try to grab 2 DMA channels */
	dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->dma_lch_in.chan)) {
		ret = PTR_ERR(dd->dma_lch_in.chan);
		goto err_dma_in;
	}

	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		TDES_IDATA1R;
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
	if (IS_ERR(dd->dma_lch_out.chan)) {
		ret = PTR_ERR(dd->dma_lch_out.chan);
		goto err_dma_out;
	}

	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
		TDES_ODATA1R;
	dd->dma_lch_out.dma_conf.src_maxburst = 1;
	dd->dma_lch_out.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.dst_maxburst = 1;
	dd->dma_lch_out.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.device_fc = false;

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	dev_err(dd->dev, "no DMA channel available\n");
	return ret;
}
 782
/* Release both dmaengine channels acquired by atmel_tdes_dma_init(). */
static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}
 788
 789static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
 790			   unsigned int keylen)
 791{
 792	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 793	int err;
 794
 795	err = verify_skcipher_des_key(tfm, key);
 796	if (err)
 797		return err;
 798
 799	memcpy(ctx->key, key, keylen);
 800	ctx->keylen = keylen;
 801
 802	return 0;
 803}
 804
 805static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 806			   unsigned int keylen)
 807{
 808	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 809	int err;
 810
 811	err = verify_skcipher_des3_key(tfm, key);
 812	if (err)
 813		return err;
 814
 815	memcpy(ctx->key, key, keylen);
 816	ctx->keylen = keylen;
 817
 818	return 0;
 819}
 820
 821static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
 822{
 823	return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
 824}
 825
 826static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
 827{
 828	return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
 829}
 830
 831static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
 832{
 833	return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
 834}
 835
 836static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
 837{
 838	return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
 839}
 840static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
 841{
 842	return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
 843}
 844
 845static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
 846{
 847	return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
 848}
 849
 850static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
 851{
 852	return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
 853}
 854
 855static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
 856{
 857	return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
 858}
 859
 860static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
 861{
 862	return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
 863}
 864
 865static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
 866{
 867	return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
 868}
 869
 870static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
 871{
 872	return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
 873}
 874
 875static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
 876{
 877	return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
 878}
 879
 880static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
 881{
 882	return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
 883}
 884
 885static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
 886{
 887	return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
 888}
 889
/* Bind the transform to the (single) TDES device and size its request ctx. */
static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
{
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->dd = atmel_tdes_dev_alloc();
	if (!ctx->dd)
		return -ENODEV;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));

	return 0;
}

/* Fill in the fields shared by every entry of tdes_algs[]. */
static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
{
	alg->base.cra_priority = ATMEL_TDES_PRIORITY;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
	alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
	alg->base.cra_module = THIS_MODULE;

	alg->init = atmel_tdes_init_tfm;
}
 912
/*
 * Algorithm table: DES in ECB/CBC/CFB{64,8,16,32}/OFB plus TDES (des3_ede)
 * in ECB/CBC/OFB.  Common fields are filled by
 * atmel_tdes_skcipher_alg_init() at registration time.
 */
static struct skcipher_alg tdes_algs[] = {
{
	.base.cra_name		= "ecb(des)",
	.base.cra_driver_name	= "atmel-ecb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des)",
	.base.cra_driver_name	= "atmel-cbc-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
},
{
	.base.cra_name		= "cfb(des)",
	.base.cra_driver_name	= "atmel-cfb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb_encrypt,
	.decrypt		= atmel_tdes_cfb_decrypt,
},
{
	.base.cra_name		= "cfb8(des)",
	.base.cra_driver_name	= "atmel-cfb8-des",
	.base.cra_blocksize	= CFB8_BLOCK_SIZE,
	.base.cra_alignmask	= 0,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb8_encrypt,
	.decrypt		= atmel_tdes_cfb8_decrypt,
},
{
	.base.cra_name		= "cfb16(des)",
	.base.cra_driver_name	= "atmel-cfb16-des",
	.base.cra_blocksize	= CFB16_BLOCK_SIZE,
	.base.cra_alignmask	= 0x1,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb16_encrypt,
	.decrypt		= atmel_tdes_cfb16_decrypt,
},
{
	.base.cra_name		= "cfb32(des)",
	.base.cra_driver_name	= "atmel-cfb32-des",
	.base.cra_blocksize	= CFB32_BLOCK_SIZE,
	.base.cra_alignmask	= 0x3,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb32_encrypt,
	.decrypt		= atmel_tdes_cfb32_decrypt,
},
{
	.base.cra_name		= "ofb(des)",
	.base.cra_driver_name	= "atmel-ofb-des",
	.base.cra_blocksize	= 1,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_ofb_encrypt,
	.decrypt		= atmel_tdes_ofb_decrypt,
},
{
	.base.cra_name		= "ecb(des3_ede)",
	.base.cra_driver_name	= "atmel-ecb-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des3_ede)",
	.base.cra_driver_name	= "atmel-cbc-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
	.ivsize			= DES_BLOCK_SIZE,
},
{
	.base.cra_name		= "ofb(des3_ede)",
	.base.cra_driver_name	= "atmel-ofb-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_ofb_encrypt,
	.decrypt		= atmel_tdes_ofb_decrypt,
	.ivsize			= DES_BLOCK_SIZE,
},
};
1043
/* Tasklet: pump the request queue outside interrupt context. */
static void atmel_tdes_queue_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;

	atmel_tdes_handle_queue(dd, NULL);
}

/*
 * Tasklet scheduled on transfer completion: tear down the finished
 * PDC/DMA transfer, start the next chunk if data remains, otherwise
 * complete the request and pump the queue.
 */
static void atmel_tdes_done_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
	int err;

	if (!(dd->flags & TDES_FLAGS_DMA))
		err = atmel_tdes_crypt_pdc_stop(dd);
	else
		err = atmel_tdes_crypt_dma_stop(dd);

	if (dd->total && !err) {
		if (dd->flags & TDES_FLAGS_FAST) {
			/* Fast path consumes one SG entry per chunk. */
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_tdes_crypt_start(dd);
		if (!err)
			return; /* DMA started. Not fininishing. */
	}

	atmel_tdes_finish_req(dd, err);
	atmel_tdes_handle_queue(dd, NULL);
}

/*
 * IRQ handler (PDC mode): acknowledge and mask the interrupt, then defer
 * completion handling to the done tasklet.
 */
static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
{
	struct atmel_tdes_dev *tdes_dd = dev_id;
	u32 reg;

	reg = atmel_tdes_read(tdes_dd, TDES_ISR);
	if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
		atmel_tdes_write(tdes_dd, TDES_IDR, reg);
		if (TDES_FLAGS_BUSY & tdes_dd->flags)
			tasklet_schedule(&tdes_dd->done_task);
		else
			dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
1095
1096static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
1097{
1098	int i;
1099
1100	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
1101		crypto_unregister_skcipher(&tdes_algs[i]);
1102}
1103
1104static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
1105{
1106	int err, i, j;
1107
1108	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
1109		atmel_tdes_skcipher_alg_init(&tdes_algs[i]);
1110
1111		err = crypto_register_skcipher(&tdes_algs[i]);
1112		if (err)
1113			goto err_tdes_algs;
1114	}
1115
1116	return 0;
1117
1118err_tdes_algs:
1119	for (j = 0; j < i; j++)
1120		crypto_unregister_skcipher(&tdes_algs[j]);
1121
1122	return err;
1123}
1124
/*
 * Derive feature flags from the major IP revision latched at probe time;
 * unknown revisions fall back to the minimal (PDC-only) feature set.
 */
static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
{

	dd->caps.has_dma = 0;
	dd->caps.has_cfb_3keys = 0;

	/* keep only major version number */
	switch (dd->hw_version & 0xf00) {
	case 0x800:
	case 0x700:
		dd->caps.has_dma = 1;
		dd->caps.has_cfb_3keys = 1;
		break;
	case 0x600:
		break;
	default:
		dev_warn(dd->dev,
				"Unmanaged tdes version, set minimum capabilities\n");
		break;
	}
}
1146
/* OF match table: TDES IP as found on at91sam9g46-class SoCs. */
#if defined(CONFIG_OF)
static const struct of_device_id atmel_tdes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-tdes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
#endif
1154
/*
 * Probe the TDES platform device: map registers, request the IRQ and
 * clock, detect hardware capabilities, set up buffers (and DMA when
 * supported), add the device to the global list and register the
 * crypto algorithms.
 *
 * Resources acquired with devm_* are released automatically on
 * failure/removal; only tasklets, buffers, DMA channels and the
 * device-list entry need explicit unwinding (see the goto labels).
 *
 * Returns 0 on success or a negative errno.
 */
static int atmel_tdes_probe(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd;
	struct device *dev = &pdev->dev;
	struct resource *tdes_res;
	int err;

	tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
	if (!tdes_dd)
		return -ENOMEM;

	tdes_dd->dev = dev;

	platform_set_drvdata(pdev, tdes_dd);

	INIT_LIST_HEAD(&tdes_dd->list);
	spin_lock_init(&tdes_dd->lock);

	/* Bottom halves for completion and queue processing. */
	tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
					(unsigned long)tdes_dd);
	tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
					(unsigned long)tdes_dd);

	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);

	/* Get the base address */
	tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!tdes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err_tasklet_kill;
	}
	tdes_dd->phys_base = tdes_res->start;

	/* Get the IRQ */
	tdes_dd->irq = platform_get_irq(pdev,  0);
	if (tdes_dd->irq < 0) {
		err = tdes_dd->irq;
		goto err_tasklet_kill;
	}

	/* IRQF_SHARED: the line may be shared with other peripherals. */
	err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
			       IRQF_SHARED, "atmel-tdes", tdes_dd);
	if (err) {
		dev_err(dev, "unable to request tdes irq.\n");
		goto err_tasklet_kill;
	}

	/* Initializing the clock */
	tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
	if (IS_ERR(tdes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(tdes_dd->iclk);
		goto err_tasklet_kill;
	}

	tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
	if (IS_ERR(tdes_dd->io_base)) {
		err = PTR_ERR(tdes_dd->io_base);
		goto err_tasklet_kill;
	}

	/* Read the IP version; needed before capability detection below. */
	err = atmel_tdes_hw_version_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;

	atmel_tdes_get_cap(tdes_dd);

	err = atmel_tdes_buff_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;

	if (tdes_dd->caps.has_dma) {
		err = atmel_tdes_dma_init(tdes_dd);
		if (err)
			goto err_buff_cleanup;

		dev_info(dev, "using %s, %s for DMA transfers\n",
				dma_chan_name(tdes_dd->dma_lch_in.chan),
				dma_chan_name(tdes_dd->dma_lch_out.chan));
	}

	/* Publish the device before registering algs that may use it. */
	spin_lock(&atmel_tdes.lock);
	list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
	spin_unlock(&atmel_tdes.lock);

	err = atmel_tdes_register_algs(tdes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel DES/TDES\n");

	return 0;

err_algs:
	/* Undo in reverse order of setup above. */
	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);
	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);
err_buff_cleanup:
	atmel_tdes_buff_cleanup(tdes_dd);
err_tasklet_kill:
	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	return err;
}
1263
/*
 * Remove the TDES platform device: unpublish it from the global list,
 * unregister the crypto algorithms, then stop the tasklets and release
 * DMA channels and buffers. devm-managed resources (IRQ, clock, MMIO)
 * are freed automatically afterwards.
 */
static int atmel_tdes_remove(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd = platform_get_drvdata(pdev);

	/* Remove from the device list first so no new requests pick us. */
	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);

	atmel_tdes_unregister_algs(tdes_dd);

	/* Ensure no bottom half is running before tearing down DMA. */
	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);

	atmel_tdes_buff_cleanup(tdes_dd);

	return 0;
}
1284
/* Platform driver glue; matched by name or by the OF table above. */
static struct platform_driver atmel_tdes_driver = {
	.probe		= atmel_tdes_probe,
	.remove		= atmel_tdes_remove,
	.driver		= {
		.name	= "atmel_tdes",
		.of_match_table = of_match_ptr(atmel_tdes_dt_ids),
	},
};

module_platform_driver(atmel_tdes_driver);

MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");