Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Cryptographic API.
   4 *
   5 * Support for ATMEL DES/TDES HW acceleration.
   6 *
   7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
   8 * Author: Nicolas Royer <nicolas@eukrea.com>
   9 *
  10 * Some ideas are from omap-aes.c drivers.
  11 */
  12
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/slab.h>
  17#include <linux/err.h>
  18#include <linux/clk.h>
  19#include <linux/io.h>
  20#include <linux/hw_random.h>
  21#include <linux/platform_device.h>
  22
  23#include <linux/device.h>
  24#include <linux/dmaengine.h>
  25#include <linux/init.h>
  26#include <linux/errno.h>
  27#include <linux/interrupt.h>
  28#include <linux/irq.h>
  29#include <linux/scatterlist.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/mod_devicetable.h>
  32#include <linux/delay.h>
  33#include <linux/crypto.h>
  34#include <crypto/scatterwalk.h>
  35#include <crypto/algapi.h>
  36#include <crypto/internal/des.h>
  37#include <crypto/internal/skcipher.h>
  38#include "atmel-tdes-regs.h"
  39
  40#define ATMEL_TDES_PRIORITY	300
  41
  42/* TDES flags  */
  43/* Reserve bits [17:16], [13:12], [2:0] for AES Mode Register */
  44#define TDES_FLAGS_ENCRYPT	TDES_MR_CYPHER_ENC
  45#define TDES_FLAGS_OPMODE_MASK	(TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
  46#define TDES_FLAGS_ECB		TDES_MR_OPMOD_ECB
  47#define TDES_FLAGS_CBC		TDES_MR_OPMOD_CBC
 
 
 
 
 
  48
  49#define TDES_FLAGS_MODE_MASK	(TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)
  50
  51#define TDES_FLAGS_INIT		BIT(3)
  52#define TDES_FLAGS_FAST		BIT(4)
  53#define TDES_FLAGS_BUSY		BIT(5)
  54#define TDES_FLAGS_DMA		BIT(6)
  55
  56#define ATMEL_TDES_QUEUE_LENGTH	50
  57
 
 
 
 
  58struct atmel_tdes_caps {
  59	bool	has_dma;
 
  60};
  61
  62struct atmel_tdes_dev;
  63
  64struct atmel_tdes_ctx {
  65	struct atmel_tdes_dev *dd;
  66
  67	int		keylen;
  68	u32		key[DES3_EDE_KEY_SIZE / sizeof(u32)];
  69	unsigned long	flags;
  70
  71	u16		block_size;
  72};
  73
  74struct atmel_tdes_reqctx {
  75	unsigned long mode;
  76	u8 lastc[DES_BLOCK_SIZE];
  77};
  78
  79struct atmel_tdes_dma {
  80	struct dma_chan			*chan;
  81	struct dma_slave_config dma_conf;
  82};
  83
  84struct atmel_tdes_dev {
  85	struct list_head	list;
  86	unsigned long		phys_base;
  87	void __iomem		*io_base;
  88
  89	struct atmel_tdes_ctx	*ctx;
  90	struct device		*dev;
  91	struct clk			*iclk;
  92	int					irq;
  93
  94	unsigned long		flags;
  95
  96	spinlock_t		lock;
  97	struct crypto_queue	queue;
  98
  99	struct tasklet_struct	done_task;
 100	struct tasklet_struct	queue_task;
 101
 102	struct skcipher_request	*req;
 103	size_t				total;
 104
 105	struct scatterlist	*in_sg;
 106	unsigned int		nb_in_sg;
 107	size_t				in_offset;
 108	struct scatterlist	*out_sg;
 109	unsigned int		nb_out_sg;
 110	size_t				out_offset;
 111
 112	size_t	buflen;
 113	size_t	dma_size;
 114
 115	void	*buf_in;
 116	int		dma_in;
 117	dma_addr_t	dma_addr_in;
 118	struct atmel_tdes_dma	dma_lch_in;
 119
 120	void	*buf_out;
 121	int		dma_out;
 122	dma_addr_t	dma_addr_out;
 123	struct atmel_tdes_dma	dma_lch_out;
 124
 125	struct atmel_tdes_caps	caps;
 126
 127	u32	hw_version;
 128};
 129
 130struct atmel_tdes_drv {
 131	struct list_head	dev_list;
 132	spinlock_t		lock;
 133};
 134
 135static struct atmel_tdes_drv atmel_tdes = {
 136	.dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
 137	.lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
 138};
 139
 140static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
 141			void *buf, size_t buflen, size_t total, int out)
 142{
 143	size_t count, off = 0;
 144
 145	while (buflen && total) {
 146		count = min((*sg)->length - *offset, total);
 147		count = min(count, buflen);
 148
 149		if (!count)
 150			return off;
 151
 152		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
 153
 154		off += count;
 155		buflen -= count;
 156		*offset += count;
 157		total -= count;
 158
 159		if (*offset == (*sg)->length) {
 160			*sg = sg_next(*sg);
 161			if (*sg)
 162				*offset = 0;
 163			else
 164				total = 0;
 165		}
 166	}
 167
 168	return off;
 169}
 170
 171static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
 172{
 173	return readl_relaxed(dd->io_base + offset);
 174}
 175
 176static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
 177					u32 offset, u32 value)
 178{
 179	writel_relaxed(value, dd->io_base + offset);
 180}
 181
 182static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
 183			       const u32 *value, int count)
 184{
 185	for (; count--; value++, offset += 4)
 186		atmel_tdes_write(dd, offset, *value);
 187}
 188
 189static struct atmel_tdes_dev *atmel_tdes_dev_alloc(void)
 190{
 191	struct atmel_tdes_dev *tdes_dd;
 
 192
 193	spin_lock_bh(&atmel_tdes.lock);
 194	/* One TDES IP per SoC. */
 195	tdes_dd = list_first_entry_or_null(&atmel_tdes.dev_list,
 196					   struct atmel_tdes_dev, list);
 
 
 
 
 
 
 197	spin_unlock_bh(&atmel_tdes.lock);
 
 198	return tdes_dd;
 199}
 200
 201static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
 202{
 203	int err;
 204
 205	err = clk_prepare_enable(dd->iclk);
 206	if (err)
 207		return err;
 208
 209	if (!(dd->flags & TDES_FLAGS_INIT)) {
 210		atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
 211		dd->flags |= TDES_FLAGS_INIT;
 212	}
 213
 214	return 0;
 215}
 216
 217static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
 218{
 219	return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
 220}
 221
 222static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
 223{
 224	int err;
 225
 226	err = atmel_tdes_hw_init(dd);
 227	if (err)
 228		return err;
 229
 230	dd->hw_version = atmel_tdes_get_version(dd);
 231
 232	dev_info(dd->dev,
 233			"version: 0x%x\n", dd->hw_version);
 234
 235	clk_disable_unprepare(dd->iclk);
 236
 237	return 0;
 238}
 239
 240static void atmel_tdes_dma_callback(void *data)
 241{
 242	struct atmel_tdes_dev *dd = data;
 243
 244	/* dma_lch_out - completed */
 245	tasklet_schedule(&dd->done_task);
 246}
 247
 248static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
 249{
 250	int err;
 251	u32 valmr = TDES_MR_SMOD_PDC;
 252
 253	err = atmel_tdes_hw_init(dd);
 254
 255	if (err)
 256		return err;
 257
 258	if (!dd->caps.has_dma)
 259		atmel_tdes_write(dd, TDES_PTCR,
 260			TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);
 261
 262	/* MR register must be set before IV registers */
 263	if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
 264		valmr |= TDES_MR_KEYMOD_3KEY;
 265		valmr |= TDES_MR_TDESMOD_TDES;
 266	} else if (dd->ctx->keylen > DES_KEY_SIZE) {
 267		valmr |= TDES_MR_KEYMOD_2KEY;
 268		valmr |= TDES_MR_TDESMOD_TDES;
 269	} else {
 270		valmr |= TDES_MR_TDESMOD_DES;
 271	}
 272
 273	valmr |= dd->flags & TDES_FLAGS_MODE_MASK;
 274
 275	atmel_tdes_write(dd, TDES_MR, valmr);
 276
 277	atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
 278						dd->ctx->keylen >> 2);
 279
 280	if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
 281		atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);
 282
 283	return 0;
 284}
 285
 286static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
 287{
 288	int err = 0;
 289	size_t count;
 290
 291	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
 292
 293	if (dd->flags & TDES_FLAGS_FAST) {
 294		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
 295		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 296	} else {
 297		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
 298					   dd->dma_size, DMA_FROM_DEVICE);
 299
 300		/* copy data */
 301		count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
 302				dd->buf_out, dd->buflen, dd->dma_size, 1);
 303		if (count != dd->dma_size) {
 304			err = -EINVAL;
 305			dev_dbg(dd->dev, "not all data converted: %zu\n", count);
 306		}
 307	}
 308
 309	return err;
 310}
 311
 312static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
 313{
 314	int err = -ENOMEM;
 315
 316	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
 317	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
 318	dd->buflen = PAGE_SIZE;
 319	dd->buflen &= ~(DES_BLOCK_SIZE - 1);
 320
 321	if (!dd->buf_in || !dd->buf_out) {
 322		dev_dbg(dd->dev, "unable to alloc pages.\n");
 323		goto err_alloc;
 324	}
 325
 326	/* MAP here */
 327	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
 328					dd->buflen, DMA_TO_DEVICE);
 329	err = dma_mapping_error(dd->dev, dd->dma_addr_in);
 330	if (err) {
 331		dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
 332		goto err_map_in;
 333	}
 334
 335	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
 336					dd->buflen, DMA_FROM_DEVICE);
 337	err = dma_mapping_error(dd->dev, dd->dma_addr_out);
 338	if (err) {
 339		dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
 340		goto err_map_out;
 341	}
 342
 343	return 0;
 344
 345err_map_out:
 346	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
 347		DMA_TO_DEVICE);
 348err_map_in:
 349err_alloc:
 350	free_page((unsigned long)dd->buf_out);
 351	free_page((unsigned long)dd->buf_in);
 
 
 352	return err;
 353}
 354
 355static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
 356{
 357	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
 358			 DMA_FROM_DEVICE);
 359	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
 360		DMA_TO_DEVICE);
 361	free_page((unsigned long)dd->buf_out);
 362	free_page((unsigned long)dd->buf_in);
 363}
 364
 365static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
 366				dma_addr_t dma_addr_in,
 367				dma_addr_t dma_addr_out, int length)
 368{
 
 369	int len32;
 370
 371	dd->dma_size = length;
 372
 373	if (!(dd->flags & TDES_FLAGS_FAST)) {
 374		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
 375					   DMA_TO_DEVICE);
 376	}
 377
 378	len32 = DIV_ROUND_UP(length, sizeof(u32));
 
 
 
 
 
 
 
 
 
 
 
 
 379
 380	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
 381	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
 382	atmel_tdes_write(dd, TDES_TCR, len32);
 383	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
 384	atmel_tdes_write(dd, TDES_RCR, len32);
 385
 386	/* Enable Interrupt */
 387	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);
 388
 389	/* Start DMA transfer */
 390	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);
 391
 392	return 0;
 393}
 394
 395static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
 396				dma_addr_t dma_addr_in,
 397				dma_addr_t dma_addr_out, int length)
 398{
 
 399	struct scatterlist sg[2];
 400	struct dma_async_tx_descriptor	*in_desc, *out_desc;
 401	enum dma_slave_buswidth addr_width;
 402
 403	dd->dma_size = length;
 404
 405	if (!(dd->flags & TDES_FLAGS_FAST)) {
 406		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
 407					   DMA_TO_DEVICE);
 408	}
 409
 410	addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 
 
 
 
 
 
 
 
 
 
 
 
 411
 412	dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
 413	dd->dma_lch_out.dma_conf.src_addr_width = addr_width;
 414
 415	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
 416	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
 417
 418	dd->flags |= TDES_FLAGS_DMA;
 419
 420	sg_init_table(&sg[0], 1);
 421	sg_dma_address(&sg[0]) = dma_addr_in;
 422	sg_dma_len(&sg[0]) = length;
 423
 424	sg_init_table(&sg[1], 1);
 425	sg_dma_address(&sg[1]) = dma_addr_out;
 426	sg_dma_len(&sg[1]) = length;
 427
 428	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
 429				1, DMA_MEM_TO_DEV,
 430				DMA_PREP_INTERRUPT  |  DMA_CTRL_ACK);
 431	if (!in_desc)
 432		return -EINVAL;
 433
 434	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
 435				1, DMA_DEV_TO_MEM,
 436				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 437	if (!out_desc)
 438		return -EINVAL;
 439
 440	out_desc->callback = atmel_tdes_dma_callback;
 441	out_desc->callback_param = dd;
 442
 443	dmaengine_submit(out_desc);
 444	dma_async_issue_pending(dd->dma_lch_out.chan);
 445
 446	dmaengine_submit(in_desc);
 447	dma_async_issue_pending(dd->dma_lch_in.chan);
 448
 449	return 0;
 450}
 451
 452static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
 453{
 454	int err, fast = 0, in, out;
 455	size_t count;
 456	dma_addr_t addr_in, addr_out;
 457
 458	if ((!dd->in_offset) && (!dd->out_offset)) {
 459		/* check for alignment */
 460		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
 461			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
 462		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
 463			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
 464		fast = in && out;
 465
 466		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
 467			fast = 0;
 468	}
 469
 470
 471	if (fast)  {
 472		count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
 473		count = min_t(size_t, count, sg_dma_len(dd->out_sg));
 474
 475		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 476		if (!err) {
 477			dev_dbg(dd->dev, "dma_map_sg() error\n");
 478			return -EINVAL;
 479		}
 480
 481		err = dma_map_sg(dd->dev, dd->out_sg, 1,
 482				DMA_FROM_DEVICE);
 483		if (!err) {
 484			dev_dbg(dd->dev, "dma_map_sg() error\n");
 485			dma_unmap_sg(dd->dev, dd->in_sg, 1,
 486				DMA_TO_DEVICE);
 487			return -EINVAL;
 488		}
 489
 490		addr_in = sg_dma_address(dd->in_sg);
 491		addr_out = sg_dma_address(dd->out_sg);
 492
 493		dd->flags |= TDES_FLAGS_FAST;
 494
 495	} else {
 496		/* use cache buffers */
 497		count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
 498				dd->buf_in, dd->buflen, dd->total, 0);
 499
 500		addr_in = dd->dma_addr_in;
 501		addr_out = dd->dma_addr_out;
 502
 503		dd->flags &= ~TDES_FLAGS_FAST;
 504	}
 505
 506	dd->total -= count;
 507
 508	if (dd->caps.has_dma)
 509		err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
 510	else
 511		err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);
 512
 513	if (err && (dd->flags & TDES_FLAGS_FAST)) {
 514		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 515		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
 516	}
 517
 518	return err;
 519}
 520
 521static void
 522atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
 523{
 524	struct skcipher_request *req = dd->req;
 525	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
 526	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 527	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 528
 529	if (req->cryptlen < ivsize)
 530		return;
 531
 532	if (rctx->mode & TDES_FLAGS_ENCRYPT)
 533		scatterwalk_map_and_copy(req->iv, req->dst,
 534					 req->cryptlen - ivsize, ivsize, 0);
 535	else
 536		memcpy(req->iv, rctx->lastc, ivsize);
 537
 
 
 
 
 
 538}
 539
 540static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
 541{
 542	struct skcipher_request *req = dd->req;
 543	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
 544
 545	clk_disable_unprepare(dd->iclk);
 546
 547	dd->flags &= ~TDES_FLAGS_BUSY;
 548
 549	if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
 550		atmel_tdes_set_iv_as_last_ciphertext_block(dd);
 551
 552	skcipher_request_complete(req, err);
 553}
 554
 555static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
 556			       struct skcipher_request *req)
 557{
 558	struct crypto_async_request *async_req, *backlog;
 559	struct atmel_tdes_ctx *ctx;
 560	struct atmel_tdes_reqctx *rctx;
 561	unsigned long flags;
 562	int err, ret = 0;
 563
 564	spin_lock_irqsave(&dd->lock, flags);
 565	if (req)
 566		ret = crypto_enqueue_request(&dd->queue, &req->base);
 567	if (dd->flags & TDES_FLAGS_BUSY) {
 568		spin_unlock_irqrestore(&dd->lock, flags);
 569		return ret;
 570	}
 571	backlog = crypto_get_backlog(&dd->queue);
 572	async_req = crypto_dequeue_request(&dd->queue);
 573	if (async_req)
 574		dd->flags |= TDES_FLAGS_BUSY;
 575	spin_unlock_irqrestore(&dd->lock, flags);
 576
 577	if (!async_req)
 578		return ret;
 579
 580	if (backlog)
 581		crypto_request_complete(backlog, -EINPROGRESS);
 582
 583	req = skcipher_request_cast(async_req);
 584
 585	/* assign new request to device */
 586	dd->req = req;
 587	dd->total = req->cryptlen;
 588	dd->in_offset = 0;
 589	dd->in_sg = req->src;
 590	dd->out_offset = 0;
 591	dd->out_sg = req->dst;
 592
 593	rctx = skcipher_request_ctx(req);
 594	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 595	rctx->mode &= TDES_FLAGS_MODE_MASK;
 596	dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
 597	dd->ctx = ctx;
 
 598
 599	err = atmel_tdes_write_ctrl(dd);
 600	if (!err)
 601		err = atmel_tdes_crypt_start(dd);
 602	if (err) {
 603		/* des_task will not finish it, so do it here */
 604		atmel_tdes_finish_req(dd, err);
 605		tasklet_schedule(&dd->queue_task);
 606	}
 607
 608	return ret;
 609}
 610
 611static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
 612{
 613	int err = -EINVAL;
 614	size_t count;
 615
 616	if (dd->flags & TDES_FLAGS_DMA) {
 617		err = 0;
 618		if  (dd->flags & TDES_FLAGS_FAST) {
 619			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
 620			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 621		} else {
 622			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
 623				dd->dma_size, DMA_FROM_DEVICE);
 624
 625			/* copy data */
 626			count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
 627				dd->buf_out, dd->buflen, dd->dma_size, 1);
 628			if (count != dd->dma_size) {
 629				err = -EINVAL;
 630				dev_dbg(dd->dev, "not all data converted: %zu\n", count);
 631			}
 632		}
 633	}
 634	return err;
 635}
 636
 637static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
 638{
 639	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 640	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
 641	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
 642	struct device *dev = ctx->dd->dev;
 643
 644	if (!req->cryptlen)
 645		return 0;
 
 
 
 
 
 
 646
 647	if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
 648		dev_dbg(dev, "request size is not exact amount of DES blocks\n");
 649		return -EINVAL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 650	}
 651	ctx->block_size = DES_BLOCK_SIZE;
 652
 653	rctx->mode = mode;
 654
 655	if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
 656	    !(mode & TDES_FLAGS_ENCRYPT)) {
 657		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 658
 659		if (req->cryptlen >= ivsize)
 660			scatterwalk_map_and_copy(rctx->lastc, req->src,
 661						 req->cryptlen - ivsize,
 662						 ivsize, 0);
 663	}
 664
 665	return atmel_tdes_handle_queue(ctx->dd, req);
 666}
 667
 668static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
 669{
 670	int ret;
 671
 672	/* Try to grab 2 DMA channels */
 673	dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
 674	if (IS_ERR(dd->dma_lch_in.chan)) {
 675		ret = PTR_ERR(dd->dma_lch_in.chan);
 676		goto err_dma_in;
 677	}
 678
 679	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
 680		TDES_IDATA1R;
 681	dd->dma_lch_in.dma_conf.src_maxburst = 1;
 682	dd->dma_lch_in.dma_conf.src_addr_width =
 683		DMA_SLAVE_BUSWIDTH_4_BYTES;
 684	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
 685	dd->dma_lch_in.dma_conf.dst_addr_width =
 686		DMA_SLAVE_BUSWIDTH_4_BYTES;
 687	dd->dma_lch_in.dma_conf.device_fc = false;
 688
 689	dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
 690	if (IS_ERR(dd->dma_lch_out.chan)) {
 691		ret = PTR_ERR(dd->dma_lch_out.chan);
 692		goto err_dma_out;
 693	}
 694
 695	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
 696		TDES_ODATA1R;
 697	dd->dma_lch_out.dma_conf.src_maxburst = 1;
 698	dd->dma_lch_out.dma_conf.src_addr_width =
 699		DMA_SLAVE_BUSWIDTH_4_BYTES;
 700	dd->dma_lch_out.dma_conf.dst_maxburst = 1;
 701	dd->dma_lch_out.dma_conf.dst_addr_width =
 702		DMA_SLAVE_BUSWIDTH_4_BYTES;
 703	dd->dma_lch_out.dma_conf.device_fc = false;
 704
 705	return 0;
 706
 707err_dma_out:
 708	dma_release_channel(dd->dma_lch_in.chan);
 709err_dma_in:
 710	dev_err(dd->dev, "no DMA channel available\n");
 711	return ret;
 712}
 713
 714static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
 715{
 716	dma_release_channel(dd->dma_lch_in.chan);
 717	dma_release_channel(dd->dma_lch_out.chan);
 718}
 719
 720static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
 721			   unsigned int keylen)
 722{
 723	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 724	int err;
 725
 726	err = verify_skcipher_des_key(tfm, key);
 727	if (err)
 728		return err;
 729
 730	memcpy(ctx->key, key, keylen);
 731	ctx->keylen = keylen;
 732
 733	return 0;
 734}
 735
 736static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 737			   unsigned int keylen)
 738{
 739	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 740	int err;
 741
 742	err = verify_skcipher_des3_key(tfm, key);
 743	if (err)
 744		return err;
 745
 746	memcpy(ctx->key, key, keylen);
 747	ctx->keylen = keylen;
 748
 749	return 0;
 750}
 751
 752static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
 753{
 754	return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
 755}
 756
 757static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
 758{
 759	return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
 760}
 761
 762static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
 763{
 764	return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
 765}
 766
 767static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
 768{
 769	return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
 770}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 771
 772static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
 773{
 774	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 775
 776	ctx->dd = atmel_tdes_dev_alloc();
 777	if (!ctx->dd)
 778		return -ENODEV;
 779
 780	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
 781
 
 
 
 
 782	return 0;
 783}
 784
 785static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
 786{
 787	alg->base.cra_priority = ATMEL_TDES_PRIORITY;
 788	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
 789	alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
 790	alg->base.cra_module = THIS_MODULE;
 791
 792	alg->init = atmel_tdes_init_tfm;
 793}
 794
 795static struct skcipher_alg tdes_algs[] = {
 796{
 797	.base.cra_name		= "ecb(des)",
 798	.base.cra_driver_name	= "atmel-ecb-des",
 799	.base.cra_blocksize	= DES_BLOCK_SIZE,
 800	.base.cra_alignmask	= 0x7,
 801
 802	.min_keysize		= DES_KEY_SIZE,
 803	.max_keysize		= DES_KEY_SIZE,
 804	.setkey			= atmel_des_setkey,
 805	.encrypt		= atmel_tdes_ecb_encrypt,
 806	.decrypt		= atmel_tdes_ecb_decrypt,
 807},
 808{
 809	.base.cra_name		= "cbc(des)",
 810	.base.cra_driver_name	= "atmel-cbc-des",
 811	.base.cra_blocksize	= DES_BLOCK_SIZE,
 812	.base.cra_alignmask	= 0x7,
 813
 814	.min_keysize		= DES_KEY_SIZE,
 815	.max_keysize		= DES_KEY_SIZE,
 816	.ivsize			= DES_BLOCK_SIZE,
 817	.setkey			= atmel_des_setkey,
 818	.encrypt		= atmel_tdes_cbc_encrypt,
 819	.decrypt		= atmel_tdes_cbc_decrypt,
 820},
 821{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 822	.base.cra_name		= "ecb(des3_ede)",
 823	.base.cra_driver_name	= "atmel-ecb-tdes",
 824	.base.cra_blocksize	= DES_BLOCK_SIZE,
 825	.base.cra_alignmask	= 0x7,
 826
 827	.min_keysize		= DES3_EDE_KEY_SIZE,
 828	.max_keysize		= DES3_EDE_KEY_SIZE,
 829	.setkey			= atmel_tdes_setkey,
 830	.encrypt		= atmel_tdes_ecb_encrypt,
 831	.decrypt		= atmel_tdes_ecb_decrypt,
 832},
 833{
 834	.base.cra_name		= "cbc(des3_ede)",
 835	.base.cra_driver_name	= "atmel-cbc-tdes",
 836	.base.cra_blocksize	= DES_BLOCK_SIZE,
 837	.base.cra_alignmask	= 0x7,
 838
 839	.min_keysize		= DES3_EDE_KEY_SIZE,
 840	.max_keysize		= DES3_EDE_KEY_SIZE,
 841	.setkey			= atmel_tdes_setkey,
 842	.encrypt		= atmel_tdes_cbc_encrypt,
 843	.decrypt		= atmel_tdes_cbc_decrypt,
 844	.ivsize			= DES_BLOCK_SIZE,
 845},
 
 
 
 
 
 
 
 
 
 
 
 
 
 846};
 847
 848static void atmel_tdes_queue_task(unsigned long data)
 849{
 850	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;
 851
 852	atmel_tdes_handle_queue(dd, NULL);
 853}
 854
 855static void atmel_tdes_done_task(unsigned long data)
 856{
 857	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
 858	int err;
 859
 860	if (!(dd->flags & TDES_FLAGS_DMA))
 861		err = atmel_tdes_crypt_pdc_stop(dd);
 862	else
 863		err = atmel_tdes_crypt_dma_stop(dd);
 864
 865	if (dd->total && !err) {
 866		if (dd->flags & TDES_FLAGS_FAST) {
 867			dd->in_sg = sg_next(dd->in_sg);
 868			dd->out_sg = sg_next(dd->out_sg);
 869			if (!dd->in_sg || !dd->out_sg)
 870				err = -EINVAL;
 871		}
 872		if (!err)
 873			err = atmel_tdes_crypt_start(dd);
 874		if (!err)
 875			return; /* DMA started. Not fininishing. */
 876	}
 877
 878	atmel_tdes_finish_req(dd, err);
 879	atmel_tdes_handle_queue(dd, NULL);
 880}
 881
 882static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
 883{
 884	struct atmel_tdes_dev *tdes_dd = dev_id;
 885	u32 reg;
 886
 887	reg = atmel_tdes_read(tdes_dd, TDES_ISR);
 888	if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
 889		atmel_tdes_write(tdes_dd, TDES_IDR, reg);
 890		if (TDES_FLAGS_BUSY & tdes_dd->flags)
 891			tasklet_schedule(&tdes_dd->done_task);
 892		else
 893			dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
 894		return IRQ_HANDLED;
 895	}
 896
 897	return IRQ_NONE;
 898}
 899
 900static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
 901{
 902	int i;
 903
 904	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
 905		crypto_unregister_skcipher(&tdes_algs[i]);
 906}
 907
 908static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
 909{
 910	int err, i, j;
 911
 912	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
 913		atmel_tdes_skcipher_alg_init(&tdes_algs[i]);
 914
 915		err = crypto_register_skcipher(&tdes_algs[i]);
 916		if (err)
 917			goto err_tdes_algs;
 918	}
 919
 920	return 0;
 921
 922err_tdes_algs:
 923	for (j = 0; j < i; j++)
 924		crypto_unregister_skcipher(&tdes_algs[j]);
 925
 926	return err;
 927}
 928
 929static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
 930{
 931
 932	dd->caps.has_dma = 0;
 
 933
 934	/* keep only major version number */
 935	switch (dd->hw_version & 0xf00) {
 936	case 0x800:
 937	case 0x700:
 938		dd->caps.has_dma = 1;
 
 939		break;
 940	case 0x600:
 941		break;
 942	default:
 943		dev_warn(dd->dev,
 944				"Unmanaged tdes version, set minimum capabilities\n");
 945		break;
 946	}
 947}
 948
 
 949static const struct of_device_id atmel_tdes_dt_ids[] = {
 950	{ .compatible = "atmel,at91sam9g46-tdes" },
 951	{ /* sentinel */ }
 952};
 953MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
 
 954
 955static int atmel_tdes_probe(struct platform_device *pdev)
 956{
 957	struct atmel_tdes_dev *tdes_dd;
 958	struct device *dev = &pdev->dev;
 959	struct resource *tdes_res;
 960	int err;
 961
 962	tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
 963	if (!tdes_dd)
 964		return -ENOMEM;
 965
 966	tdes_dd->dev = dev;
 967
 968	platform_set_drvdata(pdev, tdes_dd);
 969
 970	INIT_LIST_HEAD(&tdes_dd->list);
 971	spin_lock_init(&tdes_dd->lock);
 972
 973	tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
 974					(unsigned long)tdes_dd);
 975	tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
 976					(unsigned long)tdes_dd);
 977
 978	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
 979
 980	tdes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &tdes_res);
 981	if (IS_ERR(tdes_dd->io_base)) {
 982		err = PTR_ERR(tdes_dd->io_base);
 
 
 983		goto err_tasklet_kill;
 984	}
 985	tdes_dd->phys_base = tdes_res->start;
 986
 987	/* Get the IRQ */
 988	tdes_dd->irq = platform_get_irq(pdev,  0);
 989	if (tdes_dd->irq < 0) {
 990		err = tdes_dd->irq;
 991		goto err_tasklet_kill;
 992	}
 993
 994	err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
 995			       IRQF_SHARED, "atmel-tdes", tdes_dd);
 996	if (err) {
 997		dev_err(dev, "unable to request tdes irq.\n");
 998		goto err_tasklet_kill;
 999	}
1000
1001	/* Initializing the clock */
1002	tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
1003	if (IS_ERR(tdes_dd->iclk)) {
1004		dev_err(dev, "clock initialization failed.\n");
1005		err = PTR_ERR(tdes_dd->iclk);
1006		goto err_tasklet_kill;
1007	}
1008
 
 
 
 
 
 
 
1009	err = atmel_tdes_hw_version_init(tdes_dd);
1010	if (err)
1011		goto err_tasklet_kill;
1012
1013	atmel_tdes_get_cap(tdes_dd);
1014
1015	err = atmel_tdes_buff_init(tdes_dd);
1016	if (err)
1017		goto err_tasklet_kill;
1018
1019	if (tdes_dd->caps.has_dma) {
1020		err = atmel_tdes_dma_init(tdes_dd);
1021		if (err)
1022			goto err_buff_cleanup;
1023
1024		dev_info(dev, "using %s, %s for DMA transfers\n",
1025				dma_chan_name(tdes_dd->dma_lch_in.chan),
1026				dma_chan_name(tdes_dd->dma_lch_out.chan));
1027	}
1028
1029	spin_lock(&atmel_tdes.lock);
1030	list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
1031	spin_unlock(&atmel_tdes.lock);
1032
1033	err = atmel_tdes_register_algs(tdes_dd);
1034	if (err)
1035		goto err_algs;
1036
1037	dev_info(dev, "Atmel DES/TDES\n");
1038
1039	return 0;
1040
1041err_algs:
1042	spin_lock(&atmel_tdes.lock);
1043	list_del(&tdes_dd->list);
1044	spin_unlock(&atmel_tdes.lock);
1045	if (tdes_dd->caps.has_dma)
1046		atmel_tdes_dma_cleanup(tdes_dd);
1047err_buff_cleanup:
1048	atmel_tdes_buff_cleanup(tdes_dd);
1049err_tasklet_kill:
1050	tasklet_kill(&tdes_dd->done_task);
1051	tasklet_kill(&tdes_dd->queue_task);
1052
1053	return err;
1054}
1055
1056static void atmel_tdes_remove(struct platform_device *pdev)
1057{
1058	struct atmel_tdes_dev *tdes_dd = platform_get_drvdata(pdev);
1059
 
 
 
1060	spin_lock(&atmel_tdes.lock);
1061	list_del(&tdes_dd->list);
1062	spin_unlock(&atmel_tdes.lock);
1063
1064	atmel_tdes_unregister_algs(tdes_dd);
1065
1066	tasklet_kill(&tdes_dd->done_task);
1067	tasklet_kill(&tdes_dd->queue_task);
1068
1069	if (tdes_dd->caps.has_dma)
1070		atmel_tdes_dma_cleanup(tdes_dd);
1071
1072	atmel_tdes_buff_cleanup(tdes_dd);
 
 
1073}
1074
1075static struct platform_driver atmel_tdes_driver = {
1076	.probe		= atmel_tdes_probe,
1077	.remove_new	= atmel_tdes_remove,
1078	.driver		= {
1079		.name	= "atmel_tdes",
1080		.of_match_table = atmel_tdes_dt_ids,
1081	},
1082};
1083
1084module_platform_driver(atmel_tdes_driver);
1085
1086MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
1087MODULE_LICENSE("GPL v2");
1088MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Cryptographic API.
   4 *
   5 * Support for ATMEL DES/TDES HW acceleration.
   6 *
   7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
   8 * Author: Nicolas Royer <nicolas@eukrea.com>
   9 *
  10 * Some ideas are from omap-aes.c drivers.
  11 */
  12
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/slab.h>
  17#include <linux/err.h>
  18#include <linux/clk.h>
  19#include <linux/io.h>
  20#include <linux/hw_random.h>
  21#include <linux/platform_device.h>
  22
  23#include <linux/device.h>
  24#include <linux/dmaengine.h>
  25#include <linux/init.h>
  26#include <linux/errno.h>
  27#include <linux/interrupt.h>
  28#include <linux/irq.h>
  29#include <linux/scatterlist.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/of_device.h>
  32#include <linux/delay.h>
  33#include <linux/crypto.h>
  34#include <crypto/scatterwalk.h>
  35#include <crypto/algapi.h>
  36#include <crypto/internal/des.h>
  37#include <crypto/internal/skcipher.h>
  38#include "atmel-tdes-regs.h"
  39
  40#define ATMEL_TDES_PRIORITY	300
  41
  42/* TDES flags  */
  43/* Reserve bits [17:16], [13:12], [2:0] for AES Mode Register */
  44#define TDES_FLAGS_ENCRYPT	TDES_MR_CYPHER_ENC
  45#define TDES_FLAGS_OPMODE_MASK	(TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
  46#define TDES_FLAGS_ECB		TDES_MR_OPMOD_ECB
  47#define TDES_FLAGS_CBC		TDES_MR_OPMOD_CBC
  48#define TDES_FLAGS_OFB		TDES_MR_OPMOD_OFB
  49#define TDES_FLAGS_CFB64	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
  50#define TDES_FLAGS_CFB32	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
  51#define TDES_FLAGS_CFB16	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
  52#define TDES_FLAGS_CFB8		(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)
  53
  54#define TDES_FLAGS_MODE_MASK	(TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)
  55
  56#define TDES_FLAGS_INIT		BIT(3)
  57#define TDES_FLAGS_FAST		BIT(4)
  58#define TDES_FLAGS_BUSY		BIT(5)
  59#define TDES_FLAGS_DMA		BIT(6)
  60
  61#define ATMEL_TDES_QUEUE_LENGTH	50
  62
  63#define CFB8_BLOCK_SIZE		1
  64#define CFB16_BLOCK_SIZE	2
  65#define CFB32_BLOCK_SIZE	4
  66
/* Capabilities derived from the IP revision; see atmel_tdes_get_cap(). */
  67struct atmel_tdes_caps {
  68	bool	has_dma;
  69	u32		has_cfb_3keys;
  70};
  71
  72struct atmel_tdes_dev;
  73
/* Per-transform (tfm) context: cached key material and the bound device. */
  74struct atmel_tdes_ctx {
  75	struct atmel_tdes_dev *dd;
  76
  77	int		keylen;
  78	u32		key[DES3_EDE_KEY_SIZE / sizeof(u32)];
  79	unsigned long	flags;
  80
  81	u16		block_size;
  82};
  83
/*
 * Per-request context: requested mode flags plus a copy of the last
 * ciphertext block, used to restore the IV after in-place decryption.
 */
  84struct atmel_tdes_reqctx {
  85	unsigned long mode;
  86	u8 lastc[DES_BLOCK_SIZE];
  87};
  88
/* One dmaengine channel and its slave configuration. */
  89struct atmel_tdes_dma {
  90	struct dma_chan			*chan;
  91	struct dma_slave_config dma_conf;
  92};
  93
/* Driver state for one TDES IP instance. */
  94struct atmel_tdes_dev {
  95	struct list_head	list;
  96	unsigned long		phys_base;
  97	void __iomem		*io_base;
  98
  99	struct atmel_tdes_ctx	*ctx;
 100	struct device		*dev;
 101	struct clk			*iclk;
 102	int					irq;
 103
 104	unsigned long		flags;
 105
 106	spinlock_t		lock;
 107	struct crypto_queue	queue;
 108
 109	struct tasklet_struct	done_task;
 110	struct tasklet_struct	queue_task;
 111
 112	struct skcipher_request	*req;
 113	size_t				total;
 114
 115	struct scatterlist	*in_sg;
 116	unsigned int		nb_in_sg;
 117	size_t				in_offset;
 118	struct scatterlist	*out_sg;
 119	unsigned int		nb_out_sg;
 120	size_t				out_offset;
 121
/* Bounce buffers (one page each) for the unaligned "slow" path. */
 122	size_t	buflen;
 123	size_t	dma_size;
 124
 125	void	*buf_in;
 126	int		dma_in;
 127	dma_addr_t	dma_addr_in;
 128	struct atmel_tdes_dma	dma_lch_in;
 129
 130	void	*buf_out;
 131	int		dma_out;
 132	dma_addr_t	dma_addr_out;
 133	struct atmel_tdes_dma	dma_lch_out;
 134
 135	struct atmel_tdes_caps	caps;
 136
 137	u32	hw_version;
 138};
 139
/* Global registry of probed TDES devices, guarded by its spinlock. */
 140struct atmel_tdes_drv {
 141	struct list_head	dev_list;
 142	spinlock_t		lock;
 143};
 144
 145static struct atmel_tdes_drv atmel_tdes = {
 146	.dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
 147	.lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
 148};
 149
/*
 * atmel_tdes_sg_copy() - copy up to min(@buflen, @total) bytes between the
 * scatterlist cursor (*@sg, *@offset) and the linear buffer @buf.
 *
 * @out selects direction for scatterwalk_map_and_copy(): 0 = sg -> buf,
 * 1 = buf -> sg.  Advances the cursor across sg entries and returns the
 * number of bytes actually copied.
 */
 150static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
 151			void *buf, size_t buflen, size_t total, int out)
 152{
 153	size_t count, off = 0;
 154
 155	while (buflen && total) {
 156		count = min((*sg)->length - *offset, total);
 157		count = min(count, buflen);
 158
 159		if (!count)
 160			return off;
 161
 162		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
 163
 164		off += count;
 165		buflen -= count;
 166		*offset += count;
 167		total -= count;
 168
/* Current entry exhausted: step to the next one, or stop at list end. */
 169		if (*offset == (*sg)->length) {
 170			*sg = sg_next(*sg);
 171			if (*sg)
 172				*offset = 0;
 173			else
 174				total = 0;
 175		}
 176	}
 177
 178	return off;
 179}
 180
/* MMIO accessors for the TDES register block (use the _relaxed variants). */
 181static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
 182{
 183	return readl_relaxed(dd->io_base + offset);
 184}
 185
 186static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
 187					u32 offset, u32 value)
 188{
 189	writel_relaxed(value, dd->io_base + offset);
 190}
 191
/* Write @count consecutive 32-bit words starting at @offset (keys, IVs). */
 192static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
 193			       const u32 *value, int count)
 194{
 195	for (; count--; value++, offset += 4)
 196		atmel_tdes_write(dd, offset, *value);
 197}
 198
 199static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx)
 200{
 201	struct atmel_tdes_dev *tdes_dd = NULL;
 202	struct atmel_tdes_dev *tmp;
 203
 204	spin_lock_bh(&atmel_tdes.lock);
 205	if (!ctx->dd) {
 206		list_for_each_entry(tmp, &atmel_tdes.dev_list, list) {
 207			tdes_dd = tmp;
 208			break;
 209		}
 210		ctx->dd = tdes_dd;
 211	} else {
 212		tdes_dd = ctx->dd;
 213	}
 214	spin_unlock_bh(&atmel_tdes.lock);
 215
 216	return tdes_dd;
 217}
 218
/*
 * atmel_tdes_hw_init() - enable the peripheral clock and, on first use only
 * (guarded by TDES_FLAGS_INIT), issue a software reset of the IP.
 *
 * The clock stays enabled; callers balance it with clk_disable_unprepare().
 */
 219static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
 220{
 221	int err;
 222
 223	err = clk_prepare_enable(dd->iclk);
 224	if (err)
 225		return err;
 226
 227	if (!(dd->flags & TDES_FLAGS_INIT)) {
 228		atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
 229		dd->flags |= TDES_FLAGS_INIT;
 230	}
 231
 232	return 0;
 233}
 234
 235static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
 236{
 237	return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
 238}
 239
 240static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
 241{
 242	int err;
 243
 244	err = atmel_tdes_hw_init(dd);
 245	if (err)
 246		return err;
 247
 248	dd->hw_version = atmel_tdes_get_version(dd);
 249
 250	dev_info(dd->dev,
 251			"version: 0x%x\n", dd->hw_version);
 252
 253	clk_disable_unprepare(dd->iclk);
 254
 255	return 0;
 256}
 257
/* dmaengine completion callback (output channel): defer to done_task. */
 258static void atmel_tdes_dma_callback(void *data)
 259{
 260	struct atmel_tdes_dev *dd = data;
 261
 262	/* dma_lch_out - completed */
 263	tasklet_schedule(&dd->done_task);
 264}
 265
/*
 * atmel_tdes_write_ctrl() - program the Mode Register, key and IV for the
 * request currently held in dd->req/dd->ctx.
 *
 * Key length selects DES vs 2-key vs 3-key TDES; mode/direction bits come
 * from dd->flags.  Register order matters: MR first, then keys, then IV.
 */
 266static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
 267{
 268	int err;
 269	u32 valmr = TDES_MR_SMOD_PDC;
 270
 271	err = atmel_tdes_hw_init(dd);
 272
 273	if (err)
 274		return err;
 275
/* Without dmaengine support the PDC is used; make sure it is stopped. */
 276	if (!dd->caps.has_dma)
 277		atmel_tdes_write(dd, TDES_PTCR,
 278			TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);
 279
 280	/* MR register must be set before IV registers */
 281	if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
 282		valmr |= TDES_MR_KEYMOD_3KEY;
 283		valmr |= TDES_MR_TDESMOD_TDES;
 284	} else if (dd->ctx->keylen > DES_KEY_SIZE) {
 285		valmr |= TDES_MR_KEYMOD_2KEY;
 286		valmr |= TDES_MR_TDESMOD_TDES;
 287	} else {
 288		valmr |= TDES_MR_TDESMOD_DES;
 289	}
 290
 291	valmr |= dd->flags & TDES_FLAGS_MODE_MASK;
 292
 293	atmel_tdes_write(dd, TDES_MR, valmr);
 294
 295	atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
 296						dd->ctx->keylen >> 2);
 297
/* ECB has no IV; all other modes load the 64-bit IV (two words). */
 298	if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
 299		atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);
 300
 301	return 0;
 302}
 303
/*
 * atmel_tdes_crypt_pdc_stop() - tear down a finished PDC transfer.
 *
 * Fast path: just unmap the caller's scatterlists.  Slow path: sync the
 * output bounce buffer and copy the result back into the destination sg.
 */
 304static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
 305{
 306	int err = 0;
 307	size_t count;
 308
 309	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
 310
 311	if (dd->flags & TDES_FLAGS_FAST) {
 312		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
 313		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 314	} else {
 315		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
 316					   dd->dma_size, DMA_FROM_DEVICE);
 317
 318		/* copy data */
 319		count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
 320				dd->buf_out, dd->buflen, dd->dma_size, 1);
 321		if (count != dd->dma_size) {
 322			err = -EINVAL;
 323			pr_err("not all data converted: %zu\n", count);
 324		}
 325	}
 326
 327	return err;
 328}
 329
 330static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
 331{
 332	int err = -ENOMEM;
 333
 334	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
 335	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
 336	dd->buflen = PAGE_SIZE;
 337	dd->buflen &= ~(DES_BLOCK_SIZE - 1);
 338
 339	if (!dd->buf_in || !dd->buf_out) {
 340		dev_err(dd->dev, "unable to alloc pages.\n");
 341		goto err_alloc;
 342	}
 343
 344	/* MAP here */
 345	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
 346					dd->buflen, DMA_TO_DEVICE);
 347	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
 348		dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
 349		err = -EINVAL;
 350		goto err_map_in;
 351	}
 352
 353	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
 354					dd->buflen, DMA_FROM_DEVICE);
 355	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
 356		dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
 357		err = -EINVAL;
 358		goto err_map_out;
 359	}
 360
 361	return 0;
 362
 363err_map_out:
 364	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
 365		DMA_TO_DEVICE);
 366err_map_in:
 367err_alloc:
 368	free_page((unsigned long)dd->buf_out);
 369	free_page((unsigned long)dd->buf_in);
 370	if (err)
 371		pr_err("error: %d\n", err);
 372	return err;
 373}
 374
/* Undo atmel_tdes_buff_init(): unmap and free both bounce pages. */
 375static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
 376{
 377	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
 378			 DMA_FROM_DEVICE);
 379	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
 380		DMA_TO_DEVICE);
 381	free_page((unsigned long)dd->buf_out);
 382	free_page((unsigned long)dd->buf_in);
 383}
 384
/*
 * atmel_tdes_crypt_pdc() - start a transfer of @length bytes via the PDC.
 *
 * The PDC counter registers count transfer units, so @length is converted
 * according to the CFB data size (bytes for CFB8, half-words for CFB16,
 * words otherwise).  Completion is signalled by the ENDRX interrupt.
 */
 385static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
 386				dma_addr_t dma_addr_in,
 387				dma_addr_t dma_addr_out, int length)
 388{
 389	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
 390	int len32;
 391
 392	dd->dma_size = length;
 393
/* Slow path only: flush the CPU-filled bounce buffer before the DMA reads it. */
 394	if (!(dd->flags & TDES_FLAGS_FAST)) {
 395		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
 396					   DMA_TO_DEVICE);
 397	}
 398
 399	switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
 400	case TDES_FLAGS_CFB8:
 401		len32 = DIV_ROUND_UP(length, sizeof(u8));
 402		break;
 403
 404	case TDES_FLAGS_CFB16:
 405		len32 = DIV_ROUND_UP(length, sizeof(u16));
 406		break;
 407
 408	default:
 409		len32 = DIV_ROUND_UP(length, sizeof(u32));
 410		break;
 411	}
 412
 413	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
 414	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
 415	atmel_tdes_write(dd, TDES_TCR, len32);
 416	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
 417	atmel_tdes_write(dd, TDES_RCR, len32);
 418
 419	/* Enable Interrupt */
 420	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);
 421
 422	/* Start DMA transfer */
 423	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);
 424
 425	return 0;
 426}
 427
/*
 * atmel_tdes_crypt_dma() - start a transfer of @length bytes via dmaengine.
 *
 * Picks the slave bus width to match the CFB data size, prepares one
 * MEM_TO_DEV and one DEV_TO_MEM descriptor over single-entry sg tables,
 * and issues both.  Completion is signalled by the output channel's
 * callback (atmel_tdes_dma_callback).  Returns 0 or -EINVAL when a
 * descriptor cannot be prepared.
 */
 428static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
 429				dma_addr_t dma_addr_in,
 430				dma_addr_t dma_addr_out, int length)
 431{
 432	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
 433	struct scatterlist sg[2];
 434	struct dma_async_tx_descriptor	*in_desc, *out_desc;
 435	enum dma_slave_buswidth addr_width;
 436
 437	dd->dma_size = length;
 438
/* Slow path only: flush the CPU-filled bounce buffer before the DMA reads it. */
 439	if (!(dd->flags & TDES_FLAGS_FAST)) {
 440		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
 441					   DMA_TO_DEVICE);
 442	}
 443
 444	switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
 445	case TDES_FLAGS_CFB8:
 446		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
 447		break;
 448
 449	case TDES_FLAGS_CFB16:
 450		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
 451		break;
 452
 453	default:
 454		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 455		break;
 456	}
 457
 458	dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
 459	dd->dma_lch_out.dma_conf.src_addr_width = addr_width;
 460
 461	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
 462	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
 463
 464	dd->flags |= TDES_FLAGS_DMA;
 465
 466	sg_init_table(&sg[0], 1);
 467	sg_dma_address(&sg[0]) = dma_addr_in;
 468	sg_dma_len(&sg[0]) = length;
 469
 470	sg_init_table(&sg[1], 1);
 471	sg_dma_address(&sg[1]) = dma_addr_out;
 472	sg_dma_len(&sg[1]) = length;
 473
 474	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
 475				1, DMA_MEM_TO_DEV,
 476				DMA_PREP_INTERRUPT  |  DMA_CTRL_ACK);
 477	if (!in_desc)
 478		return -EINVAL;
 479
 480	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
 481				1, DMA_DEV_TO_MEM,
 482				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 483	if (!out_desc)
 484		return -EINVAL;
 485
/* Only the output side completes the request. */
 486	out_desc->callback = atmel_tdes_dma_callback;
 487	out_desc->callback_param = dd;
 488
 489	dmaengine_submit(out_desc);
 490	dma_async_issue_pending(dd->dma_lch_out.chan);
 491
 492	dmaengine_submit(in_desc);
 493	dma_async_issue_pending(dd->dma_lch_in.chan);
 494
 495	return 0;
 496}
 497
 498static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
 499{
 500	int err, fast = 0, in, out;
 501	size_t count;
 502	dma_addr_t addr_in, addr_out;
 503
 504	if ((!dd->in_offset) && (!dd->out_offset)) {
 505		/* check for alignment */
 506		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
 507			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
 508		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
 509			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
 510		fast = in && out;
 511
 512		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
 513			fast = 0;
 514	}
 515
 516
 517	if (fast)  {
 518		count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
 519		count = min_t(size_t, count, sg_dma_len(dd->out_sg));
 520
 521		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 522		if (!err) {
 523			dev_err(dd->dev, "dma_map_sg() error\n");
 524			return -EINVAL;
 525		}
 526
 527		err = dma_map_sg(dd->dev, dd->out_sg, 1,
 528				DMA_FROM_DEVICE);
 529		if (!err) {
 530			dev_err(dd->dev, "dma_map_sg() error\n");
 531			dma_unmap_sg(dd->dev, dd->in_sg, 1,
 532				DMA_TO_DEVICE);
 533			return -EINVAL;
 534		}
 535
 536		addr_in = sg_dma_address(dd->in_sg);
 537		addr_out = sg_dma_address(dd->out_sg);
 538
 539		dd->flags |= TDES_FLAGS_FAST;
 540
 541	} else {
 542		/* use cache buffers */
 543		count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
 544				dd->buf_in, dd->buflen, dd->total, 0);
 545
 546		addr_in = dd->dma_addr_in;
 547		addr_out = dd->dma_addr_out;
 548
 549		dd->flags &= ~TDES_FLAGS_FAST;
 550	}
 551
 552	dd->total -= count;
 553
 554	if (dd->caps.has_dma)
 555		err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
 556	else
 557		err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);
 558
 559	if (err && (dd->flags & TDES_FLAGS_FAST)) {
 560		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 561		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
 562	}
 563
 564	return err;
 565}
 566
/*
 * Propagate the chaining value: after the operation, req->iv must hold the
 * last ciphertext block.  For encryption it is read from the destination;
 * for in-place decryption the source was already overwritten, so the copy
 * saved in rctx->lastc (see atmel_tdes_crypt()) is used instead.
 */
 567static void
 568atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
 569{
 570	struct skcipher_request *req = dd->req;
 571	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
 572	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 573	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 574
 575	if (req->cryptlen < ivsize)
 576		return;
 577
 578	if (rctx->mode & TDES_FLAGS_ENCRYPT) {
 579		scatterwalk_map_and_copy(req->iv, req->dst,
 580					 req->cryptlen - ivsize, ivsize, 0);
 581	} else {
 582		if (req->src == req->dst)
 583			memcpy(req->iv, rctx->lastc, ivsize);
 584		else
 585			scatterwalk_map_and_copy(req->iv, req->src,
 586						 req->cryptlen - ivsize,
 587						 ivsize, 0);
 588	}
 589}
 590
/*
 * atmel_tdes_finish_req() - complete dd->req with @err, releasing the clock
 * and the BUSY flag.  For chaining modes the IV is updated first.
 */
 591static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
 592{
 593	struct skcipher_request *req = dd->req;
 594	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
 595
 596	clk_disable_unprepare(dd->iclk);
 597
 598	dd->flags &= ~TDES_FLAGS_BUSY;
 599
 600	if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
 601		atmel_tdes_set_iv_as_last_ciphertext_block(dd);
 602
 603	req->base.complete(&req->base, err);
 604}
 605
/*
 * atmel_tdes_handle_queue() - enqueue @req (may be NULL) and, if the engine
 * is idle, dequeue and start the next request.
 *
 * Returns the crypto_enqueue_request() status for @req (-EINPROGRESS, or
 * -EBUSY when backlogged), not the outcome of the operation itself.
 */
 606static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
 607			       struct skcipher_request *req)
 608{
 609	struct crypto_async_request *async_req, *backlog;
 610	struct atmel_tdes_ctx *ctx;
 611	struct atmel_tdes_reqctx *rctx;
 612	unsigned long flags;
 613	int err, ret = 0;
 614
 615	spin_lock_irqsave(&dd->lock, flags);
 616	if (req)
 617		ret = crypto_enqueue_request(&dd->queue, &req->base);
/* Engine busy: the running request's completion path will pick this up. */
 618	if (dd->flags & TDES_FLAGS_BUSY) {
 619		spin_unlock_irqrestore(&dd->lock, flags);
 620		return ret;
 621	}
 622	backlog = crypto_get_backlog(&dd->queue);
 623	async_req = crypto_dequeue_request(&dd->queue);
 624	if (async_req)
 625		dd->flags |= TDES_FLAGS_BUSY;
 626	spin_unlock_irqrestore(&dd->lock, flags);
 627
 628	if (!async_req)
 629		return ret;
 630
 631	if (backlog)
 632		backlog->complete(backlog, -EINPROGRESS);
 633
 634	req = skcipher_request_cast(async_req);
 635
 636	/* assign new request to device */
 637	dd->req = req;
 638	dd->total = req->cryptlen;
 639	dd->in_offset = 0;
 640	dd->in_sg = req->src;
 641	dd->out_offset = 0;
 642	dd->out_sg = req->dst;
 643
 644	rctx = skcipher_request_ctx(req);
 645	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 646	rctx->mode &= TDES_FLAGS_MODE_MASK;
 647	dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
 648	dd->ctx = ctx;
 649	ctx->dd = dd;
 650
 651	err = atmel_tdes_write_ctrl(dd);
 652	if (!err)
 653		err = atmel_tdes_crypt_start(dd);
 654	if (err) {
 655		/* des_task will not finish it, so do it here */
 656		atmel_tdes_finish_req(dd, err);
 657		tasklet_schedule(&dd->queue_task);
 658	}
 659
 660	return ret;
 661}
 662
/*
 * atmel_tdes_crypt_dma_stop() - tear down a finished dmaengine transfer.
 *
 * No-op (returns -EINVAL) unless TDES_FLAGS_DMA is set.  Fast path: unmap
 * the caller's scatterlists.  Slow path: sync the output bounce buffer and
 * copy the result back into the destination sg.
 */
 663static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
 664{
 665	int err = -EINVAL;
 666	size_t count;
 667
 668	if (dd->flags & TDES_FLAGS_DMA) {
 669		err = 0;
 670		if  (dd->flags & TDES_FLAGS_FAST) {
 671			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
 672			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 673		} else {
 674			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
 675				dd->dma_size, DMA_FROM_DEVICE);
 676
 677			/* copy data */
 678			count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
 679				dd->buf_out, dd->buflen, dd->dma_size, 1);
 680			if (count != dd->dma_size) {
 681				err = -EINVAL;
 682				pr_err("not all data converted: %zu\n", count);
 683			}
 684		}
 685	}
 686	return err;
 687}
 688
/*
 * atmel_tdes_crypt() - common entry point for all modes/directions.
 *
 * Validates that cryptlen is a whole number of blocks for the selected
 * mode, records the mode in the request context, saves the last ciphertext
 * block for in-place decryption (IV chaining), then queues the request.
 */
 689static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
 690{
 691	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 692	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
 693	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);

 694
 695	switch (mode & TDES_FLAGS_OPMODE_MASK) {
 696	case TDES_FLAGS_CFB8:
 697		if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
 698			pr_err("request size is not exact amount of CFB8 blocks\n");
 699			return -EINVAL;
 700		}
 701		ctx->block_size = CFB8_BLOCK_SIZE;
 702		break;
 703
 704	case TDES_FLAGS_CFB16:
 705		if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
 706			pr_err("request size is not exact amount of CFB16 blocks\n");
 707			return -EINVAL;
 708		}
 709		ctx->block_size = CFB16_BLOCK_SIZE;
 710		break;
 711
 712	case TDES_FLAGS_CFB32:
 713		if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
 714			pr_err("request size is not exact amount of CFB32 blocks\n");
 715			return -EINVAL;
 716		}
 717		ctx->block_size = CFB32_BLOCK_SIZE;
 718		break;
 719
 720	default:
 721		if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
 722			pr_err("request size is not exact amount of DES blocks\n");
 723			return -EINVAL;
 724		}
 725		ctx->block_size = DES_BLOCK_SIZE;
 726		break;
 727	}

 728
 729	rctx->mode = mode;
 730
/*
 * In-place decryption will overwrite the source, so save the last
 * ciphertext block now; atmel_tdes_set_iv_as_last_ciphertext_block()
 * restores it into req->iv on completion.
 */
 731	if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
 732	    !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
 733		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 734
 735		if (req->cryptlen >= ivsize)
 736			scatterwalk_map_and_copy(rctx->lastc, req->src,
 737						 req->cryptlen - ivsize,
 738						 ivsize, 0);
 739	}
 740
 741	return atmel_tdes_handle_queue(ctx->dd, req);
 742}
 743
 744static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
 745{
 746	int ret;
 747
 748	/* Try to grab 2 DMA channels */
 749	dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
 750	if (IS_ERR(dd->dma_lch_in.chan)) {
 751		ret = PTR_ERR(dd->dma_lch_in.chan);
 752		goto err_dma_in;
 753	}
 754
 755	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
 756		TDES_IDATA1R;
 757	dd->dma_lch_in.dma_conf.src_maxburst = 1;
 758	dd->dma_lch_in.dma_conf.src_addr_width =
 759		DMA_SLAVE_BUSWIDTH_4_BYTES;
 760	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
 761	dd->dma_lch_in.dma_conf.dst_addr_width =
 762		DMA_SLAVE_BUSWIDTH_4_BYTES;
 763	dd->dma_lch_in.dma_conf.device_fc = false;
 764
 765	dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
 766	if (IS_ERR(dd->dma_lch_out.chan)) {
 767		ret = PTR_ERR(dd->dma_lch_out.chan);
 768		goto err_dma_out;
 769	}
 770
 771	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
 772		TDES_ODATA1R;
 773	dd->dma_lch_out.dma_conf.src_maxburst = 1;
 774	dd->dma_lch_out.dma_conf.src_addr_width =
 775		DMA_SLAVE_BUSWIDTH_4_BYTES;
 776	dd->dma_lch_out.dma_conf.dst_maxburst = 1;
 777	dd->dma_lch_out.dma_conf.dst_addr_width =
 778		DMA_SLAVE_BUSWIDTH_4_BYTES;
 779	dd->dma_lch_out.dma_conf.device_fc = false;
 780
 781	return 0;
 782
 783err_dma_out:
 784	dma_release_channel(dd->dma_lch_in.chan);
 785err_dma_in:
 786	dev_err(dd->dev, "no DMA channel available\n");
 787	return ret;
 788}
 789
 790static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
 791{
 792	dma_release_channel(dd->dma_lch_in.chan);
 793	dma_release_channel(dd->dma_lch_out.chan);
 794}
 795
 796static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
 797			   unsigned int keylen)
 798{
 799	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 800	int err;
 801
 802	err = verify_skcipher_des_key(tfm, key);
 803	if (err)
 804		return err;
 805
 806	memcpy(ctx->key, key, keylen);
 807	ctx->keylen = keylen;
 808
 809	return 0;
 810}
 811
 812static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 813			   unsigned int keylen)
 814{
 815	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 816	int err;
 817
 818	err = verify_skcipher_des3_key(tfm, key);
 819	if (err)
 820		return err;
 821
 822	memcpy(ctx->key, key, keylen);
 823	ctx->keylen = keylen;
 824
 825	return 0;
 826}
 827
/*
 * Thin per-mode entry points: each forwards to atmel_tdes_crypt() with the
 * matching operation-mode flags, OR-ing TDES_FLAGS_ENCRYPT for encryption.
 */
 828static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
 829{
 830	return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
 831}
 832
 833static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
 834{
 835	return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
 836}
 837
 838static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
 839{
 840	return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
 841}
 842
 843static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
 844{
 845	return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
 846}
 847static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
 848{
 849	return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
 850}
 851
 852static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
 853{
 854	return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
 855}
 856
 857static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
 858{
 859	return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
 860}
 861
 862static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
 863{
 864	return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
 865}
 866
 867static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
 868{
 869	return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
 870}
 871
 872static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
 873{
 874	return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
 875}
 876
 877static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
 878{
 879	return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
 880}
 881
 882static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
 883{
 884	return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
 885}
 886
 887static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
 888{
 889	return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
 890}
 891
 892static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
 893{
 894	return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
 895}
 896
 897static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
 898{
 899	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
 900	struct atmel_tdes_dev *dd;
 
 
 
 901
 902	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
 903
 904	dd = atmel_tdes_find_dev(ctx);
 905	if (!dd)
 906		return -ENODEV;
 907
 908	return 0;
 909}
 910
 911static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
 912{
 913	alg->base.cra_priority = ATMEL_TDES_PRIORITY;
 914	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
 915	alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
 916	alg->base.cra_module = THIS_MODULE;
 917
 918	alg->init = atmel_tdes_init_tfm;
 919}
 920
/*
 * Algorithm table: DES and 3DES in ECB/CBC/CFB*/OFB.  Common fields
 * (priority, flags, ctxsize, module, init) are filled in at registration
 * time by atmel_tdes_skcipher_alg_init().
 */
 921static struct skcipher_alg tdes_algs[] = {
 922{
 923	.base.cra_name		= "ecb(des)",
 924	.base.cra_driver_name	= "atmel-ecb-des",
 925	.base.cra_blocksize	= DES_BLOCK_SIZE,
 926	.base.cra_alignmask	= 0x7,
 927
 928	.min_keysize		= DES_KEY_SIZE,
 929	.max_keysize		= DES_KEY_SIZE,
 930	.setkey			= atmel_des_setkey,
 931	.encrypt		= atmel_tdes_ecb_encrypt,
 932	.decrypt		= atmel_tdes_ecb_decrypt,
 933},
 934{
 935	.base.cra_name		= "cbc(des)",
 936	.base.cra_driver_name	= "atmel-cbc-des",
 937	.base.cra_blocksize	= DES_BLOCK_SIZE,
 938	.base.cra_alignmask	= 0x7,
 939
 940	.min_keysize		= DES_KEY_SIZE,
 941	.max_keysize		= DES_KEY_SIZE,
 942	.ivsize			= DES_BLOCK_SIZE,
 943	.setkey			= atmel_des_setkey,
 944	.encrypt		= atmel_tdes_cbc_encrypt,
 945	.decrypt		= atmel_tdes_cbc_decrypt,
 946},
 947{
 948	.base.cra_name		= "cfb(des)",
 949	.base.cra_driver_name	= "atmel-cfb-des",
 950	.base.cra_blocksize	= DES_BLOCK_SIZE,
 951	.base.cra_alignmask	= 0x7,
 952
 953	.min_keysize		= DES_KEY_SIZE,
 954	.max_keysize		= DES_KEY_SIZE,
 955	.ivsize			= DES_BLOCK_SIZE,
 956	.setkey			= atmel_des_setkey,
 957	.encrypt		= atmel_tdes_cfb_encrypt,
 958	.decrypt		= atmel_tdes_cfb_decrypt,
 959},
 960{
 961	.base.cra_name		= "cfb8(des)",
 962	.base.cra_driver_name	= "atmel-cfb8-des",
 963	.base.cra_blocksize	= CFB8_BLOCK_SIZE,
 964	.base.cra_alignmask	= 0,
 965
 966	.min_keysize		= DES_KEY_SIZE,
 967	.max_keysize		= DES_KEY_SIZE,
 968	.ivsize			= DES_BLOCK_SIZE,
 969	.setkey			= atmel_des_setkey,
 970	.encrypt		= atmel_tdes_cfb8_encrypt,
 971	.decrypt		= atmel_tdes_cfb8_decrypt,
 972},
 973{
 974	.base.cra_name		= "cfb16(des)",
 975	.base.cra_driver_name	= "atmel-cfb16-des",
 976	.base.cra_blocksize	= CFB16_BLOCK_SIZE,
 977	.base.cra_alignmask	= 0x1,
 978
 979	.min_keysize		= DES_KEY_SIZE,
 980	.max_keysize		= DES_KEY_SIZE,
 981	.ivsize			= DES_BLOCK_SIZE,
 982	.setkey			= atmel_des_setkey,
 983	.encrypt		= atmel_tdes_cfb16_encrypt,
 984	.decrypt		= atmel_tdes_cfb16_decrypt,
 985},
 986{
 987	.base.cra_name		= "cfb32(des)",
 988	.base.cra_driver_name	= "atmel-cfb32-des",
 989	.base.cra_blocksize	= CFB32_BLOCK_SIZE,
 990	.base.cra_alignmask	= 0x3,
 991
 992	.min_keysize		= DES_KEY_SIZE,
 993	.max_keysize		= DES_KEY_SIZE,
 994	.ivsize			= DES_BLOCK_SIZE,
 995	.setkey			= atmel_des_setkey,
 996	.encrypt		= atmel_tdes_cfb32_encrypt,
 997	.decrypt		= atmel_tdes_cfb32_decrypt,
 998},
 999{
1000	.base.cra_name		= "ofb(des)",
1001	.base.cra_driver_name	= "atmel-ofb-des",
1002	.base.cra_blocksize	= DES_BLOCK_SIZE,
1003	.base.cra_alignmask	= 0x7,
1004
1005	.min_keysize		= DES_KEY_SIZE,
1006	.max_keysize		= DES_KEY_SIZE,
1007	.ivsize			= DES_BLOCK_SIZE,
1008	.setkey			= atmel_des_setkey,
1009	.encrypt		= atmel_tdes_ofb_encrypt,
1010	.decrypt		= atmel_tdes_ofb_decrypt,
1011},
1012{
1013	.base.cra_name		= "ecb(des3_ede)",
1014	.base.cra_driver_name	= "atmel-ecb-tdes",
1015	.base.cra_blocksize	= DES_BLOCK_SIZE,
1016	.base.cra_alignmask	= 0x7,
1017
1018	.min_keysize		= DES3_EDE_KEY_SIZE,
1019	.max_keysize		= DES3_EDE_KEY_SIZE,
1020	.setkey			= atmel_tdes_setkey,
1021	.encrypt		= atmel_tdes_ecb_encrypt,
1022	.decrypt		= atmel_tdes_ecb_decrypt,
1023},
1024{
1025	.base.cra_name		= "cbc(des3_ede)",
1026	.base.cra_driver_name	= "atmel-cbc-tdes",
1027	.base.cra_blocksize	= DES_BLOCK_SIZE,
1028	.base.cra_alignmask	= 0x7,
1029
1030	.min_keysize		= DES3_EDE_KEY_SIZE,
1031	.max_keysize		= DES3_EDE_KEY_SIZE,
1032	.setkey			= atmel_tdes_setkey,
1033	.encrypt		= atmel_tdes_cbc_encrypt,
1034	.decrypt		= atmel_tdes_cbc_decrypt,
1035	.ivsize			= DES_BLOCK_SIZE,
1036},
1037{
1038	.base.cra_name		= "ofb(des3_ede)",
1039	.base.cra_driver_name	= "atmel-ofb-tdes",
1040	.base.cra_blocksize	= DES_BLOCK_SIZE,
1041	.base.cra_alignmask	= 0x7,
1042
1043	.min_keysize		= DES3_EDE_KEY_SIZE,
1044	.max_keysize		= DES3_EDE_KEY_SIZE,
1045	.setkey			= atmel_tdes_setkey,
1046	.encrypt		= atmel_tdes_ofb_encrypt,
1047	.decrypt		= atmel_tdes_ofb_decrypt,
1048	.ivsize			= DES_BLOCK_SIZE,
1049},
1050};
1051
1052static void atmel_tdes_queue_task(unsigned long data)
1053{
1054	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;
1055
1056	atmel_tdes_handle_queue(dd, NULL);
1057}
1058
/*
 * Tasklet: transfer-complete bottom half.  Tears down the finished PDC or
 * dmaengine transfer, then either starts the next chunk of the same request
 * (dd->total not yet drained) or completes it and services the queue.
 */
1059static void atmel_tdes_done_task(unsigned long data)
1060{
1061	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
1062	int err;
1063
1064	if (!(dd->flags & TDES_FLAGS_DMA))
1065		err = atmel_tdes_crypt_pdc_stop(dd);
1066	else
1067		err = atmel_tdes_crypt_dma_stop(dd);
1068
1069	if (dd->total && !err) {
1070		if (dd->flags & TDES_FLAGS_FAST) {
/* Fast path consumed whole sg entries; advance to the next pair. */
1071			dd->in_sg = sg_next(dd->in_sg);
1072			dd->out_sg = sg_next(dd->out_sg);
1073			if (!dd->in_sg || !dd->out_sg)
1074				err = -EINVAL;
1075		}
1076		if (!err)
1077			err = atmel_tdes_crypt_start(dd);
1078		if (!err)
1079			return; /* DMA started. Not finishing. */
1080	}
1081
1082	atmel_tdes_finish_req(dd, err);
1083	atmel_tdes_handle_queue(dd, NULL);
1084}
1085
/*
 * Interrupt handler (PDC mode): acknowledge/mask the pending sources and
 * defer the real work to done_task.  Shared-IRQ safe: returns IRQ_NONE when
 * no enabled TDES source is pending.
 */
1086static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
1087{
1088	struct atmel_tdes_dev *tdes_dd = dev_id;
1089	u32 reg;
1090
1091	reg = atmel_tdes_read(tdes_dd, TDES_ISR);
1092	if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
1093		atmel_tdes_write(tdes_dd, TDES_IDR, reg);
1094		if (TDES_FLAGS_BUSY & tdes_dd->flags)
1095			tasklet_schedule(&tdes_dd->done_task);
1096		else
1097			dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
1098		return IRQ_HANDLED;
1099	}
1100
1101	return IRQ_NONE;
1102}
1103
1104static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
1105{
1106	int i;
1107
1108	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
1109		crypto_unregister_skcipher(&tdes_algs[i]);
1110}
1111
1112static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
1113{
1114	int err, i, j;
1115
1116	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
1117		atmel_tdes_skcipher_alg_init(&tdes_algs[i]);
1118
1119		err = crypto_register_skcipher(&tdes_algs[i]);
1120		if (err)
1121			goto err_tdes_algs;
1122	}
1123
1124	return 0;
1125
1126err_tdes_algs:
1127	for (j = 0; j < i; j++)
1128		crypto_unregister_skcipher(&tdes_algs[j]);
1129
1130	return err;
1131}
1132
/*
 * atmel_tdes_get_cap() - derive capability flags from the major revision
 * read earlier into dd->hw_version.  Unknown revisions fall back to the
 * minimum (PDC-only) feature set with a warning.
 */
1133static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
1134{
1135
1136	dd->caps.has_dma = 0;
1137	dd->caps.has_cfb_3keys = 0;
1138
1139	/* keep only major version number */
1140	switch (dd->hw_version & 0xf00) {

1141	case 0x700:
1142		dd->caps.has_dma = 1;
1143		dd->caps.has_cfb_3keys = 1;
1144		break;
1145	case 0x600:
1146		break;
1147	default:
1148		dev_warn(dd->dev,
1149				"Unmanaged tdes version, set minimum capabilities\n");
1150		break;
1151	}
1152}
1153
/* Device-tree match table (only compiled in when CONFIG_OF is enabled). */
1154#if defined(CONFIG_OF)
1155static const struct of_device_id atmel_tdes_dt_ids[] = {
1156	{ .compatible = "atmel,at91sam9g46-tdes" },
1157	{ /* sentinel */ }
1158};
1159MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
1160#endif
1161
/*
 * Probe one TDES instance: allocate device state, set up tasklets and the
 * request queue, map MMIO, grab IRQ and clock, read the hardware version,
 * optionally set up DMA, publish the device on the global list and register
 * the algorithms.  Failures unwind in reverse order via the goto labels;
 * devm_* resources (memory, IRQ, clock, MMIO) are released by the driver
 * core automatically.
 */
static int atmel_tdes_probe(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd;
	struct device *dev = &pdev->dev;
	struct resource *tdes_res;
	int err;

	tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
	if (!tdes_dd)
		return -ENOMEM;

	tdes_dd->dev = dev;

	platform_set_drvdata(pdev, tdes_dd);

	INIT_LIST_HEAD(&tdes_dd->list);
	spin_lock_init(&tdes_dd->lock);

	/* Tasklets must exist before the (shared) IRQ is requested below,
	 * since the handler may schedule them. */
	tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
					(unsigned long)tdes_dd);
	tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
					(unsigned long)tdes_dd);

	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);

	/* Get the base address */
	tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!tdes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err_tasklet_kill;
	}
	tdes_dd->phys_base = tdes_res->start;

	/* Get the IRQ */
	tdes_dd->irq = platform_get_irq(pdev,  0);
	if (tdes_dd->irq < 0) {
		err = tdes_dd->irq;
		goto err_tasklet_kill;
	}

	/*
	 * NOTE(review): the shared IRQ is requested before io_base is mapped
	 * below — presumably the handler tolerates an early interrupt on the
	 * shared line; confirm against atmel_tdes_irq().
	 */
	err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
			       IRQF_SHARED, "atmel-tdes", tdes_dd);
	if (err) {
		dev_err(dev, "unable to request tdes irq.\n");
		goto err_tasklet_kill;
	}

	/* Initializing the clock */
	tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
	if (IS_ERR(tdes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(tdes_dd->iclk);
		goto err_tasklet_kill;
	}

	tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
	if (IS_ERR(tdes_dd->io_base)) {
		dev_err(dev, "can't ioremap\n");
		err = PTR_ERR(tdes_dd->io_base);
		goto err_tasklet_kill;
	}

	/* Read the IP version so atmel_tdes_get_cap() can pick features. */
	err = atmel_tdes_hw_version_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;

	atmel_tdes_get_cap(tdes_dd);

	err = atmel_tdes_buff_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;

	/* DMA channels are only set up when the IP version supports DMA. */
	if (tdes_dd->caps.has_dma) {
		err = atmel_tdes_dma_init(tdes_dd);
		if (err)
			goto err_buff_cleanup;

		dev_info(dev, "using %s, %s for DMA transfers\n",
				dma_chan_name(tdes_dd->dma_lch_in.chan),
				dma_chan_name(tdes_dd->dma_lch_out.chan));
	}

	/* Make the device visible to request dispatch before registering algs. */
	spin_lock(&atmel_tdes.lock);
	list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
	spin_unlock(&atmel_tdes.lock);

	err = atmel_tdes_register_algs(tdes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel DES/TDES\n");

	return 0;

err_algs:
	/* Unwind in reverse order of acquisition. */
	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);
	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);
err_buff_cleanup:
	atmel_tdes_buff_cleanup(tdes_dd);
err_tasklet_kill:
	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	return err;
}
1271
1272static int atmel_tdes_remove(struct platform_device *pdev)
1273{
1274	struct atmel_tdes_dev *tdes_dd;
1275
1276	tdes_dd = platform_get_drvdata(pdev);
1277	if (!tdes_dd)
1278		return -ENODEV;
1279	spin_lock(&atmel_tdes.lock);
1280	list_del(&tdes_dd->list);
1281	spin_unlock(&atmel_tdes.lock);
1282
1283	atmel_tdes_unregister_algs(tdes_dd);
1284
1285	tasklet_kill(&tdes_dd->done_task);
1286	tasklet_kill(&tdes_dd->queue_task);
1287
1288	if (tdes_dd->caps.has_dma)
1289		atmel_tdes_dma_cleanup(tdes_dd);
1290
1291	atmel_tdes_buff_cleanup(tdes_dd);
1292
1293	return 0;
1294}
1295
/* Platform driver glue; the OF table is compiled out when CONFIG_OF is unset. */
static struct platform_driver atmel_tdes_driver = {
	.probe		= atmel_tdes_probe,
	.remove		= atmel_tdes_remove,
	.driver		= {
		.name	= "atmel_tdes",
		.of_match_table = of_match_ptr(atmel_tdes_dt_ids),
	},
};

module_platform_driver(atmel_tdes_driver);

MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");