// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/internal/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   unsigned int len, bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (is_sec1) {
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
		ptr->eptr = upper_32_bits(dma_addr);
	}
}
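
/*
 * Illustrative example (values are hypothetical): with 36-bit addressing
 * enabled, a bus address of 0x9_1234_5678 is stored on SEC2/3 as
 * ptr = 0x12345678 with eptr = 0x9 and the byte count in len; SEC1 has
 * no eptr field and carries the count in len1 instead.
 */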

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (is_sec1) {
		dst_ptr->len1 = src_ptr->len1;
	} else {
		dst_ptr->len = src_ptr->len;
		dst_ptr->eptr = src_ptr->eptr;
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
				   bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent |= val;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void __map_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     unsigned int len, void *data,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}

static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
}

static void map_single_talitos_ptr_nosync(struct device *dev,
					  struct talitos_ptr *ptr,
					  unsigned int len, void *data,
					  enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir,
				 DMA_ATTR_SKIP_CPU_SYNC);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
	/* enable chaining descriptors */
	if (is_sec1)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_NE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
			  void (*callback)(struct device *dev,
					   struct talitos_desc *desc,
					   void *context, int error),
			  void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
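
/*
 * Sketch of the expected calling pattern (my_done and my_req are
 * hypothetical caller-side names, not part of this file):
 *
 *	ret = talitos_submit(dev, ch, &edesc->desc, my_done, my_req);
 *	if (ret != -EINPROGRESS) {
 *		... unmap DMA mappings and free the descriptor ...
 *	}
 *
 * On completion, flush_channel() below invokes my_done() outside the
 * channel lock with the status derived from the descriptor header.
 */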

static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
{
	struct talitos_edesc *edesc;

	if (!is_sec1)
		return request->desc->hdr;

	if (!request->desc->next_desc)
		return request->desc->hdr1;

	edesc = container_of(request->desc, struct talitos_edesc, desc);

	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
}

/*
 * process completed requests; if an error occurred, report it to the
 * callback of the request that was not done
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = get_request_hdr(request, is_sec1);

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
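
/*
 * A note on the indexing above: the per-channel request fifo is a
 * power-of-two ring, so head and tail advance with
 * (idx + 1) & (priv->fifo_len - 1); e.g. with fifo_len = 8 the index
 * sequence wraps 6, 7, 0, 1, ... without a division.
 */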

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);			\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);			\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);			\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);			\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
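
/*
 * Each DEF_TALITOS*_DONE() invocation above expands to a tasklet body,
 * e.g. talitos2_done_4ch(), that drains every channel named by its done
 * mask via flush_channel() and then re-enables the done interrupts that
 * the interrupt handler masked before scheduling the tasklet.
 */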

/*
 * locate current (offending) descriptor
 */
static __be32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
	       priv->chan[ch].fifo[iter].desc->next_desc != cpu_to_be32(cur_desc)) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	if (priv->chan[ch].fifo[iter].desc->next_desc == cpu_to_be32(cur_desc)) {
		struct talitos_edesc *edesc;

		edesc = container_of(priv->chan[ch].fifo[iter].desc,
				     struct talitos_edesc, desc);
		return ((struct talitos_desc *)
			(edesc->buf + edesc->dma_len))->hdr;
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF));

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev);
	priv->rng.init		= talitos_rng_init;
	priv->rng.data_present	= talitos_rng_data_present;
	priv->rng.data_read	= talitos_rng_data_read;
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
/*
 * Defines a priority for doing AEAD with descriptors type
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#else
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
#endif
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	dma_addr_t dma_key;
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[2][HASH_MAX_BLOCK_SIZE];
	int buf_idx;
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
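
/*
 * Resulting ctx->key layout after a successful aead_setkey() (lengths
 * come from crypto_authenc_extractkeys()):
 *
 *	ctx->key: [ authentication key | encryption key ]
 *	          0                    authkeylen        authkeylen + enckeylen
 *
 * ipsec_esp() later points the descriptor's hmac-key pointer at
 * ctx->dma_key and the cipher-key pointer at ctx->dma_key + authkeylen.
 */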

static int aead_des3_setkey(struct crypto_aead *authenc,
			    const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto out;

	err = -EINVAL;
	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto out;

	err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
	if (err)
		goto out;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

	if (is_ipsec_esp)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
			 cryptlen + authsize, areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!is_ipsec_esp) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + cryptlen - ivsize);
	}
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq, true);

	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	if (!err) {
		/* auth check */
		oicv = edesc->buf + edesc->dma_len;
		icv = oicv - authsize;

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int datalen, int elen,
				 struct talitos_ptr *link_tbl_ptr, int align)
{
	int n_sg = elen ? sg_count + 1 : sg_count;
	int count = 0;
	int cryptlen = datalen + elen;
	int padding = ALIGN(cryptlen, align) - cryptlen;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		if (datalen > 0 && len > datalen) {
			to_talitos_ptr(link_tbl_ptr + count,
				       sg_dma_address(sg) + offset, datalen, 0);
			to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
			count++;
			len -= datalen;
			offset += datalen;
		}
		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, sg_next(sg) ? len : len + padding, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		datalen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RET, 0);

	return count;
}
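
/*
 * Minimal sketch of the resulting link table for a two-segment
 * scatterlist on SEC2 (addresses and lengths are illustrative):
 *
 *	entry 0: ptr = seg0 bus addr, len = seg0 len, j_extent = 0
 *	entry 1: ptr = seg1 bus addr, len = seg1 len,
 *	         j_extent = DESC_PTR_LNKTBL_RET	(marks the last entry)
 */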

static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
			      unsigned int len, struct talitos_edesc *edesc,
			      struct talitos_ptr *ptr, int sg_count,
			      unsigned int offset, int tbl_off, int elen,
			      bool force, int align)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int aligned_len = ALIGN(len, align);

	if (!src) {
		to_talitos_ptr(ptr, 0, 0, is_sec1);
		return 1;
	}
	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
	if (sg_count == 1 && !force) {
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len, is_sec1);
		return sg_count;
	}
	if (is_sec1) {
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len, is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
					 &edesc->link_tbl[tbl_off], align);
	if (sg_count == 1 && !force) {
		/* Only one segment now, so no link tbl needed */
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr), aligned_len, is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}

static int talitos_sg_map(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_ptr *ptr, int sg_count,
			  unsigned int offset, int tbl_off)
{
	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
				  tbl_off, 0, false, 1);
}

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     bool encrypt,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int elen = 0;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
	dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;

	/* hmac key */
	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
		       ctx->enckeylen, is_sec1);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * the extent is the number of HMAC bytes appended to the
	 * ciphertext, typically 12 for ipsec
	 */
	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
		elen = authsize;

	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
				 sg_count, areq->assoclen, tbl_off, elen,
				 false, 1);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	if (is_ipsec_esp && encrypt)
		elen = authsize;
	else
		elen = 0;
	ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
				 sg_count, areq->assoclen, tbl_off, elen,
				 is_ipsec_esp && !encrypt, 1);
	tbl_off += ret;

	if (!encrypt && is_ipsec_esp) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		/* Add an entry to the link table for ICV data */
		to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
		to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
		sync_needed = true;
	} else if (!encrypt) {
		to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
		sync_needed = true;
	} else if (!is_ipsec_esp) {
		talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
			       sg_count, areq->assoclen + cryptlen, tbl_off);
	}

	/* iv out */
	if (is_ipsec_esp)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq, encrypt);
		kfree(edesc);
	}
	return ret;
}

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dst || dst == src) {
		src_len = assoclen + cryptlen + authsize;
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
		dst_len = 0;
	} else { /* dst && dst != src */
		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
		dst_nents = sg_nents_for_len(dst, dst_len);
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			return ERR_PTR(-EINVAL);
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents || !encrypt) {
		if (is_sec1)
			dma_len = (src_nents ? src_len : 0) +
				  (dst_nents ? dst_len : 0) + authsize;
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
	}
	alloc_len += icv_stashing ? authsize : 0;

	/* if it's an ahash, add space for a second desc next to the first one */
	if (is_sec1 && !dst)
		alloc_len += sizeof(struct talitos_desc);
	alloc_len += ivsize;

	edesc = kmalloc(ALIGN(alloc_len, dma_get_cache_alignment()), flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);
	if (ivsize) {
		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	}
	memset(&edesc->desc, 0, sizeof(edesc->desc));

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
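
/*
 * Approximate layout of the allocation above in the non-SEC1 case:
 *
 *	[ struct talitos_edesc | link tables (+2) | ICV | stashed ICV | IV copy ]
 *	                       |<------- dma_len ------>|
 *
 * The dma_len region starting at the link tables is mapped
 * bidirectionally so that both the driver and the device may update it.
 */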

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	void *icvdata;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
	    (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */

		return ipsec_esp(edesc, req, false,
				 ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	icvdata = edesc->buf + edesc->dma_len;

	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
			   req->assoclen + req->cryptlen - authsize);

	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
}

static int skcipher_setkey(struct crypto_skcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct device *dev = ctx->dev;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);

	return 0;
}
1507
1508static int skcipher_des_setkey(struct crypto_skcipher *cipher,
1509				 const u8 *key, unsigned int keylen)
1510{
1511	return verify_skcipher_des_key(cipher, key) ?:
1512	       skcipher_setkey(cipher, key, keylen);
1513}
1514
1515static int skcipher_des3_setkey(struct crypto_skcipher *cipher,
1516				  const u8 *key, unsigned int keylen)
1517{
1518	return verify_skcipher_des3_key(cipher, key) ?:
1519	       skcipher_setkey(cipher, key, keylen);
1520}
1521
1522static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
1523				  const u8 *key, unsigned int keylen)
1524{
1525	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1526	    keylen == AES_KEYSIZE_256)
1527		return skcipher_setkey(cipher, key, keylen);
1528
1529	return -EINVAL;
1530}
1531
1532static void common_nonsnoop_unmap(struct device *dev,
1533				  struct talitos_edesc *edesc,
1534				  struct skcipher_request *areq)
1535{
1536	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1537
1538	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 0);
1539	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1540
 
 
1541	if (edesc->dma_len)
1542		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1543				 DMA_BIDIRECTIONAL);
1544}
1545
1546static void skcipher_done(struct device *dev,
1547			    struct talitos_desc *desc, void *context,
1548			    int err)
1549{
1550	struct skcipher_request *areq = context;
1551	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1552	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1553	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1554	struct talitos_edesc *edesc;
1555
1556	edesc = container_of(desc, struct talitos_edesc, desc);
1557
1558	common_nonsnoop_unmap(dev, edesc, areq);
1559	memcpy(areq->iv, ctx->iv, ivsize);
1560
1561	kfree(edesc);
1562
1563	skcipher_request_complete(areq, err);
1564}
1565
1566static int common_nonsnoop(struct talitos_edesc *edesc,
1567			   struct skcipher_request *areq,
1568			   void (*callback) (struct device *dev,
1569					     struct talitos_desc *desc,
1570					     void *context, int error))
1571{
1572	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1573	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1574	struct device *dev = ctx->dev;
1575	struct talitos_desc *desc = &edesc->desc;
1576	unsigned int cryptlen = areq->cryptlen;
1577	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1578	int sg_count, ret;
1579	bool sync_needed = false;
1580	struct talitos_private *priv = dev_get_drvdata(dev);
1581	bool is_sec1 = has_ftr_sec1(priv);
1582	bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU &&
1583		      (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR;
1584
1585	/* first DWORD empty */
 
 
 
1586
1587	/* cipher iv */
1588	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
 
 
1589
1590	/* cipher key */
1591	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
 
1592
1593	sg_count = edesc->src_nents ?: 1;
1594	if (is_sec1 && sg_count > 1)
1595		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1596				  cryptlen);
1597	else
1598		sg_count = dma_map_sg(dev, areq->src, sg_count,
1599				      (areq->src == areq->dst) ?
1600				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1601	/*
1602	 * cipher in
1603	 */
1604	sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3],
1605				      sg_count, 0, 0, 0, false, is_ctr ? 16 : 1);
1606	if (sg_count > 1)
1607		sync_needed = true;
1608
1609	/* cipher out */
1610	if (areq->src != areq->dst) {
1611		sg_count = edesc->dst_nents ? : 1;
1612		if (!is_sec1 || sg_count == 1)
1613			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1614	}
1615
1616	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1617			     sg_count, 0, (edesc->src_nents + 1));
1618	if (ret > 1)
1619		sync_needed = true;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1620
1621	/* iv out */
1622	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1623			       DMA_FROM_DEVICE);
1624
1625	/* last DWORD empty */
1626
1627	if (sync_needed)
1628		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1629					   edesc->dma_len, DMA_BIDIRECTIONAL);
1630
1631	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1632	if (ret != -EINPROGRESS) {
1633		common_nonsnoop_unmap(dev, edesc, areq);
1634		kfree(edesc);
1635	}
1636	return ret;
1637}
1638
1639static struct talitos_edesc *skcipher_edesc_alloc(struct skcipher_request *
1640						    areq, bool encrypt)
1641{
1642	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1643	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1644	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1645
1646	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1647				   areq->iv, 0, areq->cryptlen, 0, ivsize, 0,
1648				   areq->base.flags, encrypt);
1649}
1650
1651static int skcipher_encrypt(struct skcipher_request *areq)
1652{
1653	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1654	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1655	struct talitos_edesc *edesc;
1656	unsigned int blocksize =
1657			crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
1658
1659	if (!areq->cryptlen)
1660		return 0;
1661
1662	if (areq->cryptlen % blocksize)
1663		return -EINVAL;
1664
1665	/* allocate extended descriptor */
1666	edesc = skcipher_edesc_alloc(areq, true);
1667	if (IS_ERR(edesc))
1668		return PTR_ERR(edesc);
1669
1670	/* set encrypt */
1671	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1672
1673	return common_nonsnoop(edesc, areq, skcipher_done);
1674}
1675
1676static int skcipher_decrypt(struct skcipher_request *areq)
1677{
1678	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1679	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1680	struct talitos_edesc *edesc;
1681	unsigned int blocksize =
1682			crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
1683
1684	if (!areq->cryptlen)
1685		return 0;
1686
1687	if (areq->cryptlen % blocksize)
1688		return -EINVAL;
1689
1690	/* allocate extended descriptor */
1691	edesc = skcipher_edesc_alloc(areq, false);
1692	if (IS_ERR(edesc))
1693		return PTR_ERR(edesc);
1694
1695	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1696
1697	return common_nonsnoop(edesc, areq, skcipher_done);
1698}
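/*
 * Minimal usage sketch (not part of this driver): how a kernel caller
 * reaches skcipher_encrypt() above through the generic crypto API once
 * "cbc-aes-talitos" is registered.  "key", "plaintext", "len" and "iv"
 * are hypothetical caller-provided buffers; error handling is omitted.
 */
#if 0
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;

	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	sg_init_one(&sg, plaintext, len);	/* len: multiple of blocksize */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
#endif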
1699
1700static void common_nonsnoop_hash_unmap(struct device *dev,
1701				       struct talitos_edesc *edesc,
1702				       struct ahash_request *areq)
1703{
1704	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1705	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1706	struct talitos_private *priv = dev_get_drvdata(dev);
1707	bool is_sec1 = has_ftr_sec1(priv);
1708	struct talitos_desc *desc = &edesc->desc;
1709	struct talitos_desc *desc2 = (struct talitos_desc *)
1710				     (edesc->buf + edesc->dma_len);
1711
1712	unmap_single_talitos_ptr(dev, &desc->ptr[5], DMA_FROM_DEVICE);
1713	if (desc->next_desc &&
1714	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
1715		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1716	if (req_ctx->last)
1717		memcpy(areq->result, req_ctx->hw_context,
1718		       crypto_ahash_digestsize(tfm));
1719
1720	if (req_ctx->psrc)
1721		talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1722
1723	/* When using hashctx-in, must unmap it. */
1724	if (from_talitos_ptr_len(&desc->ptr[1], is_sec1))
1725		unmap_single_talitos_ptr(dev, &desc->ptr[1],
1726					 DMA_TO_DEVICE);
1727	else if (desc->next_desc)
1728		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1729					 DMA_TO_DEVICE);
1730
1731	if (is_sec1 && req_ctx->nbuf)
1732		unmap_single_talitos_ptr(dev, &desc->ptr[3],
1733					 DMA_TO_DEVICE);
1734
1735	if (edesc->dma_len)
1736		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1737				 DMA_BIDIRECTIONAL);
1738
1739	if (desc->next_desc)
1740		dma_unmap_single(dev, be32_to_cpu(desc->next_desc),
1741				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1742}
1743
1744static void ahash_done(struct device *dev,
1745		       struct talitos_desc *desc, void *context,
1746		       int err)
1747{
1748	struct ahash_request *areq = context;
1749	struct talitos_edesc *edesc =
1750		 container_of(desc, struct talitos_edesc, desc);
1751	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1752
1753	if (!req_ctx->last && req_ctx->to_hash_later) {
1754		/* Position any partial block for next update/final/finup */
1755		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1756		req_ctx->nbuf = req_ctx->to_hash_later;
1757	}
1758	common_nonsnoop_hash_unmap(dev, edesc, areq);
1759
1760	kfree(edesc);
1761
1762	ahash_request_complete(areq, err);
1763}
1764
1765/*
1766 * SEC1 doesn't like hashing of a zero-sized message, so we do the
1767 * padding ourselves and submit a pre-padded block
1768 */
1769static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1770			       struct talitos_edesc *edesc,
1771			       struct talitos_ptr *ptr)
1772{
1773	static u8 padded_hash[64] = {
1774		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1775		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1776		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1777		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1778	};
1779
1780	pr_err_once("Bug in SEC1, padding ourself\n");
1781	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1782	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1783			       (char *)padded_hash, DMA_TO_DEVICE);
1784}
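/*
 * The padded_hash block above is the standard MD-style padding of an
 * empty message: one 0x80 marker byte followed by zeros, including an
 * all-zero 64-bit length field, i.e. what hardware padding would have
 * generated for a 0-byte input.
 */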
1785
1786static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1787				struct ahash_request *areq, unsigned int length,
1788				void (*callback) (struct device *dev,
1789						  struct talitos_desc *desc,
1790						  void *context, int error))
1791{
1792	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1793	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1794	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1795	struct device *dev = ctx->dev;
1796	struct talitos_desc *desc = &edesc->desc;
1797	int ret;
1798	bool sync_needed = false;
1799	struct talitos_private *priv = dev_get_drvdata(dev);
1800	bool is_sec1 = has_ftr_sec1(priv);
1801	int sg_count;
1802
1803	/* first DWORD empty */
1804
1805	/* hash context in */
1806	if (!req_ctx->first || req_ctx->swinit) {
1807		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1808					      req_ctx->hw_context_size,
1809					      req_ctx->hw_context,
1810					      DMA_TO_DEVICE);
1811		req_ctx->swinit = 0;
1812	}
1813	/* Indicate next op is not the first. */
1814	req_ctx->first = 0;
1815
1816	/* HMAC key */
1817	if (ctx->keylen)
1818		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1819			       is_sec1);
1820
1821	if (is_sec1 && req_ctx->nbuf)
1822		length -= req_ctx->nbuf;
1823
1824	sg_count = edesc->src_nents ?: 1;
1825	if (is_sec1 && sg_count > 1)
1826		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1827	else if (length)
1828		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1829				      DMA_TO_DEVICE);
1830	/*
1831	 * data in
1832	 */
1833	if (is_sec1 && req_ctx->nbuf) {
1834		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1835				       req_ctx->buf[req_ctx->buf_idx],
1836				       DMA_TO_DEVICE);
1837	} else {
1838		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1839					  &desc->ptr[3], sg_count, 0, 0);
1840		if (sg_count > 1)
1841			sync_needed = true;
1842	}
1843
1844	/* fifth DWORD empty */
1845
1846	/* hash/HMAC out -or- hash context out */
1847	if (req_ctx->last)
1848		map_single_talitos_ptr(dev, &desc->ptr[5],
1849				       crypto_ahash_digestsize(tfm),
1850				       req_ctx->hw_context, DMA_FROM_DEVICE);
1851	else
1852		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1853					      req_ctx->hw_context_size,
1854					      req_ctx->hw_context,
1855					      DMA_FROM_DEVICE);
1856
1857	/* last DWORD empty */
1858
1859	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1860		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1861
1862	if (is_sec1 && req_ctx->nbuf && length) {
1863		struct talitos_desc *desc2 = (struct talitos_desc *)
1864					     (edesc->buf + edesc->dma_len);
1865		dma_addr_t next_desc;
1866
1867		memset(desc2, 0, sizeof(*desc2));
1868		desc2->hdr = desc->hdr;
1869		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1870		desc2->hdr1 = desc2->hdr;
1871		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1872		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1873		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1874
1875		if (desc->ptr[1].ptr)
1876			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1877					 is_sec1);
1878		else
1879			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1880						      req_ctx->hw_context_size,
1881						      req_ctx->hw_context,
1882						      DMA_TO_DEVICE);
1883		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1884		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1885					  &desc2->ptr[3], sg_count, 0, 0);
1886		if (sg_count > 1)
1887			sync_needed = true;
1888		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1889		if (req_ctx->last)
1890			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1891						      req_ctx->hw_context_size,
1892						      req_ctx->hw_context,
1893						      DMA_FROM_DEVICE);
1894
1895		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1896					   DMA_BIDIRECTIONAL);
1897		desc->next_desc = cpu_to_be32(next_desc);
1898	}
1899
1900	if (sync_needed)
1901		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1902					   edesc->dma_len, DMA_BIDIRECTIONAL);
1903
1904	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1905	if (ret != -EINPROGRESS) {
1906		common_nonsnoop_hash_unmap(dev, edesc, areq);
1907		kfree(edesc);
1908	}
1909	return ret;
1910}
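/*
 * Note on the SEC1 two-descriptor path above: when both buffered bytes
 * (req_ctx->nbuf) and fresh data are pending, the first descriptor hashes
 * the buffered partial block (CONT set, PAD and DONE_NOTIFY cleared) and
 * desc2, linked in via desc->next_desc, continues with the new data and,
 * on the final request, writes out the digest.
 */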
1911
1912static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1913					       unsigned int nbytes)
1914{
1915	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1916	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1917	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1918	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1919	bool is_sec1 = has_ftr_sec1(priv);
1920
1921	if (is_sec1)
1922		nbytes -= req_ctx->nbuf;
1923
1924	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1925				   nbytes, 0, 0, 0, areq->base.flags, false);
1926}
1927
1928static int ahash_init(struct ahash_request *areq)
1929{
1930	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1931	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1932	struct device *dev = ctx->dev;
1933	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1934	unsigned int size;
1935	dma_addr_t dma;
1936
1937	/* Initialize the context */
1938	req_ctx->buf_idx = 0;
1939	req_ctx->nbuf = 0;
1940	req_ctx->first = 1; /* first indicates h/w must init its context */
1941	req_ctx->swinit = 0; /* assume h/w init of context */
1942	size =	(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1943			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1944			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1945	req_ctx->hw_context_size = size;
1946
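	/*
	 * No data is transferred by this map/unmap pair; it presumably only
	 * performs the CPU cache maintenance for hw_context that the later
	 * _nosync mappings skip.
	 */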
1947	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1948			     DMA_TO_DEVICE);
1949	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1950
1951	return 0;
1952}
1953
1954/*
1955 * on h/w without explicit sha224 support, we initialize h/w context
1956 * manually with sha224 constants, and tell it to run sha256.
1957 */
1958static int ahash_init_sha224_swinit(struct ahash_request *areq)
1959{
1960	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1961
1962	req_ctx->hw_context[0] = SHA224_H0;
1963	req_ctx->hw_context[1] = SHA224_H1;
1964	req_ctx->hw_context[2] = SHA224_H2;
1965	req_ctx->hw_context[3] = SHA224_H3;
1966	req_ctx->hw_context[4] = SHA224_H4;
1967	req_ctx->hw_context[5] = SHA224_H5;
1968	req_ctx->hw_context[6] = SHA224_H6;
1969	req_ctx->hw_context[7] = SHA224_H7;
1970
1971	/* init 64-bit count */
1972	req_ctx->hw_context[8] = 0;
1973	req_ctx->hw_context[9] = 0;
1974
1975	ahash_init(areq);
1976	req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
1977
1978	return 0;
1979}
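/*
 * SHA224_H0..SHA224_H7 above are the SHA-224 initial hash values from
 * FIPS 180-4.  Running the SHA-256 compression function from this state
 * and truncating the output to 224 bits is, by definition, SHA-224,
 * which is why the descriptor can keep using the engine's sha256 mode.
 */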
1980
1981static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1982{
1983	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1984	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1985	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1986	struct talitos_edesc *edesc;
1987	unsigned int blocksize =
1988			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1989	unsigned int nbytes_to_hash;
1990	unsigned int to_hash_later;
1991	unsigned int nsg;
1992	int nents;
1993	struct device *dev = ctx->dev;
1994	struct talitos_private *priv = dev_get_drvdata(dev);
1995	bool is_sec1 = has_ftr_sec1(priv);
1996	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1997
1998	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1999		/* Buffer up to one whole block */
2000		nents = sg_nents_for_len(areq->src, nbytes);
2001		if (nents < 0) {
2002			dev_err(dev, "Invalid number of src SG.\n");
2003			return nents;
2004		}
2005		sg_copy_to_buffer(areq->src, nents,
2006				  ctx_buf + req_ctx->nbuf, nbytes);
2007		req_ctx->nbuf += nbytes;
2008		return 0;
2009	}
2010
2011	/* At least (blocksize + 1) bytes are available to hash */
2012	nbytes_to_hash = nbytes + req_ctx->nbuf;
2013	to_hash_later = nbytes_to_hash & (blocksize - 1);
2014
2015	if (req_ctx->last)
2016		to_hash_later = 0;
2017	else if (to_hash_later)
2018		/* There is a partial block. Hash the full block(s) now */
2019		nbytes_to_hash -= to_hash_later;
2020	else {
2021		/* Keep one block buffered */
2022		nbytes_to_hash -= blocksize;
2023		to_hash_later = blocksize;
2024	}
2025
2026	/* Chain in any previously buffered data */
2027	if (!is_sec1 && req_ctx->nbuf) {
2028		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2029		sg_init_table(req_ctx->bufsl, nsg);
2030		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2031		if (nsg > 1)
2032			sg_chain(req_ctx->bufsl, 2, areq->src);
2033		req_ctx->psrc = req_ctx->bufsl;
2034	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2035		int offset;
2036
2037		if (nbytes_to_hash > blocksize)
2038			offset = blocksize - req_ctx->nbuf;
2039		else
2040			offset = nbytes_to_hash - req_ctx->nbuf;
2041		nents = sg_nents_for_len(areq->src, offset);
2042		if (nents < 0) {
2043			dev_err(dev, "Invalid number of src SG.\n");
2044			return nents;
2045		}
2046		sg_copy_to_buffer(areq->src, nents,
2047				  ctx_buf + req_ctx->nbuf, offset);
2048		req_ctx->nbuf += offset;
2049		req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2050						 offset);
2051	} else
2052		req_ctx->psrc = areq->src;
2053
2054	if (to_hash_later) {
2055		nents = sg_nents_for_len(areq->src, nbytes);
2056		if (nents < 0) {
2057			dev_err(dev, "Invalid number of src SG.\n");
2058			return nents;
2059		}
2060		sg_pcopy_to_buffer(areq->src, nents,
2061				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2062				   to_hash_later,
2063				   nbytes - to_hash_later);
2064	}
2065	req_ctx->to_hash_later = to_hash_later;
2066
2067	/* Allocate extended descriptor */
2068	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2069	if (IS_ERR(edesc))
2070		return PTR_ERR(edesc);
2071
2072	edesc->desc.hdr = ctx->desc_hdr_template;
2073
2074	/* On last one, request SEC to pad; otherwise continue */
2075	if (req_ctx->last)
2076		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2077	else
2078		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2079
2080	/* request SEC to INIT hash. */
2081	if (req_ctx->first && !req_ctx->swinit)
2082		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2083
2084	/* When the tfm context has a keylen, it's an HMAC.
2085	 * A first or last (i.e. not middle) descriptor must request HMAC.
2086	 */
2087	if (ctx->keylen && (req_ctx->first || req_ctx->last))
2088		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2089
2090	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
2091}
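/*
 * Worked example of the buffering above, with blocksize = 64 and an empty
 * buffer: a non-final update of 100 bytes gives nbytes_to_hash = 100 and
 * to_hash_later = 36, so 64 bytes are hashed now and 36 are parked in the
 * spare buf[] for the next call.  A non-final update of exactly 128 bytes
 * hashes only 64 and holds a whole block back, so the final request always
 * has data left to hash and pad.
 */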
2092
2093static int ahash_update(struct ahash_request *areq)
2094{
2095	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2096
2097	req_ctx->last = 0;
2098
2099	return ahash_process_req(areq, areq->nbytes);
2100}
2101
2102static int ahash_final(struct ahash_request *areq)
2103{
2104	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2105
2106	req_ctx->last = 1;
2107
2108	return ahash_process_req(areq, 0);
2109}
2110
2111static int ahash_finup(struct ahash_request *areq)
2112{
2113	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2114
2115	req_ctx->last = 1;
2116
2117	return ahash_process_req(areq, areq->nbytes);
2118}
2119
2120static int ahash_digest(struct ahash_request *areq)
2121{
2122	ahash_init(areq);
2123	return ahash_finup(areq);
2124}
2125
2126static int ahash_digest_sha224_swinit(struct ahash_request *areq)
2127{
2128	ahash_init_sha224_swinit(areq);
2129	return ahash_finup(areq);
2130}
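/*
 * Minimal usage sketch (not part of this driver): a one-shot digest via
 * the generic crypto API ends up in ahash_digest() above.  "data" and
 * "len" are hypothetical caller-provided values; error handling omitted.
 */
#if 0
	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
	DECLARE_CRYPTO_WAIT(wait);
	u8 digest[SHA256_DIGEST_SIZE];
	struct scatterlist sg;

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, digest, len);
	crypto_wait_req(crypto_ahash_digest(req), &wait);
	ahash_request_free(req);
	crypto_free_ahash(tfm);
#endif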
2131
2132static int ahash_export(struct ahash_request *areq, void *out)
2133{
2134	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2135	struct talitos_export_state *export = out;
2136	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2137	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2138	struct device *dev = ctx->dev;
2139	dma_addr_t dma;
2140
2141	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2142			     DMA_FROM_DEVICE);
2143	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2144
2145	memcpy(export->hw_context, req_ctx->hw_context,
2146	       req_ctx->hw_context_size);
2147	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2148	export->swinit = req_ctx->swinit;
2149	export->first = req_ctx->first;
2150	export->last = req_ctx->last;
2151	export->to_hash_later = req_ctx->to_hash_later;
2152	export->nbuf = req_ctx->nbuf;
2153
2154	return 0;
2155}
2156
2157static int ahash_import(struct ahash_request *areq, const void *in)
2158{
2159	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2160	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2161	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2162	struct device *dev = ctx->dev;
2163	const struct talitos_export_state *export = in;
2164	unsigned int size;
2165	dma_addr_t dma;
2166
2167	memset(req_ctx, 0, sizeof(*req_ctx));
2168	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2169			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2170			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2171	req_ctx->hw_context_size = size;
2172	memcpy(req_ctx->hw_context, export->hw_context, size);
2173	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2174	req_ctx->swinit = export->swinit;
2175	req_ctx->first = export->first;
2176	req_ctx->last = export->last;
2177	req_ctx->to_hash_later = export->to_hash_later;
2178	req_ctx->nbuf = export->nbuf;
2179
2180	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2181			     DMA_TO_DEVICE);
2182	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2183
2184	return 0;
2185}
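/*
 * export/import let a user freeze a partial hash and resume it later,
 * possibly on another request: crypto_ahash_export(req, state) after some
 * updates, then crypto_ahash_import(req2, state) to continue.  The
 * map/unmap pairs here are, as in ahash_init(), cache maintenance for
 * hw_context rather than real transfers.
 */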
2186
2187static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2188		   u8 *hash)
2189{
2190	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2191
2192	struct scatterlist sg[1];
2193	struct ahash_request *req;
2194	struct crypto_wait wait;
2195	int ret;
2196
2197	crypto_init_wait(&wait);
2198
2199	req = ahash_request_alloc(tfm, GFP_KERNEL);
2200	if (!req)
2201		return -ENOMEM;
2202
2203	/* Keep tfm keylen == 0 during hash of the long key */
2204	ctx->keylen = 0;
2205	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2206				   crypto_req_done, &wait);
2207
2208	sg_init_one(&sg[0], key, keylen);
2209
2210	ahash_request_set_crypt(req, sg, hash, keylen);
2211	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2212
2213	ahash_request_free(req);
2214
2215	return ret;
2216}
2217
2218static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2219			unsigned int keylen)
2220{
2221	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2222	struct device *dev = ctx->dev;
2223	unsigned int blocksize =
2224			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2225	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2226	unsigned int keysize = keylen;
2227	u8 hash[SHA512_DIGEST_SIZE];
2228	int ret;
2229
2230	if (keylen <= blocksize)
2231		memcpy(ctx->key, key, keysize);
2232	else {
2233		/* Must get the hash of the long key */
2234		ret = keyhash(tfm, key, keylen, hash);
2235
2236		if (ret)
2237			return -EINVAL;
2238
2239		keysize = digestsize;
2240		memcpy(ctx->key, hash, digestsize);
2241	}
2242
2243	if (ctx->keylen)
2244		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2245
2246	ctx->keylen = keysize;
2247	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2248
2249	return 0;
2250}
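/*
 * This follows the usual HMAC rule (RFC 2104): a key longer than the
 * block size is first replaced by its digest, so ctx->key never has to
 * hold more than max(blocksize, digestsize) bytes.
 */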
2251
2252
2253struct talitos_alg_template {
2254	u32 type;
2255	u32 priority;
2256	union {
2257		struct skcipher_alg skcipher;
2258		struct ahash_alg hash;
2259		struct aead_alg aead;
2260	} alg;
2261	__be32 desc_hdr_template;
2262};
2263
2264static struct talitos_alg_template driver_algs[] = {
2265	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2266	{	.type = CRYPTO_ALG_TYPE_AEAD,
2267		.alg.aead = {
2268			.base = {
2269				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2270				.cra_driver_name = "authenc-hmac-sha1-"
2271						   "cbc-aes-talitos",
2272				.cra_blocksize = AES_BLOCK_SIZE,
2273				.cra_flags = CRYPTO_ALG_ASYNC |
2274					     CRYPTO_ALG_ALLOCATES_MEMORY,
2275			},
2276			.ivsize = AES_BLOCK_SIZE,
2277			.maxauthsize = SHA1_DIGEST_SIZE,
2278		},
2279		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2280			             DESC_HDR_SEL0_AESU |
2281		                     DESC_HDR_MODE0_AESU_CBC |
2282		                     DESC_HDR_SEL1_MDEUA |
2283		                     DESC_HDR_MODE1_MDEU_INIT |
2284		                     DESC_HDR_MODE1_MDEU_PAD |
2285		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2286	},
2287	{	.type = CRYPTO_ALG_TYPE_AEAD,
2288		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2289		.alg.aead = {
2290			.base = {
2291				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2292				.cra_driver_name = "authenc-hmac-sha1-"
2293						   "cbc-aes-talitos-hsna",
2294				.cra_blocksize = AES_BLOCK_SIZE,
2295				.cra_flags = CRYPTO_ALG_ASYNC |
2296					     CRYPTO_ALG_ALLOCATES_MEMORY,
2297			},
2298			.ivsize = AES_BLOCK_SIZE,
2299			.maxauthsize = SHA1_DIGEST_SIZE,
2300		},
2301		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2302				     DESC_HDR_SEL0_AESU |
2303				     DESC_HDR_MODE0_AESU_CBC |
2304				     DESC_HDR_SEL1_MDEUA |
2305				     DESC_HDR_MODE1_MDEU_INIT |
2306				     DESC_HDR_MODE1_MDEU_PAD |
2307				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2308	},
2309	{	.type = CRYPTO_ALG_TYPE_AEAD,
2310		.alg.aead = {
2311			.base = {
2312				.cra_name = "authenc(hmac(sha1),"
2313					    "cbc(des3_ede))",
2314				.cra_driver_name = "authenc-hmac-sha1-"
2315						   "cbc-3des-talitos",
2316				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2317				.cra_flags = CRYPTO_ALG_ASYNC |
2318					     CRYPTO_ALG_ALLOCATES_MEMORY,
2319			},
2320			.ivsize = DES3_EDE_BLOCK_SIZE,
2321			.maxauthsize = SHA1_DIGEST_SIZE,
2322			.setkey = aead_des3_setkey,
2323		},
2324		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2325			             DESC_HDR_SEL0_DEU |
2326		                     DESC_HDR_MODE0_DEU_CBC |
2327		                     DESC_HDR_MODE0_DEU_3DES |
2328		                     DESC_HDR_SEL1_MDEUA |
2329		                     DESC_HDR_MODE1_MDEU_INIT |
2330		                     DESC_HDR_MODE1_MDEU_PAD |
2331		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2332	},
2333	{	.type = CRYPTO_ALG_TYPE_AEAD,
2334		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2335		.alg.aead = {
2336			.base = {
2337				.cra_name = "authenc(hmac(sha1),"
2338					    "cbc(des3_ede))",
2339				.cra_driver_name = "authenc-hmac-sha1-"
2340						   "cbc-3des-talitos-hsna",
2341				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2342				.cra_flags = CRYPTO_ALG_ASYNC |
2343					     CRYPTO_ALG_ALLOCATES_MEMORY,
2344			},
2345			.ivsize = DES3_EDE_BLOCK_SIZE,
2346			.maxauthsize = SHA1_DIGEST_SIZE,
2347			.setkey = aead_des3_setkey,
2348		},
2349		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2350				     DESC_HDR_SEL0_DEU |
2351				     DESC_HDR_MODE0_DEU_CBC |
2352				     DESC_HDR_MODE0_DEU_3DES |
2353				     DESC_HDR_SEL1_MDEUA |
2354				     DESC_HDR_MODE1_MDEU_INIT |
2355				     DESC_HDR_MODE1_MDEU_PAD |
2356				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2357	},
2358	{       .type = CRYPTO_ALG_TYPE_AEAD,
2359		.alg.aead = {
2360			.base = {
2361				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2362				.cra_driver_name = "authenc-hmac-sha224-"
2363						   "cbc-aes-talitos",
2364				.cra_blocksize = AES_BLOCK_SIZE,
2365				.cra_flags = CRYPTO_ALG_ASYNC |
2366					     CRYPTO_ALG_ALLOCATES_MEMORY,
2367			},
2368			.ivsize = AES_BLOCK_SIZE,
2369			.maxauthsize = SHA224_DIGEST_SIZE,
2370		},
2371		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2372				     DESC_HDR_SEL0_AESU |
2373				     DESC_HDR_MODE0_AESU_CBC |
2374				     DESC_HDR_SEL1_MDEUA |
2375				     DESC_HDR_MODE1_MDEU_INIT |
2376				     DESC_HDR_MODE1_MDEU_PAD |
2377				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2378	},
2379	{       .type = CRYPTO_ALG_TYPE_AEAD,
2380		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2381		.alg.aead = {
2382			.base = {
2383				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2384				.cra_driver_name = "authenc-hmac-sha224-"
2385						   "cbc-aes-talitos-hsna",
2386				.cra_blocksize = AES_BLOCK_SIZE,
2387				.cra_flags = CRYPTO_ALG_ASYNC |
2388					     CRYPTO_ALG_ALLOCATES_MEMORY,
2389			},
2390			.ivsize = AES_BLOCK_SIZE,
2391			.maxauthsize = SHA224_DIGEST_SIZE,
2392		},
2393		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2394				     DESC_HDR_SEL0_AESU |
2395				     DESC_HDR_MODE0_AESU_CBC |
2396				     DESC_HDR_SEL1_MDEUA |
2397				     DESC_HDR_MODE1_MDEU_INIT |
2398				     DESC_HDR_MODE1_MDEU_PAD |
2399				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2400	},
2401	{	.type = CRYPTO_ALG_TYPE_AEAD,
2402		.alg.aead = {
2403			.base = {
2404				.cra_name = "authenc(hmac(sha224),"
2405					    "cbc(des3_ede))",
2406				.cra_driver_name = "authenc-hmac-sha224-"
2407						   "cbc-3des-talitos",
2408				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2409				.cra_flags = CRYPTO_ALG_ASYNC |
2410					     CRYPTO_ALG_ALLOCATES_MEMORY,
2411			},
2412			.ivsize = DES3_EDE_BLOCK_SIZE,
2413			.maxauthsize = SHA224_DIGEST_SIZE,
2414			.setkey = aead_des3_setkey,
2415		},
2416		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2417			             DESC_HDR_SEL0_DEU |
2418		                     DESC_HDR_MODE0_DEU_CBC |
2419		                     DESC_HDR_MODE0_DEU_3DES |
2420		                     DESC_HDR_SEL1_MDEUA |
2421		                     DESC_HDR_MODE1_MDEU_INIT |
2422		                     DESC_HDR_MODE1_MDEU_PAD |
2423		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2424	},
2425	{	.type = CRYPTO_ALG_TYPE_AEAD,
2426		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2427		.alg.aead = {
2428			.base = {
2429				.cra_name = "authenc(hmac(sha224),"
2430					    "cbc(des3_ede))",
2431				.cra_driver_name = "authenc-hmac-sha224-"
2432						   "cbc-3des-talitos-hsna",
2433				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2434				.cra_flags = CRYPTO_ALG_ASYNC |
2435					     CRYPTO_ALG_ALLOCATES_MEMORY,
2436			},
2437			.ivsize = DES3_EDE_BLOCK_SIZE,
2438			.maxauthsize = SHA224_DIGEST_SIZE,
2439			.setkey = aead_des3_setkey,
2440		},
2441		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2442				     DESC_HDR_SEL0_DEU |
2443				     DESC_HDR_MODE0_DEU_CBC |
2444				     DESC_HDR_MODE0_DEU_3DES |
2445				     DESC_HDR_SEL1_MDEUA |
2446				     DESC_HDR_MODE1_MDEU_INIT |
2447				     DESC_HDR_MODE1_MDEU_PAD |
2448				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2449	},
2450	{	.type = CRYPTO_ALG_TYPE_AEAD,
2451		.alg.aead = {
2452			.base = {
2453				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2454				.cra_driver_name = "authenc-hmac-sha256-"
2455						   "cbc-aes-talitos",
2456				.cra_blocksize = AES_BLOCK_SIZE,
2457				.cra_flags = CRYPTO_ALG_ASYNC |
2458					     CRYPTO_ALG_ALLOCATES_MEMORY,
2459			},
2460			.ivsize = AES_BLOCK_SIZE,
2461			.maxauthsize = SHA256_DIGEST_SIZE,
2462		},
2463		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2464			             DESC_HDR_SEL0_AESU |
2465		                     DESC_HDR_MODE0_AESU_CBC |
2466		                     DESC_HDR_SEL1_MDEUA |
2467		                     DESC_HDR_MODE1_MDEU_INIT |
2468		                     DESC_HDR_MODE1_MDEU_PAD |
2469		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2470	},
2471	{	.type = CRYPTO_ALG_TYPE_AEAD,
2472		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2473		.alg.aead = {
2474			.base = {
2475				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2476				.cra_driver_name = "authenc-hmac-sha256-"
2477						   "cbc-aes-talitos-hsna",
2478				.cra_blocksize = AES_BLOCK_SIZE,
2479				.cra_flags = CRYPTO_ALG_ASYNC |
2480					     CRYPTO_ALG_ALLOCATES_MEMORY,
2481			},
2482			.ivsize = AES_BLOCK_SIZE,
2483			.maxauthsize = SHA256_DIGEST_SIZE,
2484		},
2485		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2486				     DESC_HDR_SEL0_AESU |
2487				     DESC_HDR_MODE0_AESU_CBC |
2488				     DESC_HDR_SEL1_MDEUA |
2489				     DESC_HDR_MODE1_MDEU_INIT |
2490				     DESC_HDR_MODE1_MDEU_PAD |
2491				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2492	},
2493	{	.type = CRYPTO_ALG_TYPE_AEAD,
2494		.alg.aead = {
2495			.base = {
2496				.cra_name = "authenc(hmac(sha256),"
2497					    "cbc(des3_ede))",
2498				.cra_driver_name = "authenc-hmac-sha256-"
2499						   "cbc-3des-talitos",
2500				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2501				.cra_flags = CRYPTO_ALG_ASYNC |
2502					     CRYPTO_ALG_ALLOCATES_MEMORY,
2503			},
2504			.ivsize = DES3_EDE_BLOCK_SIZE,
2505			.maxauthsize = SHA256_DIGEST_SIZE,
2506			.setkey = aead_des3_setkey,
2507		},
2508		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2509			             DESC_HDR_SEL0_DEU |
2510		                     DESC_HDR_MODE0_DEU_CBC |
2511		                     DESC_HDR_MODE0_DEU_3DES |
2512		                     DESC_HDR_SEL1_MDEUA |
2513		                     DESC_HDR_MODE1_MDEU_INIT |
2514		                     DESC_HDR_MODE1_MDEU_PAD |
2515		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2516	},
2517	{	.type = CRYPTO_ALG_TYPE_AEAD,
2518		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2519		.alg.aead = {
2520			.base = {
2521				.cra_name = "authenc(hmac(sha256),"
2522					    "cbc(des3_ede))",
2523				.cra_driver_name = "authenc-hmac-sha256-"
2524						   "cbc-3des-talitos-hsna",
2525				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2526				.cra_flags = CRYPTO_ALG_ASYNC |
2527					     CRYPTO_ALG_ALLOCATES_MEMORY,
2528			},
2529			.ivsize = DES3_EDE_BLOCK_SIZE,
2530			.maxauthsize = SHA256_DIGEST_SIZE,
2531			.setkey = aead_des3_setkey,
2532		},
2533		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2534				     DESC_HDR_SEL0_DEU |
2535				     DESC_HDR_MODE0_DEU_CBC |
2536				     DESC_HDR_MODE0_DEU_3DES |
2537				     DESC_HDR_SEL1_MDEUA |
2538				     DESC_HDR_MODE1_MDEU_INIT |
2539				     DESC_HDR_MODE1_MDEU_PAD |
2540				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2541	},
2542	{	.type = CRYPTO_ALG_TYPE_AEAD,
2543		.alg.aead = {
2544			.base = {
2545				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2546				.cra_driver_name = "authenc-hmac-sha384-"
2547						   "cbc-aes-talitos",
2548				.cra_blocksize = AES_BLOCK_SIZE,
2549				.cra_flags = CRYPTO_ALG_ASYNC |
2550					     CRYPTO_ALG_ALLOCATES_MEMORY,
2551			},
2552			.ivsize = AES_BLOCK_SIZE,
2553			.maxauthsize = SHA384_DIGEST_SIZE,
2554		},
2555		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2556			             DESC_HDR_SEL0_AESU |
2557		                     DESC_HDR_MODE0_AESU_CBC |
2558		                     DESC_HDR_SEL1_MDEUB |
2559		                     DESC_HDR_MODE1_MDEU_INIT |
2560		                     DESC_HDR_MODE1_MDEU_PAD |
2561		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2562	},
2563	{	.type = CRYPTO_ALG_TYPE_AEAD,
2564		.alg.aead = {
2565			.base = {
2566				.cra_name = "authenc(hmac(sha384),"
2567					    "cbc(des3_ede))",
2568				.cra_driver_name = "authenc-hmac-sha384-"
2569						   "cbc-3des-talitos",
2570				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2571				.cra_flags = CRYPTO_ALG_ASYNC |
2572					     CRYPTO_ALG_ALLOCATES_MEMORY,
2573			},
2574			.ivsize = DES3_EDE_BLOCK_SIZE,
2575			.maxauthsize = SHA384_DIGEST_SIZE,
2576			.setkey = aead_des3_setkey,
2577		},
2578		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2579			             DESC_HDR_SEL0_DEU |
2580		                     DESC_HDR_MODE0_DEU_CBC |
2581		                     DESC_HDR_MODE0_DEU_3DES |
2582		                     DESC_HDR_SEL1_MDEUB |
2583		                     DESC_HDR_MODE1_MDEU_INIT |
2584		                     DESC_HDR_MODE1_MDEU_PAD |
2585		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2586	},
2587	{	.type = CRYPTO_ALG_TYPE_AEAD,
2588		.alg.aead = {
2589			.base = {
2590				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2591				.cra_driver_name = "authenc-hmac-sha512-"
2592						   "cbc-aes-talitos",
2593				.cra_blocksize = AES_BLOCK_SIZE,
2594				.cra_flags = CRYPTO_ALG_ASYNC |
2595					     CRYPTO_ALG_ALLOCATES_MEMORY,
2596			},
2597			.ivsize = AES_BLOCK_SIZE,
2598			.maxauthsize = SHA512_DIGEST_SIZE,
2599		},
2600		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2601			             DESC_HDR_SEL0_AESU |
2602		                     DESC_HDR_MODE0_AESU_CBC |
2603		                     DESC_HDR_SEL1_MDEUB |
2604		                     DESC_HDR_MODE1_MDEU_INIT |
2605		                     DESC_HDR_MODE1_MDEU_PAD |
2606		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2607	},
2608	{	.type = CRYPTO_ALG_TYPE_AEAD,
2609		.alg.aead = {
2610			.base = {
2611				.cra_name = "authenc(hmac(sha512),"
2612					    "cbc(des3_ede))",
2613				.cra_driver_name = "authenc-hmac-sha512-"
2614						   "cbc-3des-talitos",
2615				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2616				.cra_flags = CRYPTO_ALG_ASYNC |
2617					     CRYPTO_ALG_ALLOCATES_MEMORY,
2618			},
2619			.ivsize = DES3_EDE_BLOCK_SIZE,
2620			.maxauthsize = SHA512_DIGEST_SIZE,
2621			.setkey = aead_des3_setkey,
2622		},
2623		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2624			             DESC_HDR_SEL0_DEU |
2625		                     DESC_HDR_MODE0_DEU_CBC |
2626		                     DESC_HDR_MODE0_DEU_3DES |
2627		                     DESC_HDR_SEL1_MDEUB |
2628		                     DESC_HDR_MODE1_MDEU_INIT |
2629		                     DESC_HDR_MODE1_MDEU_PAD |
2630		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2631	},
2632	{	.type = CRYPTO_ALG_TYPE_AEAD,
2633		.alg.aead = {
2634			.base = {
2635				.cra_name = "authenc(hmac(md5),cbc(aes))",
2636				.cra_driver_name = "authenc-hmac-md5-"
2637						   "cbc-aes-talitos",
2638				.cra_blocksize = AES_BLOCK_SIZE,
2639				.cra_flags = CRYPTO_ALG_ASYNC |
2640					     CRYPTO_ALG_ALLOCATES_MEMORY,
2641			},
2642			.ivsize = AES_BLOCK_SIZE,
2643			.maxauthsize = MD5_DIGEST_SIZE,
2644		},
2645		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2646			             DESC_HDR_SEL0_AESU |
2647		                     DESC_HDR_MODE0_AESU_CBC |
2648		                     DESC_HDR_SEL1_MDEUA |
2649		                     DESC_HDR_MODE1_MDEU_INIT |
2650		                     DESC_HDR_MODE1_MDEU_PAD |
2651		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2652	},
2653	{	.type = CRYPTO_ALG_TYPE_AEAD,
2654		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2655		.alg.aead = {
2656			.base = {
2657				.cra_name = "authenc(hmac(md5),cbc(aes))",
2658				.cra_driver_name = "authenc-hmac-md5-"
2659						   "cbc-aes-talitos-hsna",
2660				.cra_blocksize = AES_BLOCK_SIZE,
2661				.cra_flags = CRYPTO_ALG_ASYNC |
2662					     CRYPTO_ALG_ALLOCATES_MEMORY,
2663			},
2664			.ivsize = AES_BLOCK_SIZE,
2665			.maxauthsize = MD5_DIGEST_SIZE,
2666		},
2667		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2668				     DESC_HDR_SEL0_AESU |
2669				     DESC_HDR_MODE0_AESU_CBC |
2670				     DESC_HDR_SEL1_MDEUA |
2671				     DESC_HDR_MODE1_MDEU_INIT |
2672				     DESC_HDR_MODE1_MDEU_PAD |
2673				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2674	},
2675	{	.type = CRYPTO_ALG_TYPE_AEAD,
2676		.alg.aead = {
2677			.base = {
2678				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2679				.cra_driver_name = "authenc-hmac-md5-"
2680						   "cbc-3des-talitos",
2681				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2682				.cra_flags = CRYPTO_ALG_ASYNC |
2683					     CRYPTO_ALG_ALLOCATES_MEMORY,
2684			},
2685			.ivsize = DES3_EDE_BLOCK_SIZE,
2686			.maxauthsize = MD5_DIGEST_SIZE,
2687			.setkey = aead_des3_setkey,
2688		},
2689		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2690			             DESC_HDR_SEL0_DEU |
2691		                     DESC_HDR_MODE0_DEU_CBC |
2692		                     DESC_HDR_MODE0_DEU_3DES |
2693		                     DESC_HDR_SEL1_MDEUA |
2694		                     DESC_HDR_MODE1_MDEU_INIT |
2695		                     DESC_HDR_MODE1_MDEU_PAD |
2696		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2697	},
2698	{	.type = CRYPTO_ALG_TYPE_AEAD,
2699		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2700		.alg.aead = {
2701			.base = {
2702				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2703				.cra_driver_name = "authenc-hmac-md5-"
2704						   "cbc-3des-talitos-hsna",
2705				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2706				.cra_flags = CRYPTO_ALG_ASYNC |
2707					     CRYPTO_ALG_ALLOCATES_MEMORY,
2708			},
2709			.ivsize = DES3_EDE_BLOCK_SIZE,
2710			.maxauthsize = MD5_DIGEST_SIZE,
2711			.setkey = aead_des3_setkey,
2712		},
2713		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2714				     DESC_HDR_SEL0_DEU |
2715				     DESC_HDR_MODE0_DEU_CBC |
2716				     DESC_HDR_MODE0_DEU_3DES |
2717				     DESC_HDR_SEL1_MDEUA |
2718				     DESC_HDR_MODE1_MDEU_INIT |
2719				     DESC_HDR_MODE1_MDEU_PAD |
2720				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2721	},
2722	/* SKCIPHER algorithms. */
2723	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2724		.alg.skcipher = {
2725			.base.cra_name = "ecb(aes)",
2726			.base.cra_driver_name = "ecb-aes-talitos",
2727			.base.cra_blocksize = AES_BLOCK_SIZE,
2728			.base.cra_flags = CRYPTO_ALG_ASYNC |
2729					  CRYPTO_ALG_ALLOCATES_MEMORY,
2730			.min_keysize = AES_MIN_KEY_SIZE,
2731			.max_keysize = AES_MAX_KEY_SIZE,
2732			.setkey = skcipher_aes_setkey,
2733		},
2734		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2735				     DESC_HDR_SEL0_AESU,
2736	},
2737	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2738		.alg.skcipher = {
2739			.base.cra_name = "cbc(aes)",
2740			.base.cra_driver_name = "cbc-aes-talitos",
2741			.base.cra_blocksize = AES_BLOCK_SIZE,
2742			.base.cra_flags = CRYPTO_ALG_ASYNC |
2743					  CRYPTO_ALG_ALLOCATES_MEMORY,
2744			.min_keysize = AES_MIN_KEY_SIZE,
2745			.max_keysize = AES_MAX_KEY_SIZE,
2746			.ivsize = AES_BLOCK_SIZE,
2747			.setkey = skcipher_aes_setkey,
2748		},
2749		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2750				     DESC_HDR_SEL0_AESU |
2751				     DESC_HDR_MODE0_AESU_CBC,
2752	},
2753	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2754		.alg.skcipher = {
2755			.base.cra_name = "ctr(aes)",
2756			.base.cra_driver_name = "ctr-aes-talitos",
2757			.base.cra_blocksize = 1,
2758			.base.cra_flags = CRYPTO_ALG_ASYNC |
2759					  CRYPTO_ALG_ALLOCATES_MEMORY,
2760			.min_keysize = AES_MIN_KEY_SIZE,
2761			.max_keysize = AES_MAX_KEY_SIZE,
2762			.ivsize = AES_BLOCK_SIZE,
2763			.setkey = skcipher_aes_setkey,
2764		},
2765		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2766				     DESC_HDR_SEL0_AESU |
2767				     DESC_HDR_MODE0_AESU_CTR,
2768	},
2769	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2770		.alg.skcipher = {
2771			.base.cra_name = "ctr(aes)",
2772			.base.cra_driver_name = "ctr-aes-talitos",
2773			.base.cra_blocksize = 1,
2774			.base.cra_flags = CRYPTO_ALG_ASYNC |
2775					  CRYPTO_ALG_ALLOCATES_MEMORY,
2776			.min_keysize = AES_MIN_KEY_SIZE,
2777			.max_keysize = AES_MAX_KEY_SIZE,
2778			.ivsize = AES_BLOCK_SIZE,
2779			.setkey = skcipher_aes_setkey,
2780		},
2781		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2782				     DESC_HDR_SEL0_AESU |
2783				     DESC_HDR_MODE0_AESU_CTR,
2784	},
2785	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2786		.alg.skcipher = {
2787			.base.cra_name = "ecb(des)",
2788			.base.cra_driver_name = "ecb-des-talitos",
2789			.base.cra_blocksize = DES_BLOCK_SIZE,
2790			.base.cra_flags = CRYPTO_ALG_ASYNC |
2791					  CRYPTO_ALG_ALLOCATES_MEMORY,
2792			.min_keysize = DES_KEY_SIZE,
2793			.max_keysize = DES_KEY_SIZE,
2794			.setkey = skcipher_des_setkey,
2795		},
2796		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2797				     DESC_HDR_SEL0_DEU,
2798	},
2799	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2800		.alg.skcipher = {
2801			.base.cra_name = "cbc(des)",
2802			.base.cra_driver_name = "cbc-des-talitos",
2803			.base.cra_blocksize = DES_BLOCK_SIZE,
2804			.base.cra_flags = CRYPTO_ALG_ASYNC |
2805					  CRYPTO_ALG_ALLOCATES_MEMORY,
2806			.min_keysize = DES_KEY_SIZE,
2807			.max_keysize = DES_KEY_SIZE,
2808			.ivsize = DES_BLOCK_SIZE,
2809			.setkey = skcipher_des_setkey,
2810		},
2811		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2812				     DESC_HDR_SEL0_DEU |
2813				     DESC_HDR_MODE0_DEU_CBC,
2814	},
2815	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2816		.alg.skcipher = {
2817			.base.cra_name = "ecb(des3_ede)",
2818			.base.cra_driver_name = "ecb-3des-talitos",
2819			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2820			.base.cra_flags = CRYPTO_ALG_ASYNC |
2821					  CRYPTO_ALG_ALLOCATES_MEMORY,
2822			.min_keysize = DES3_EDE_KEY_SIZE,
2823			.max_keysize = DES3_EDE_KEY_SIZE,
2824			.setkey = skcipher_des3_setkey,
2825		},
2826		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2827				     DESC_HDR_SEL0_DEU |
2828				     DESC_HDR_MODE0_DEU_3DES,
2829	},
2830	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2831		.alg.skcipher = {
2832			.base.cra_name = "cbc(des3_ede)",
2833			.base.cra_driver_name = "cbc-3des-talitos",
2834			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2835			.base.cra_flags = CRYPTO_ALG_ASYNC |
2836					  CRYPTO_ALG_ALLOCATES_MEMORY,
2837			.min_keysize = DES3_EDE_KEY_SIZE,
2838			.max_keysize = DES3_EDE_KEY_SIZE,
2839			.ivsize = DES3_EDE_BLOCK_SIZE,
2840			.setkey = skcipher_des3_setkey,
2841		},
2842		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2843			             DESC_HDR_SEL0_DEU |
2844		                     DESC_HDR_MODE0_DEU_CBC |
2845		                     DESC_HDR_MODE0_DEU_3DES,
2846	},
2847	/* AHASH algorithms. */
2848	{	.type = CRYPTO_ALG_TYPE_AHASH,
2849		.alg.hash = {
2850			.halg.digestsize = MD5_DIGEST_SIZE,
2851			.halg.statesize = sizeof(struct talitos_export_state),
2852			.halg.base = {
2853				.cra_name = "md5",
2854				.cra_driver_name = "md5-talitos",
2855				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2856				.cra_flags = CRYPTO_ALG_ASYNC |
2857					     CRYPTO_ALG_ALLOCATES_MEMORY,
2858			}
2859		},
2860		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2861				     DESC_HDR_SEL0_MDEUA |
2862				     DESC_HDR_MODE0_MDEU_MD5,
2863	},
2864	{	.type = CRYPTO_ALG_TYPE_AHASH,
2865		.alg.hash = {
2866			.halg.digestsize = SHA1_DIGEST_SIZE,
2867			.halg.statesize = sizeof(struct talitos_export_state),
2868			.halg.base = {
2869				.cra_name = "sha1",
2870				.cra_driver_name = "sha1-talitos",
2871				.cra_blocksize = SHA1_BLOCK_SIZE,
2872				.cra_flags = CRYPTO_ALG_ASYNC |
2873					     CRYPTO_ALG_ALLOCATES_MEMORY,
2874			}
2875		},
2876		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2877				     DESC_HDR_SEL0_MDEUA |
2878				     DESC_HDR_MODE0_MDEU_SHA1,
2879	},
2880	{	.type = CRYPTO_ALG_TYPE_AHASH,
2881		.alg.hash = {
2882			.halg.digestsize = SHA224_DIGEST_SIZE,
2883			.halg.statesize = sizeof(struct talitos_export_state),
2884			.halg.base = {
2885				.cra_name = "sha224",
2886				.cra_driver_name = "sha224-talitos",
2887				.cra_blocksize = SHA224_BLOCK_SIZE,
2888				.cra_flags = CRYPTO_ALG_ASYNC |
2889					     CRYPTO_ALG_ALLOCATES_MEMORY,
2890			}
2891		},
2892		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2893				     DESC_HDR_SEL0_MDEUA |
2894				     DESC_HDR_MODE0_MDEU_SHA224,
2895	},
2896	{	.type = CRYPTO_ALG_TYPE_AHASH,
2897		.alg.hash = {
2898			.halg.digestsize = SHA256_DIGEST_SIZE,
2899			.halg.statesize = sizeof(struct talitos_export_state),
2900			.halg.base = {
2901				.cra_name = "sha256",
2902				.cra_driver_name = "sha256-talitos",
2903				.cra_blocksize = SHA256_BLOCK_SIZE,
2904				.cra_flags = CRYPTO_ALG_ASYNC |
2905					     CRYPTO_ALG_ALLOCATES_MEMORY,
2906			}
2907		},
2908		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2909				     DESC_HDR_SEL0_MDEUA |
2910				     DESC_HDR_MODE0_MDEU_SHA256,
2911	},
2912	{	.type = CRYPTO_ALG_TYPE_AHASH,
2913		.alg.hash = {
2914			.halg.digestsize = SHA384_DIGEST_SIZE,
2915			.halg.statesize = sizeof(struct talitos_export_state),
2916			.halg.base = {
2917				.cra_name = "sha384",
2918				.cra_driver_name = "sha384-talitos",
2919				.cra_blocksize = SHA384_BLOCK_SIZE,
2920				.cra_flags = CRYPTO_ALG_ASYNC |
2921					     CRYPTO_ALG_ALLOCATES_MEMORY,
2922			}
2923		},
2924		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2925				     DESC_HDR_SEL0_MDEUB |
2926				     DESC_HDR_MODE0_MDEUB_SHA384,
2927	},
2928	{	.type = CRYPTO_ALG_TYPE_AHASH,
2929		.alg.hash = {
2930			.halg.digestsize = SHA512_DIGEST_SIZE,
2931			.halg.statesize = sizeof(struct talitos_export_state),
2932			.halg.base = {
2933				.cra_name = "sha512",
2934				.cra_driver_name = "sha512-talitos",
2935				.cra_blocksize = SHA512_BLOCK_SIZE,
2936				.cra_flags = CRYPTO_ALG_ASYNC |
2937					     CRYPTO_ALG_ALLOCATES_MEMORY,
2938			}
2939		},
2940		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2941				     DESC_HDR_SEL0_MDEUB |
2942				     DESC_HDR_MODE0_MDEUB_SHA512,
2943	},
2944	{	.type = CRYPTO_ALG_TYPE_AHASH,
2945		.alg.hash = {
2946			.halg.digestsize = MD5_DIGEST_SIZE,
2947			.halg.statesize = sizeof(struct talitos_export_state),
2948			.halg.base = {
2949				.cra_name = "hmac(md5)",
2950				.cra_driver_name = "hmac-md5-talitos",
2951				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2952				.cra_flags = CRYPTO_ALG_ASYNC |
2953					     CRYPTO_ALG_ALLOCATES_MEMORY,
2954			}
2955		},
2956		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2957				     DESC_HDR_SEL0_MDEUA |
2958				     DESC_HDR_MODE0_MDEU_MD5,
2959	},
2960	{	.type = CRYPTO_ALG_TYPE_AHASH,
2961		.alg.hash = {
2962			.halg.digestsize = SHA1_DIGEST_SIZE,
2963			.halg.statesize = sizeof(struct talitos_export_state),
2964			.halg.base = {
2965				.cra_name = "hmac(sha1)",
2966				.cra_driver_name = "hmac-sha1-talitos",
2967				.cra_blocksize = SHA1_BLOCK_SIZE,
2968				.cra_flags = CRYPTO_ALG_ASYNC |
2969					     CRYPTO_ALG_ALLOCATES_MEMORY,
2970			}
2971		},
2972		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2973				     DESC_HDR_SEL0_MDEUA |
2974				     DESC_HDR_MODE0_MDEU_SHA1,
2975	},
2976	{	.type = CRYPTO_ALG_TYPE_AHASH,
2977		.alg.hash = {
2978			.halg.digestsize = SHA224_DIGEST_SIZE,
2979			.halg.statesize = sizeof(struct talitos_export_state),
2980			.halg.base = {
2981				.cra_name = "hmac(sha224)",
2982				.cra_driver_name = "hmac-sha224-talitos",
2983				.cra_blocksize = SHA224_BLOCK_SIZE,
2984				.cra_flags = CRYPTO_ALG_ASYNC |
2985					     CRYPTO_ALG_ALLOCATES_MEMORY,
2986			}
2987		},
2988		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2989				     DESC_HDR_SEL0_MDEUA |
2990				     DESC_HDR_MODE0_MDEU_SHA224,
2991	},
2992	{	.type = CRYPTO_ALG_TYPE_AHASH,
2993		.alg.hash = {
2994			.halg.digestsize = SHA256_DIGEST_SIZE,
2995			.halg.statesize = sizeof(struct talitos_export_state),
2996			.halg.base = {
2997				.cra_name = "hmac(sha256)",
2998				.cra_driver_name = "hmac-sha256-talitos",
2999				.cra_blocksize = SHA256_BLOCK_SIZE,
3000				.cra_flags = CRYPTO_ALG_ASYNC |
3001					     CRYPTO_ALG_ALLOCATES_MEMORY,
3002			}
3003		},
3004		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3005				     DESC_HDR_SEL0_MDEUA |
3006				     DESC_HDR_MODE0_MDEU_SHA256,
3007	},
3008	{	.type = CRYPTO_ALG_TYPE_AHASH,
3009		.alg.hash = {
3010			.halg.digestsize = SHA384_DIGEST_SIZE,
3011			.halg.statesize = sizeof(struct talitos_export_state),
3012			.halg.base = {
3013				.cra_name = "hmac(sha384)",
3014				.cra_driver_name = "hmac-sha384-talitos",
3015				.cra_blocksize = SHA384_BLOCK_SIZE,
3016				.cra_flags = CRYPTO_ALG_ASYNC |
3017					     CRYPTO_ALG_ALLOCATES_MEMORY,
3018			}
3019		},
3020		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3021				     DESC_HDR_SEL0_MDEUB |
3022				     DESC_HDR_MODE0_MDEUB_SHA384,
3023	},
3024	{	.type = CRYPTO_ALG_TYPE_AHASH,
3025		.alg.hash = {
3026			.halg.digestsize = SHA512_DIGEST_SIZE,
3027			.halg.statesize = sizeof(struct talitos_export_state),
3028			.halg.base = {
3029				.cra_name = "hmac(sha512)",
3030				.cra_driver_name = "hmac-sha512-talitos",
3031				.cra_blocksize = SHA512_BLOCK_SIZE,
3032				.cra_flags = CRYPTO_ALG_ASYNC |
3033					     CRYPTO_ALG_ALLOCATES_MEMORY,
3034			}
3035		},
3036		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3037				     DESC_HDR_SEL0_MDEUB |
3038				     DESC_HDR_MODE0_MDEUB_SHA512,
3039	}
3040};
3041
3042struct talitos_crypto_alg {
3043	struct list_head entry;
3044	struct device *dev;
3045	struct talitos_alg_template algt;
3046};
3047
3048static int talitos_init_common(struct talitos_ctx *ctx,
3049			       struct talitos_crypto_alg *talitos_alg)
3050{
3051	struct talitos_private *priv;
3052
3053	/* update context with ptr to dev */
3054	ctx->dev = talitos_alg->dev;
3055
3056	/* assign SEC channel to tfm in round-robin fashion */
3057	priv = dev_get_drvdata(ctx->dev);
3058	ctx->ch = atomic_inc_return(&priv->last_chan) &
3059		  (priv->num_channels - 1);
3060
3061	/* copy descriptor header template value */
3062	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3063
3064	/* select done notification */
3065	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3066
3067	return 0;
3068}
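/*
 * The channel mask above is only correct because talitos_probe() rejects
 * device trees whose num_channels is not a power of two; with e.g. four
 * channels, successive tfms are handed channels 1, 2, 3, 0, 1, ... as
 * last_chan wraps.
 */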
3069
3070static int talitos_cra_init_aead(struct crypto_aead *tfm)
3071{
3072	struct aead_alg *alg = crypto_aead_alg(tfm);
3073	struct talitos_crypto_alg *talitos_alg;
3074	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3075
3076	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3077				   algt.alg.aead);
3078
3079	return talitos_init_common(ctx, talitos_alg);
3080}
3081
3082static int talitos_cra_init_skcipher(struct crypto_skcipher *tfm)
3083{
3084	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
3085	struct talitos_crypto_alg *talitos_alg;
3086	struct talitos_ctx *ctx = crypto_skcipher_ctx(tfm);
3087
3088	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3089				   algt.alg.skcipher);
3090
3091	return talitos_init_common(ctx, talitos_alg);
3092}
3093
3094static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3095{
3096	struct crypto_alg *alg = tfm->__crt_alg;
3097	struct talitos_crypto_alg *talitos_alg;
3098	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3099
3100	talitos_alg = container_of(__crypto_ahash_alg(alg),
3101				   struct talitos_crypto_alg,
3102				   algt.alg.hash);
3103
3104	ctx->keylen = 0;
3105	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3106				 sizeof(struct talitos_ahash_req_ctx));
3107
3108	return talitos_init_common(ctx, talitos_alg);
3109}
3110
3111static void talitos_cra_exit(struct crypto_tfm *tfm)
3112{
3113	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3114	struct device *dev = ctx->dev;
3115
3116	if (ctx->keylen)
3117		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3118}
3119
3120/*
3121 * given the alg's descriptor header template, determine whether the
3122 * descriptor type and required primary/secondary execution units match
3123 * the hw capabilities described in the device tree node.
3124 */
3125static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3126{
3127	struct talitos_private *priv = dev_get_drvdata(dev);
3128	int ret;
3129
3130	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3131	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3132
3133	if (SECONDARY_EU(desc_hdr_template))
3134		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3135		              & priv->exec_units);
3136
3137	return ret;
3138}
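/*
 * Example: an "authenc(hmac(sha1),cbc(aes))" template selects
 * DESC_HDR_TYPE_IPSEC_ESP with AESU as primary and MDEU-A as secondary
 * execution unit, so it is only registered when the device tree's
 * "fsl,descriptor-types-mask" and "fsl,exec-units-mask" advertise all
 * three capabilities.
 */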
3139
3140static void talitos_remove(struct platform_device *ofdev)
3141{
3142	struct device *dev = &ofdev->dev;
3143	struct talitos_private *priv = dev_get_drvdata(dev);
3144	struct talitos_crypto_alg *t_alg, *n;
3145	int i;
3146
3147	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3148		switch (t_alg->algt.type) {
3149		case CRYPTO_ALG_TYPE_SKCIPHER:
3150			crypto_unregister_skcipher(&t_alg->algt.alg.skcipher);
3151			break;
3152		case CRYPTO_ALG_TYPE_AEAD:
3153			crypto_unregister_aead(&t_alg->algt.alg.aead);
3154			break;
3155		case CRYPTO_ALG_TYPE_AHASH:
3156			crypto_unregister_ahash(&t_alg->algt.alg.hash);
3157			break;
3158		}
3159		list_del(&t_alg->entry);
3160	}
3161
3162	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3163		talitos_unregister_rng(dev);
3164
3165	for (i = 0; i < 2; i++)
3166		if (priv->irq[i]) {
3167			free_irq(priv->irq[i], dev);
3168			irq_dispose_mapping(priv->irq[i]);
3169		}
3170
3171	tasklet_kill(&priv->done_task[0]);
3172	if (priv->irq[1])
3173		tasklet_kill(&priv->done_task[1]);
3174}
3175
3176static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3177						    struct talitos_alg_template
3178						           *template)
3179{
3180	struct talitos_private *priv = dev_get_drvdata(dev);
3181	struct talitos_crypto_alg *t_alg;
3182	struct crypto_alg *alg;
3183
3184	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3185			     GFP_KERNEL);
3186	if (!t_alg)
3187		return ERR_PTR(-ENOMEM);
3188
3189	t_alg->algt = *template;
3190
3191	switch (t_alg->algt.type) {
3192	case CRYPTO_ALG_TYPE_SKCIPHER:
3193		alg = &t_alg->algt.alg.skcipher.base;
3194		alg->cra_exit = talitos_cra_exit;
3195		t_alg->algt.alg.skcipher.init = talitos_cra_init_skcipher;
3196		t_alg->algt.alg.skcipher.setkey =
3197			t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
3198		t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
3199		t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
3200		if (!strcmp(alg->cra_name, "ctr(aes)") && !has_ftr_sec1(priv) &&
3201		    DESC_TYPE(t_alg->algt.desc_hdr_template) !=
3202		    DESC_TYPE(DESC_HDR_TYPE_AESU_CTR_NONSNOOP)) {
3203			devm_kfree(dev, t_alg);
3204			return ERR_PTR(-ENOTSUPP);
3205		}
3206		break;
3207	case CRYPTO_ALG_TYPE_AEAD:
3208		alg = &t_alg->algt.alg.aead.base;
3209		alg->cra_exit = talitos_cra_exit;
3210		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3211		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3212					      aead_setkey;
3213		t_alg->algt.alg.aead.encrypt = aead_encrypt;
3214		t_alg->algt.alg.aead.decrypt = aead_decrypt;
3215		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3216		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3217			devm_kfree(dev, t_alg);
3218			return ERR_PTR(-ENOTSUPP);
3219		}
3220		break;
3221	case CRYPTO_ALG_TYPE_AHASH:
3222		alg = &t_alg->algt.alg.hash.halg.base;
3223		alg->cra_init = talitos_cra_init_ahash;
3224		alg->cra_exit = talitos_cra_exit;
3225		t_alg->algt.alg.hash.init = ahash_init;
3226		t_alg->algt.alg.hash.update = ahash_update;
3227		t_alg->algt.alg.hash.final = ahash_final;
3228		t_alg->algt.alg.hash.finup = ahash_finup;
3229		t_alg->algt.alg.hash.digest = ahash_digest;
3230		if (!strncmp(alg->cra_name, "hmac", 4))
3231			t_alg->algt.alg.hash.setkey = ahash_setkey;
3232		t_alg->algt.alg.hash.import = ahash_import;
3233		t_alg->algt.alg.hash.export = ahash_export;
3234
3235		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3236		    !strncmp(alg->cra_name, "hmac", 4)) {
3237			devm_kfree(dev, t_alg);
3238			return ERR_PTR(-ENOTSUPP);
3239		}
3240		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3241		    (!strcmp(alg->cra_name, "sha224") ||
3242		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
3243			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3244			t_alg->algt.alg.hash.digest =
3245				ahash_digest_sha224_swinit;
3246			t_alg->algt.desc_hdr_template =
3247					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3248					DESC_HDR_SEL0_MDEUA |
3249					DESC_HDR_MODE0_MDEU_SHA256;
3250		}
3251		break;
3252	default:
3253		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3254		devm_kfree(dev, t_alg);
3255		return ERR_PTR(-EINVAL);
3256	}
3257
3258	alg->cra_module = THIS_MODULE;
3259	if (t_alg->algt.priority)
3260		alg->cra_priority = t_alg->algt.priority;
3261	else
3262		alg->cra_priority = TALITOS_CRA_PRIORITY;
3263	if (has_ftr_sec1(priv) && t_alg->algt.type != CRYPTO_ALG_TYPE_AHASH)
3264		alg->cra_alignmask = 3;
3265	else
3266		alg->cra_alignmask = 0;
3267	alg->cra_ctxsize = sizeof(struct talitos_ctx);
3268	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3269
3270	t_alg->dev = dev;
3271
3272	return t_alg;
3273}
3274
3275static int talitos_probe_irq(struct platform_device *ofdev)
3276{
3277	struct device *dev = &ofdev->dev;
3278	struct device_node *np = ofdev->dev.of_node;
3279	struct talitos_private *priv = dev_get_drvdata(dev);
3280	int err;
3281	bool is_sec1 = has_ftr_sec1(priv);
3282
3283	priv->irq[0] = irq_of_parse_and_map(np, 0);
3284	if (!priv->irq[0]) {
3285		dev_err(dev, "failed to map irq\n");
3286		return -EINVAL;
3287	}
3288	if (is_sec1) {
3289		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3290				  dev_driver_string(dev), dev);
3291		goto primary_out;
3292	}
3293
3294	priv->irq[1] = irq_of_parse_and_map(np, 1);
3295
3296	/* get the primary irq line */
3297	if (!priv->irq[1]) {
3298		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3299				  dev_driver_string(dev), dev);
3300		goto primary_out;
3301	}
3302
3303	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3304			  dev_driver_string(dev), dev);
3305	if (err)
3306		goto primary_out;
3307
3308	/* get the secondary irq line */
3309	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3310			  dev_driver_string(dev), dev);
3311	if (err) {
3312		dev_err(dev, "failed to request secondary irq\n");
3313		irq_dispose_mapping(priv->irq[1]);
3314		priv->irq[1] = 0;
3315	}
3316
3317	return err;
3318
3319primary_out:
3320	if (err) {
3321		dev_err(dev, "failed to request primary irq\n");
3322		irq_dispose_mapping(priv->irq[0]);
3323		priv->irq[0] = 0;
3324	}
3325
3326	return err;
3327}
3328
3329static int talitos_probe(struct platform_device *ofdev)
3330{
3331	struct device *dev = &ofdev->dev;
3332	struct device_node *np = ofdev->dev.of_node;
3333	struct talitos_private *priv;
3334	int i, err;
3335	int stride;
3336	struct resource *res;
3337
3338	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3339	if (!priv)
3340		return -ENOMEM;
3341
3342	INIT_LIST_HEAD(&priv->alg_list);
3343
3344	dev_set_drvdata(dev, priv);
3345
3346	priv->ofdev = ofdev;
3347
3348	spin_lock_init(&priv->reg_lock);
3349
3350	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3351	if (!res)
3352		return -ENXIO;
3353	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3354	if (!priv->reg) {
3355		dev_err(dev, "failed to ioremap\n");
3356		err = -ENOMEM;
3357		goto err_out;
3358	}
3359
3360	/* get SEC version capabilities from device tree */
3361	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3362	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3363	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3364	of_property_read_u32(np, "fsl,descriptor-types-mask",
3365			     &priv->desc_types);
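	/*
	 * Purely illustrative node (values differ per SoC; the board's
	 * dts and the fsl,sec binding are authoritative):
	 *
	 *	crypto@30000 {
	 *		compatible = "fsl,sec2.1", "fsl,sec2.0";
	 *		reg = <0x30000 0x10000>;
	 *		interrupts = <45 2>;
	 *		fsl,num-channels = <4>;
	 *		fsl,channel-fifo-len = <24>;
	 *		fsl,exec-units-mask = <0xfe>;
	 *		fsl,descriptor-types-mask = <0x12b0ebf>;
	 *	};
	 */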
3366
3367	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3368	    !priv->exec_units || !priv->desc_types) {
3369		dev_err(dev, "invalid property data in device tree node\n");
3370		err = -EINVAL;
3371		goto err_out;
3372	}
3373
3374	if (of_device_is_compatible(np, "fsl,sec3.0"))
3375		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3376
3377	if (of_device_is_compatible(np, "fsl,sec2.1"))
3378		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3379				  TALITOS_FTR_SHA224_HWINIT |
3380				  TALITOS_FTR_HMAC_OK;
3381
3382	if (of_device_is_compatible(np, "fsl,sec1.0"))
3383		priv->features |= TALITOS_FTR_SEC1;
3384
3385	if (of_device_is_compatible(np, "fsl,sec1.2")) {
3386		priv->reg_deu = priv->reg + TALITOS12_DEU;
3387		priv->reg_aesu = priv->reg + TALITOS12_AESU;
3388		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3389		stride = TALITOS1_CH_STRIDE;
3390	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3391		priv->reg_deu = priv->reg + TALITOS10_DEU;
3392		priv->reg_aesu = priv->reg + TALITOS10_AESU;
3393		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3394		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3395		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3396		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3397		stride = TALITOS1_CH_STRIDE;
3398	} else {
3399		priv->reg_deu = priv->reg + TALITOS2_DEU;
3400		priv->reg_aesu = priv->reg + TALITOS2_AESU;
3401		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3402		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3403		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3404		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3405		priv->reg_keu = priv->reg + TALITOS2_KEU;
3406		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3407		stride = TALITOS2_CH_STRIDE;
3408	}
3409
3410	err = talitos_probe_irq(ofdev);
3411	if (err)
3412		goto err_out;
3413
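	/* the done tasklets mirror the irq layout chosen in talitos_probe_irq() */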
3414	if (has_ftr_sec1(priv)) {
3415		if (priv->num_channels == 1)
3416			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3417				     (unsigned long)dev);
3418		else
3419			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3420				     (unsigned long)dev);
3421	} else {
3422		if (priv->irq[1]) {
3423			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3424				     (unsigned long)dev);
3425			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3426				     (unsigned long)dev);
3427		} else if (priv->num_channels == 1) {
3428			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3429				     (unsigned long)dev);
3430		} else {
3431			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3432				     (unsigned long)dev);
3433		}
3434	}
3435
3436	priv->chan = devm_kcalloc(dev,
3437				  priv->num_channels,
3438				  sizeof(struct talitos_channel),
3439				  GFP_KERNEL);
3440	if (!priv->chan) {
3441		dev_err(dev, "failed to allocate channel management space\n");
3442		err = -ENOMEM;
3443		goto err_out;
3444	}
3445
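	/*
	 * Round the fifo length up to a power of 2 so head/tail indices
	 * can wrap with a cheap "& (fifo_len - 1)" instead of a modulo.
	 */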
3446	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3447
3448	for (i = 0; i < priv->num_channels; i++) {
3449		priv->chan[i].reg = priv->reg + stride * (i + 1);
3450		if (!priv->irq[1] || !(i & 1))
3451			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3452
3453		spin_lock_init(&priv->chan[i].head_lock);
3454		spin_lock_init(&priv->chan[i].tail_lock);
3455
3456		priv->chan[i].fifo = devm_kcalloc(dev,
3457						priv->fifo_len,
3458						sizeof(struct talitos_request),
3459						GFP_KERNEL);
3460		if (!priv->chan[i].fifo) {
3461			dev_err(dev, "failed to allocate request fifo %d\n", i);
3462			err = -ENOMEM;
3463			goto err_out;
3464		}
3465
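		/*
		 * Bias the counter negative so it hits zero as the channel's
		 * h/w fifo fills up; talitos_submit() can then use
		 * atomic_inc_not_zero() as a cheap fullness check.
		 */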
3466		atomic_set(&priv->chan[i].submit_count,
3467			   -(priv->chfifo_len - 1));
3468	}
3469
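	/* SEC address pointers are 36 bits wide (eptr carries bits 35:32) */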
3470	dma_set_mask(dev, DMA_BIT_MASK(36));
3471
3472	/* reset and initialize the h/w */
3473	err = init_device(dev);
3474	if (err) {
3475		dev_err(dev, "failed to initialize device\n");
3476		goto err_out;
3477	}
3478
3479	/* register the RNG, if available */
3480	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3481		err = talitos_register_rng(dev);
3482		if (err) {
3483			dev_err(dev, "failed to register hwrng: %d\n", err);
3484			goto err_out;
3485		}
3486		dev_info(dev, "hwrng\n");
3487	}
3488
3489	/* register crypto algorithms the device supports */
3490	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3491		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3492			struct talitos_crypto_alg *t_alg;
3493			struct crypto_alg *alg = NULL;
3494
3495			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3496			if (IS_ERR(t_alg)) {
3497				err = PTR_ERR(t_alg);
3498				if (err == -ENOTSUPP)
3499					continue;
3500				goto err_out;
3501			}
3502
3503			switch (t_alg->algt.type) {
3504			case CRYPTO_ALG_TYPE_SKCIPHER:
3505				err = crypto_register_skcipher(
3506						&t_alg->algt.alg.skcipher);
3507				alg = &t_alg->algt.alg.skcipher.base;
3508				break;
3509
3510			case CRYPTO_ALG_TYPE_AEAD:
3511				err = crypto_register_aead(
3512					&t_alg->algt.alg.aead);
3513				alg = &t_alg->algt.alg.aead.base;
3514				break;
3515
3516			case CRYPTO_ALG_TYPE_AHASH:
3517				err = crypto_register_ahash(
3518						&t_alg->algt.alg.hash);
3519				alg = &t_alg->algt.alg.hash.halg.base;
3520				break;
3521			}
3522			if (err) {
3523				dev_err(dev, "%s alg registration failed\n",
3524					alg->cra_driver_name);
3525				devm_kfree(dev, t_alg);
3526			} else
3527				list_add_tail(&t_alg->entry, &priv->alg_list);
3528		}
3529	}
3530	if (!list_empty(&priv->alg_list))
3531		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3532			 (char *)of_get_property(np, "compatible", NULL));
3533
3534	return 0;
3535
3536err_out:
3537	talitos_remove(ofdev);
3538
3539	return err;
3540}
3541
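/*
 * Each match entry is gated by its Kconfig symbol so a kernel built for
 * only one SEC generation does not bind devices of the other.
 */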
3542static const struct of_device_id talitos_match[] = {
3543#ifdef CONFIG_CRYPTO_DEV_TALITOS1
3544	{
3545		.compatible = "fsl,sec1.0",
3546	},
3547#endif
3548#ifdef CONFIG_CRYPTO_DEV_TALITOS2
3549	{
3550		.compatible = "fsl,sec2.0",
3551	},
3552#endif
3553	{},
3554};
3555MODULE_DEVICE_TABLE(of, talitos_match);
3556
3557static struct platform_driver talitos_driver = {
3558	.driver = {
3559		.name = "talitos",
3560		.of_match_table = talitos_match,
3561	},
3562	.probe = talitos_probe,
3563	.remove = talitos_remove,
3564};
3565
3566module_platform_driver(talitos_driver);
3567
3568MODULE_LICENSE("GPL");
3569MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3570MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
v3.1
 
   1/*
   2 * talitos - Freescale Integrated Security Engine (SEC) device driver
   3 *
   4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
   5 *
   6 * Scatterlist Crypto API glue code copied from files with the following:
   7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
   8 *
   9 * Crypto algorithm registration code copied from hifn driver:
  10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
  11 * All rights reserved.
  12 *
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of the GNU General Public License as published by
  15 * the Free Software Foundation; either version 2 of the License, or
  16 * (at your option) any later version.
  17 *
  18 * This program is distributed in the hope that it will be useful,
  19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  21 * GNU General Public License for more details.
  22 *
  23 * You should have received a copy of the GNU General Public License
  24 * along with this program; if not, write to the Free Software
  25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  26 */
  27
  28#include <linux/kernel.h>
  29#include <linux/module.h>
  30#include <linux/mod_devicetable.h>
  31#include <linux/device.h>
  32#include <linux/interrupt.h>
  33#include <linux/crypto.h>
  34#include <linux/hw_random.h>
  35#include <linux/of_platform.h>
 
 
  36#include <linux/dma-mapping.h>
  37#include <linux/io.h>
  38#include <linux/spinlock.h>
  39#include <linux/rtnetlink.h>
  40#include <linux/slab.h>
  41
  42#include <crypto/algapi.h>
  43#include <crypto/aes.h>
  44#include <crypto/des.h>
  45#include <crypto/sha.h>
 
  46#include <crypto/md5.h>
  47#include <crypto/aead.h>
  48#include <crypto/authenc.h>
  49#include <crypto/skcipher.h>
  50#include <crypto/hash.h>
  51#include <crypto/internal/hash.h>
  52#include <crypto/scatterwalk.h>
  53
  54#include "talitos.h"
  55
  56#define TALITOS_TIMEOUT 100000
  57#define TALITOS_MAX_DATA_LEN 65535
 
 
 
 
 
 
 
 
 
  58
  59#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
  60#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
  61#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)
  62
  63/* descriptor pointer entry */
  64struct talitos_ptr {
  65	__be16 len;	/* length */
  66	u8 j_extent;	/* jump to sg link table and/or extent */
  67	u8 eptr;	/* extended address */
  68	__be32 ptr;	/* address */
  69};
  70
  71static const struct talitos_ptr zero_entry = {
  72	.len = 0,
  73	.j_extent = 0,
  74	.eptr = 0,
  75	.ptr = 0
  76};
 
 
  77
  78/* descriptor */
  79struct talitos_desc {
  80	__be32 hdr;			/* header high bits */
  81	__be32 hdr_lo;			/* header low bits */
  82	struct talitos_ptr ptr[7];	/* ptr/len pair array */
  83};
  84
  85/**
  86 * talitos_request - descriptor submission request
  87 * @desc: descriptor pointer (kernel virtual)
  88 * @dma_desc: descriptor's physical bus address
  89 * @callback: whom to call when descriptor processing is done
  90 * @context: caller context (optional)
  91 */
  92struct talitos_request {
  93	struct talitos_desc *desc;
  94	dma_addr_t dma_desc;
  95	void (*callback) (struct device *dev, struct talitos_desc *desc,
  96	                  void *context, int error);
  97	void *context;
  98};
  99
 100/* per-channel fifo management */
 101struct talitos_channel {
 102	/* request fifo */
 103	struct talitos_request *fifo;
 104
 105	/* number of requests pending in channel h/w fifo */
 106	atomic_t submit_count ____cacheline_aligned;
 107
 108	/* request submission (head) lock */
 109	spinlock_t head_lock ____cacheline_aligned;
 110	/* index to next free descriptor request */
 111	int head;
 112
 113	/* request release (tail) lock */
 114	spinlock_t tail_lock ____cacheline_aligned;
 115	/* index to next in-progress/done descriptor request */
 116	int tail;
 117};
 118
 119struct talitos_private {
 120	struct device *dev;
 121	struct platform_device *ofdev;
 122	void __iomem *reg;
 123	int irq;
 124
 125	/* SEC version geometry (from device tree node) */
 126	unsigned int num_channels;
 127	unsigned int chfifo_len;
 128	unsigned int exec_units;
 129	unsigned int desc_types;
 130
 131	/* SEC Compatibility info */
 132	unsigned long features;
 133
 134	/*
 135	 * length of the request fifo
 136	 * fifo_len is chfifo_len rounded up to next power of 2
 137	 * so we can use bitwise ops to wrap
 138	 */
 139	unsigned int fifo_len;
 140
 141	struct talitos_channel *chan;
 142
 143	/* next channel to be assigned next incoming descriptor */
 144	atomic_t last_chan ____cacheline_aligned;
 145
 146	/* request callback tasklet */
 147	struct tasklet_struct done_task;
 148
 149	/* list of registered algorithms */
 150	struct list_head alg_list;
 151
 152	/* hwrng device */
 153	struct hwrng rng;
 154};
 155
 156/* .features flag */
 157#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
 158#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
 159#define TALITOS_FTR_SHA224_HWINIT 0x00000004
 160
 161static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
 162{
 163	talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
 164	talitos_ptr->eptr = upper_32_bits(dma_addr);
 165}
 166
 167/*
 168 * map virtual single (contiguous) pointer to h/w descriptor pointer
 169 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 170static void map_single_talitos_ptr(struct device *dev,
 171				   struct talitos_ptr *talitos_ptr,
 172				   unsigned short len, void *data,
 173				   unsigned char extent,
 174				   enum dma_data_direction dir)
 175{
 176	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
 
 177
 178	talitos_ptr->len = cpu_to_be16(len);
 179	to_talitos_ptr(talitos_ptr, dma_addr);
 180	talitos_ptr->j_extent = extent;
 
 
 
 
 181}
 182
 183/*
 184 * unmap bus single (contiguous) h/w descriptor pointer
 185 */
 186static void unmap_single_talitos_ptr(struct device *dev,
 187				     struct talitos_ptr *talitos_ptr,
 188				     enum dma_data_direction dir)
 189{
 190	dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
 191			 be16_to_cpu(talitos_ptr->len), dir);
 
 
 
 192}
 193
 194static int reset_channel(struct device *dev, int ch)
 195{
 196	struct talitos_private *priv = dev_get_drvdata(dev);
 197	unsigned int timeout = TALITOS_TIMEOUT;
 
 198
 199	setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);
 
 
 
 
 
 
 
 
 
 200
 201	while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
 202	       && --timeout)
 203		cpu_relax();
 
 204
 205	if (timeout == 0) {
 206		dev_err(dev, "failed to reset channel %d\n", ch);
 207		return -EIO;
 208	}
 209
 210	/* set 36-bit addressing, done writeback enable and done IRQ enable */
 211	setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
 212		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
 
 
 
 
 213
 214	/* and ICCR writeback, if available */
 215	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
 216		setbits32(priv->reg + TALITOS_CCCR_LO(ch),
 217		          TALITOS_CCCR_LO_IWSE);
 218
 219	return 0;
 220}
 221
 222static int reset_device(struct device *dev)
 223{
 224	struct talitos_private *priv = dev_get_drvdata(dev);
 225	unsigned int timeout = TALITOS_TIMEOUT;
 
 
 226
 227	setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);
 228
 229	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
 230	       && --timeout)
 231		cpu_relax();
 232
 
 
 
 
 
 233	if (timeout == 0) {
 234		dev_err(dev, "failed to reset device\n");
 235		return -EIO;
 236	}
 237
 238	return 0;
 239}
 240
 241/*
 242 * Reset and initialize the device
 243 */
 244static int init_device(struct device *dev)
 245{
 246	struct talitos_private *priv = dev_get_drvdata(dev);
 247	int ch, err;
 
 248
 249	/*
 250	 * Master reset
 251	 * errata documentation: warning: certain SEC interrupts
 252	 * are not fully cleared by writing the MCR:SWR bit,
 253	 * set bit twice to completely reset
 254	 */
 255	err = reset_device(dev);
 256	if (err)
 257		return err;
 258
 259	err = reset_device(dev);
 260	if (err)
 261		return err;
 262
 263	/* reset channels */
 264	for (ch = 0; ch < priv->num_channels; ch++) {
 265		err = reset_channel(dev, ch);
 266		if (err)
 267			return err;
 268	}
 269
 270	/* enable channel done and error interrupts */
 271	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
 272	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
 
 
 
 
 
 
 
 273
 274	/* disable integrity check error interrupts (use writeback instead) */
 275	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
 276		setbits32(priv->reg + TALITOS_MDEUICR_LO,
 277		          TALITOS_MDEUICR_LO_ICE);
 278
 279	return 0;
 280}
 281
 282/**
 283 * talitos_submit - submits a descriptor to the device for processing
 284 * @dev:	the SEC device to be used
 285 * @ch:		the SEC device channel to be used
 286 * @desc:	the descriptor to be processed by the device
 287 * @callback:	whom to call when processing is complete
 288 * @context:	a handle for use by caller (optional)
 289 *
 290 * desc must contain valid dma-mapped (bus physical) address pointers.
 291 * callback must check err and feedback in descriptor header
 292 * for device processing status.
 293 */
 294static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
 295			  void (*callback)(struct device *dev,
 296					   struct talitos_desc *desc,
 297					   void *context, int error),
 298			  void *context)
 299{
 300	struct talitos_private *priv = dev_get_drvdata(dev);
 301	struct talitos_request *request;
 302	unsigned long flags;
 303	int head;
 
 304
 305	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
 306
 307	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
 308		/* h/w fifo is full */
 309		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 310		return -EAGAIN;
 311	}
 312
 313	head = priv->chan[ch].head;
 314	request = &priv->chan[ch].fifo[head];
 315
 316	/* map descriptor and save caller data */
 317	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
 318					   DMA_BIDIRECTIONAL);
 
 
 
 
 
 
 
 
 319	request->callback = callback;
 320	request->context = context;
 321
 322	/* increment fifo head */
 323	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
 324
 325	smp_wmb();
 326	request->desc = desc;
 327
 328	/* GO! */
 329	wmb();
 330	out_be32(priv->reg + TALITOS_FF(ch), upper_32_bits(request->dma_desc));
 331	out_be32(priv->reg + TALITOS_FF_LO(ch),
 
 332		 lower_32_bits(request->dma_desc));
 333
 334	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 335
 336	return -EINPROGRESS;
 337}
 338
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 339/*
 340 * process what was done, notify callback of error if not
 341 */
 342static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
 343{
 344	struct talitos_private *priv = dev_get_drvdata(dev);
 345	struct talitos_request *request, saved_req;
 346	unsigned long flags;
 347	int tail, status;
 
 348
 349	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
 350
 351	tail = priv->chan[ch].tail;
 352	while (priv->chan[ch].fifo[tail].desc) {
 
 
 353		request = &priv->chan[ch].fifo[tail];
 354
 355		/* descriptors with their done bits set don't get the error */
 356		rmb();
 357		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
 
 
 358			status = 0;
 359		else
 360			if (!error)
 361				break;
 362			else
 363				status = error;
 364
 365		dma_unmap_single(dev, request->dma_desc,
 366				 sizeof(struct talitos_desc),
 367				 DMA_BIDIRECTIONAL);
 368
 369		/* copy entries so we can call callback outside lock */
 370		saved_req.desc = request->desc;
 371		saved_req.callback = request->callback;
 372		saved_req.context = request->context;
 373
 374		/* release request entry in fifo */
 375		smp_wmb();
 376		request->desc = NULL;
 377
 378		/* increment fifo tail */
 379		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
 380
 381		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
 382
 383		atomic_dec(&priv->chan[ch].submit_count);
 384
 385		saved_req.callback(dev, saved_req.desc, saved_req.context,
 386				   status);
 387		/* channel may resume processing in single desc error case */
 388		if (error && !reset_ch && status == error)
 389			return;
 390		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
 391		tail = priv->chan[ch].tail;
 392	}
 393
 394	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
 395}
 396
 397/*
 398 * process completed requests for channels that have done status
 399 */
 400static void talitos_done(unsigned long data)
 401{
 402	struct device *dev = (struct device *)data;
 403	struct talitos_private *priv = dev_get_drvdata(dev);
 404	int ch;
 405
 406	for (ch = 0; ch < priv->num_channels; ch++)
 407		flush_channel(dev, ch, 0, 0);
 408
 409	/* At this point, all completed channels have been processed.
 410	 * Unmask done interrupts for channels completed later on.
 411	 */
 412	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
 413	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
 414}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 415
 416/*
 417 * locate current (offending) descriptor
 418 */
 419static struct talitos_desc *current_desc(struct device *dev, int ch)
 420{
 421	struct talitos_private *priv = dev_get_drvdata(dev);
 422	int tail = priv->chan[ch].tail;
 423	dma_addr_t cur_desc;
 424
 425	cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
 
 
 
 
 
 
 
 
 426
 427	while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
 428		tail = (tail + 1) & (priv->fifo_len - 1);
 429		if (tail == priv->chan[ch].tail) {
 
 
 430			dev_err(dev, "couldn't locate current descriptor\n");
 431			return NULL;
 432		}
 433	}
 434
 435	return priv->chan[ch].fifo[tail].desc;
 
 
 
 
 
 
 
 
 
 436}
 437
 438/*
 439 * user diagnostics; report root cause of error based on execution unit status
 440 */
 441static void report_eu_error(struct device *dev, int ch,
 442			    struct talitos_desc *desc)
 443{
 444	struct talitos_private *priv = dev_get_drvdata(dev);
 445	int i;
 446
 447	switch (desc->hdr & DESC_HDR_SEL0_MASK) {
 
 
 
 448	case DESC_HDR_SEL0_AFEU:
 449		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
 450			in_be32(priv->reg + TALITOS_AFEUISR),
 451			in_be32(priv->reg + TALITOS_AFEUISR_LO));
 452		break;
 453	case DESC_HDR_SEL0_DEU:
 454		dev_err(dev, "DEUISR 0x%08x_%08x\n",
 455			in_be32(priv->reg + TALITOS_DEUISR),
 456			in_be32(priv->reg + TALITOS_DEUISR_LO));
 457		break;
 458	case DESC_HDR_SEL0_MDEUA:
 459	case DESC_HDR_SEL0_MDEUB:
 460		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
 461			in_be32(priv->reg + TALITOS_MDEUISR),
 462			in_be32(priv->reg + TALITOS_MDEUISR_LO));
 463		break;
 464	case DESC_HDR_SEL0_RNG:
 465		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
 466			in_be32(priv->reg + TALITOS_RNGUISR),
 467			in_be32(priv->reg + TALITOS_RNGUISR_LO));
 468		break;
 469	case DESC_HDR_SEL0_PKEU:
 470		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
 471			in_be32(priv->reg + TALITOS_PKEUISR),
 472			in_be32(priv->reg + TALITOS_PKEUISR_LO));
 473		break;
 474	case DESC_HDR_SEL0_AESU:
 475		dev_err(dev, "AESUISR 0x%08x_%08x\n",
 476			in_be32(priv->reg + TALITOS_AESUISR),
 477			in_be32(priv->reg + TALITOS_AESUISR_LO));
 478		break;
 479	case DESC_HDR_SEL0_CRCU:
 480		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
 481			in_be32(priv->reg + TALITOS_CRCUISR),
 482			in_be32(priv->reg + TALITOS_CRCUISR_LO));
 483		break;
 484	case DESC_HDR_SEL0_KEU:
 485		dev_err(dev, "KEUISR 0x%08x_%08x\n",
 486			in_be32(priv->reg + TALITOS_KEUISR),
 487			in_be32(priv->reg + TALITOS_KEUISR_LO));
 488		break;
 489	}
 490
 491	switch (desc->hdr & DESC_HDR_SEL1_MASK) {
 492	case DESC_HDR_SEL1_MDEUA:
 493	case DESC_HDR_SEL1_MDEUB:
 494		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
 495			in_be32(priv->reg + TALITOS_MDEUISR),
 496			in_be32(priv->reg + TALITOS_MDEUISR_LO));
 497		break;
 498	case DESC_HDR_SEL1_CRCU:
 499		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
 500			in_be32(priv->reg + TALITOS_CRCUISR),
 501			in_be32(priv->reg + TALITOS_CRCUISR_LO));
 502		break;
 503	}
 504
 505	for (i = 0; i < 8; i++)
 506		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
 507			in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
 508			in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
 509}
 510
 511/*
 512 * recover from error interrupts
 513 */
 514static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
 515{
 516	struct device *dev = (struct device *)data;
 517	struct talitos_private *priv = dev_get_drvdata(dev);
 518	unsigned int timeout = TALITOS_TIMEOUT;
 519	int ch, error, reset_dev = 0, reset_ch = 0;
 520	u32 v, v_lo;
 
 
 521
 522	for (ch = 0; ch < priv->num_channels; ch++) {
 523		/* skip channels without errors */
 524		if (!(isr & (1 << (ch * 2 + 1))))
 525			continue;
 
 
 
 
 
 
 526
 527		error = -EINVAL;
 528
 529		v = in_be32(priv->reg + TALITOS_CCPSR(ch));
 530		v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));
 531
 532		if (v_lo & TALITOS_CCPSR_LO_DOF) {
 533			dev_err(dev, "double fetch fifo overflow error\n");
 534			error = -EAGAIN;
 535			reset_ch = 1;
 536		}
 537		if (v_lo & TALITOS_CCPSR_LO_SOF) {
 538			/* h/w dropped descriptor */
 539			dev_err(dev, "single fetch fifo overflow error\n");
 540			error = -EAGAIN;
 541		}
 542		if (v_lo & TALITOS_CCPSR_LO_MDTE)
 543			dev_err(dev, "master data transfer error\n");
 544		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
 545			dev_err(dev, "s/g data length zero error\n");
 
 546		if (v_lo & TALITOS_CCPSR_LO_FPZ)
 547			dev_err(dev, "fetch pointer zero error\n");
 
 548		if (v_lo & TALITOS_CCPSR_LO_IDH)
 549			dev_err(dev, "illegal descriptor header error\n");
 550		if (v_lo & TALITOS_CCPSR_LO_IEU)
 551			dev_err(dev, "invalid execution unit error\n");
 
 552		if (v_lo & TALITOS_CCPSR_LO_EU)
 553			report_eu_error(dev, ch, current_desc(dev, ch));
 554		if (v_lo & TALITOS_CCPSR_LO_GB)
 555			dev_err(dev, "gather boundary error\n");
 556		if (v_lo & TALITOS_CCPSR_LO_GRL)
 557			dev_err(dev, "gather return/length error\n");
 558		if (v_lo & TALITOS_CCPSR_LO_SB)
 559			dev_err(dev, "scatter boundary error\n");
 560		if (v_lo & TALITOS_CCPSR_LO_SRL)
 561			dev_err(dev, "scatter return/length error\n");
 
 
 562
 563		flush_channel(dev, ch, error, reset_ch);
 564
 565		if (reset_ch) {
 566			reset_channel(dev, ch);
 567		} else {
 568			setbits32(priv->reg + TALITOS_CCCR(ch),
 569				  TALITOS_CCCR_CONT);
 570			setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
 571			while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
 572			       TALITOS_CCCR_CONT) && --timeout)
 573				cpu_relax();
 574			if (timeout == 0) {
 575				dev_err(dev, "failed to restart channel %d\n",
 576					ch);
 577				reset_dev = 1;
 578			}
 579		}
 580	}
 581	if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
 582		dev_err(dev, "done overflow, internal time out, or rngu error: "
 583		        "ISR 0x%08x_%08x\n", isr, isr_lo);
 
 
 
 
 
 584
 585		/* purge request queues */
 586		for (ch = 0; ch < priv->num_channels; ch++)
 587			flush_channel(dev, ch, -EIO, 1);
 588
 589		/* reset and reinitialize the device */
 590		init_device(dev);
 591	}
 592}
 593
 594static irqreturn_t talitos_interrupt(int irq, void *data)
 595{
 596	struct device *dev = data;
 597	struct talitos_private *priv = dev_get_drvdata(dev);
 598	u32 isr, isr_lo;
 599
 600	isr = in_be32(priv->reg + TALITOS_ISR);
 601	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
 602	/* Acknowledge interrupt */
 603	out_be32(priv->reg + TALITOS_ICR, isr);
 604	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);
 605
 606	if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
 607		talitos_error((unsigned long)data, isr, isr_lo);
 608	else
 609		if (likely(isr & TALITOS_ISR_CHDONE)) {
 610			/* mask further done interrupts. */
 611			clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE);
 612			/* done_task will unmask done interrupts at exit */
 613			tasklet_schedule(&priv->done_task);
 614		}
 615
 616	return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
 617}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 618
 619/*
 620 * hwrng
 621 */
 622static int talitos_rng_data_present(struct hwrng *rng, int wait)
 623{
 624	struct device *dev = (struct device *)rng->priv;
 625	struct talitos_private *priv = dev_get_drvdata(dev);
 626	u32 ofl;
 627	int i;
 628
 629	for (i = 0; i < 20; i++) {
 630		ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
 631		      TALITOS_RNGUSR_LO_OFL;
 632		if (ofl || !wait)
 633			break;
 634		udelay(10);
 635	}
 636
 637	return !!ofl;
 638}
 639
 640static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
 641{
 642	struct device *dev = (struct device *)rng->priv;
 643	struct talitos_private *priv = dev_get_drvdata(dev);
 644
 645	/* rng fifo requires 64-bit accesses */
 646	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
 647	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);
 648
 649	return sizeof(u32);
 650}
 651
 652static int talitos_rng_init(struct hwrng *rng)
 653{
 654	struct device *dev = (struct device *)rng->priv;
 655	struct talitos_private *priv = dev_get_drvdata(dev);
 656	unsigned int timeout = TALITOS_TIMEOUT;
 657
 658	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
 659	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
 
 660	       && --timeout)
 661		cpu_relax();
 662	if (timeout == 0) {
 663		dev_err(dev, "failed to reset rng hw\n");
 664		return -ENODEV;
 665	}
 666
 667	/* start generating */
 668	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);
 669
 670	return 0;
 671}
 672
 673static int talitos_register_rng(struct device *dev)
 674{
 675	struct talitos_private *priv = dev_get_drvdata(dev);
 
 676
 677	priv->rng.name		= dev_driver_string(dev),
 678	priv->rng.init		= talitos_rng_init,
 679	priv->rng.data_present	= talitos_rng_data_present,
 680	priv->rng.data_read	= talitos_rng_data_read,
 681	priv->rng.priv		= (unsigned long)dev;
 682
 683	return hwrng_register(&priv->rng);
 
 
 
 
 684}
 685
 686static void talitos_unregister_rng(struct device *dev)
 687{
 688	struct talitos_private *priv = dev_get_drvdata(dev);
 689
 
 
 
 690	hwrng_unregister(&priv->rng);
 
 691}
 692
 693/*
 694 * crypto alg
 695 */
 696#define TALITOS_CRA_PRIORITY		3000
 697#define TALITOS_MAX_KEY_SIZE		64
 
 
 
 
 
 
 
 
 
 698#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
 699
 700#define MD5_BLOCK_SIZE    64
 701
 702struct talitos_ctx {
 703	struct device *dev;
 704	int ch;
 705	__be32 desc_hdr_template;
 706	u8 key[TALITOS_MAX_KEY_SIZE];
 707	u8 iv[TALITOS_MAX_IV_LENGTH];
 
 708	unsigned int keylen;
 709	unsigned int enckeylen;
 710	unsigned int authkeylen;
 711	unsigned int authsize;
 712};
 713
 714#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
 715#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
 716
 717struct talitos_ahash_req_ctx {
 718	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
 719	unsigned int hw_context_size;
 720	u8 buf[HASH_MAX_BLOCK_SIZE];
 721	u8 bufnext[HASH_MAX_BLOCK_SIZE];
 722	unsigned int swinit;
 723	unsigned int first;
 724	unsigned int last;
 725	unsigned int to_hash_later;
 726	u64 nbuf;
 727	struct scatterlist bufsl[2];
 728	struct scatterlist *psrc;
 729};
 730
 731static int aead_setauthsize(struct crypto_aead *authenc,
 732			    unsigned int authsize)
 733{
 734	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 735
 736	ctx->authsize = authsize;
 737
 738	return 0;
 739}
 740
 741static int aead_setkey(struct crypto_aead *authenc,
 742		       const u8 *key, unsigned int keylen)
 743{
 744	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 745	struct rtattr *rta = (void *)key;
 746	struct crypto_authenc_key_param *param;
 747	unsigned int authkeylen;
 748	unsigned int enckeylen;
 749
 750	if (!RTA_OK(rta, keylen))
 751		goto badkey;
 752
 753	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
 754		goto badkey;
 755
 756	if (RTA_PAYLOAD(rta) < sizeof(*param))
 757		goto badkey;
 758
 759	param = RTA_DATA(rta);
 760	enckeylen = be32_to_cpu(param->enckeylen);
 761
 762	key += RTA_ALIGN(rta->rta_len);
 763	keylen -= RTA_ALIGN(rta->rta_len);
 
 
 
 764
 765	if (keylen < enckeylen)
 766		goto badkey;
 767
 768	authkeylen = keylen - enckeylen;
 
 
 
 769
 770	if (keylen > TALITOS_MAX_KEY_SIZE)
 771		goto badkey;
 
 
 
 
 
 772
 773	memcpy(&ctx->key, key, keylen);
 
 
 
 
 
 
 774
 775	ctx->keylen = keylen;
 776	ctx->enckeylen = enckeylen;
 777	ctx->authkeylen = authkeylen;
 778
 779	return 0;
 
 780
 781badkey:
 782	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
 783	return -EINVAL;
 784}
 785
 786/*
 787 * talitos_edesc - s/w-extended descriptor
 788 * @src_nents: number of segments in input scatterlist
 789 * @dst_nents: number of segments in output scatterlist
 790 * @dma_len: length of dma mapped link_tbl space
 791 * @dma_link_tbl: bus physical address of link_tbl
 792 * @desc: h/w descriptor
 793 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 794 *
 795 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 796 * is greater than 1, an integrity check value is concatenated to the end
 797 * of link_tbl data
 798 */
 799struct talitos_edesc {
 800	int src_nents;
 801	int dst_nents;
 802	int src_is_chained;
 803	int dst_is_chained;
 804	int dma_len;
 805	dma_addr_t dma_link_tbl;
 806	struct talitos_desc desc;
 807	struct talitos_ptr link_tbl[0];
 808};
 809
 810static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
 811			  unsigned int nents, enum dma_data_direction dir,
 812			  int chained)
 813{
 814	if (unlikely(chained))
 815		while (sg) {
 816			dma_map_sg(dev, sg, 1, dir);
 817			sg = scatterwalk_sg_next(sg);
 818		}
 819	else
 820		dma_map_sg(dev, sg, nents, dir);
 821	return nents;
 822}
 823
 824static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
 825				   enum dma_data_direction dir)
 826{
 827	while (sg) {
 828		dma_unmap_sg(dev, sg, 1, dir);
 829		sg = scatterwalk_sg_next(sg);
 830	}
 831}
 832
 833static void talitos_sg_unmap(struct device *dev,
 834			     struct talitos_edesc *edesc,
 835			     struct scatterlist *src,
 836			     struct scatterlist *dst)
 
 837{
 
 
 838	unsigned int src_nents = edesc->src_nents ? : 1;
 839	unsigned int dst_nents = edesc->dst_nents ? : 1;
 840
 
 
 
 
 
 
 841	if (src != dst) {
 842		if (edesc->src_is_chained)
 843			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
 844		else
 845			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
 846
 847		if (dst) {
 848			if (edesc->dst_is_chained)
 849				talitos_unmap_sg_chain(dev, dst,
 850						       DMA_FROM_DEVICE);
 851			else
 852				dma_unmap_sg(dev, dst, dst_nents,
 853					     DMA_FROM_DEVICE);
 854		}
 855	} else
 856		if (edesc->src_is_chained)
 857			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
 858		else
 859			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
 860}
 861
 862static void ipsec_esp_unmap(struct device *dev,
 863			    struct talitos_edesc *edesc,
 864			    struct aead_request *areq)
 865{
 866	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
 867	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
 868	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
 869	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
 870
 871	dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
 
 
 
 
 
 
 872
 873	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
 
 874
 875	if (edesc->dma_len)
 876		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
 877				 DMA_BIDIRECTIONAL);
 
 
 
 
 
 
 
 878}
 879
 880/*
 881 * ipsec_esp descriptor callbacks
 882 */
 883static void ipsec_esp_encrypt_done(struct device *dev,
 884				   struct talitos_desc *desc, void *context,
 885				   int err)
 886{
 887	struct aead_request *areq = context;
 888	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
 889	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 890	struct talitos_edesc *edesc;
 891	struct scatterlist *sg;
 892	void *icvdata;
 893
 894	edesc = container_of(desc, struct talitos_edesc, desc);
 895
 896	ipsec_esp_unmap(dev, edesc, areq);
 897
 898	/* copy the generated ICV to dst */
 899	if (edesc->dma_len) {
 900		icvdata = &edesc->link_tbl[edesc->src_nents +
 901					   edesc->dst_nents + 2];
 902		sg = sg_last(areq->dst, edesc->dst_nents);
 903		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
 904		       icvdata, ctx->authsize);
 905	}
 906
 907	kfree(edesc);
 908
 909	aead_request_complete(areq, err);
 910}
 911
 912static void ipsec_esp_decrypt_swauth_done(struct device *dev,
 913					  struct talitos_desc *desc,
 914					  void *context, int err)
 915{
 916	struct aead_request *req = context;
 917	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
 918	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 919	struct talitos_edesc *edesc;
 920	struct scatterlist *sg;
 921	void *icvdata;
 922
 923	edesc = container_of(desc, struct talitos_edesc, desc);
 924
 925	ipsec_esp_unmap(dev, edesc, req);
 926
 927	if (!err) {
 928		/* auth check */
 929		if (edesc->dma_len)
 930			icvdata = &edesc->link_tbl[edesc->src_nents +
 931						   edesc->dst_nents + 2];
 932		else
 933			icvdata = &edesc->link_tbl[0];
 934
 935		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
 936		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
 937			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
 938	}
 939
 940	kfree(edesc);
 941
 942	aead_request_complete(req, err);
 943}
 944
 945static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
 946					  struct talitos_desc *desc,
 947					  void *context, int err)
 948{
 949	struct aead_request *req = context;
 950	struct talitos_edesc *edesc;
 951
 952	edesc = container_of(desc, struct talitos_edesc, desc);
 953
 954	ipsec_esp_unmap(dev, edesc, req);
 955
 956	/* check ICV auth status */
 957	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
 958		     DESC_HDR_LO_ICCR1_PASS))
 959		err = -EBADMSG;
 960
 961	kfree(edesc);
 962
 963	aead_request_complete(req, err);
 964}
 965
 966/*
 967 * convert scatterlist to SEC h/w link table format
 968 * stop at cryptlen bytes
 969 */
 970static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
 971			   int cryptlen, struct talitos_ptr *link_tbl_ptr)
 972{
 973	int n_sg = sg_count;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 974
 975	while (n_sg--) {
 976		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
 977		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
 978		link_tbl_ptr->j_extent = 0;
 979		link_tbl_ptr++;
 980		cryptlen -= sg_dma_len(sg);
 981		sg = scatterwalk_sg_next(sg);
 982	}
 983
 984	/* adjust (decrease) last one (or two) entry's len to cryptlen */
 985	link_tbl_ptr--;
 986	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
 987		/* Empty this entry, and move to previous one */
 988		cryptlen += be16_to_cpu(link_tbl_ptr->len);
 989		link_tbl_ptr->len = 0;
 990		sg_count--;
 991		link_tbl_ptr--;
 992	}
 993	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
 994					+ cryptlen);
 995
 996	/* tag end of link table */
 997	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 998
 999	return sg_count;
1000}
1001
 
 
 
 
 
 
 
 
 
1002/*
1003 * fill in and submit ipsec_esp descriptor
1004 */
1005static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1006		     u8 *giv, u64 seq,
1007		     void (*callback) (struct device *dev,
1008				       struct talitos_desc *desc,
1009				       void *context, int error))
1010{
1011	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
 
1012	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1013	struct device *dev = ctx->dev;
1014	struct talitos_desc *desc = &edesc->desc;
1015	unsigned int cryptlen = areq->cryptlen;
1016	unsigned int authsize = ctx->authsize;
1017	unsigned int ivsize = crypto_aead_ivsize(aead);
 
1018	int sg_count, ret;
1019	int sg_link_tbl_len;
 
 
 
 
 
 
 
1020
1021	/* hmac key */
1022	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
1023			       0, DMA_TO_DEVICE);
 
 
 
 
 
 
 
 
 
1024	/* hmac data */
1025	map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
1026			       sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
 
 
 
 
 
 
1027	/* cipher iv */
1028	map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
1029			       DMA_TO_DEVICE);
1030
1031	/* cipher key */
1032	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1033			       (char *)&ctx->key + ctx->authkeylen, 0,
1034			       DMA_TO_DEVICE);
1035
1036	/*
1037	 * cipher in
1038	 * map and adjust cipher len to aead request cryptlen.
1039	 * extent is bytes of HMAC postpended to ciphertext,
1040	 * typically 12 for ipsec
1041	 */
1042	desc->ptr[4].len = cpu_to_be16(cryptlen);
1043	desc->ptr[4].j_extent = authsize;
1044
1045	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1046				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1047							   : DMA_TO_DEVICE,
1048				  edesc->src_is_chained);
1049
1050	if (sg_count == 1) {
1051		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
1052	} else {
1053		sg_link_tbl_len = cryptlen;
1054
1055		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1056			sg_link_tbl_len = cryptlen + authsize;
1057
1058		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
1059					  &edesc->link_tbl[0]);
1060		if (sg_count > 1) {
1061			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1062			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
1063			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1064						   edesc->dma_len,
1065						   DMA_BIDIRECTIONAL);
1066		} else {
1067			/* Only one segment now, so no link tbl needed */
1068			to_talitos_ptr(&desc->ptr[4],
1069				       sg_dma_address(areq->src));
1070		}
1071	}
1072
1073	/* cipher out */
1074	desc->ptr[5].len = cpu_to_be16(cryptlen);
1075	desc->ptr[5].j_extent = authsize;
 
 
 
1076
1077	if (areq->src != areq->dst)
1078		sg_count = talitos_map_sg(dev, areq->dst,
1079					  edesc->dst_nents ? : 1,
1080					  DMA_FROM_DEVICE,
1081					  edesc->dst_is_chained);
 
 
 
1082
1083	if (sg_count == 1) {
1084		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
1085	} else {
1086		struct talitos_ptr *link_tbl_ptr =
1087			&edesc->link_tbl[edesc->src_nents + 1];
1088
1089		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1090			       (edesc->src_nents + 1) *
1091			       sizeof(struct talitos_ptr));
1092		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1093					  link_tbl_ptr);
1094
1095		/* Add an entry to the link table for ICV data */
1096		link_tbl_ptr += sg_count - 1;
1097		link_tbl_ptr->j_extent = 0;
1098		sg_count++;
1099		link_tbl_ptr++;
1100		link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1101		link_tbl_ptr->len = cpu_to_be16(authsize);
1102
1103		/* icv data follows link tables */
1104		to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
1105			       (edesc->src_nents + edesc->dst_nents + 2) *
1106			       sizeof(struct talitos_ptr));
1107		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1108		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1109					   edesc->dma_len, DMA_BIDIRECTIONAL);
 
 
 
1110	}
1111
1112	/* iv out */
1113	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
1114			       DMA_FROM_DEVICE);
 
 
 
 
 
 
1115
1116	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1117	if (ret != -EINPROGRESS) {
1118		ipsec_esp_unmap(dev, edesc, areq);
1119		kfree(edesc);
1120	}
1121	return ret;
1122}
1123
1124/*
1125 * derive number of elements in scatterlist
1126 */
1127static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
1128{
1129	struct scatterlist *sg = sg_list;
1130	int sg_nents = 0;
1131
1132	*chained = 0;
1133	while (nbytes > 0) {
1134		sg_nents++;
1135		nbytes -= sg->length;
1136		if (!sg_is_last(sg) && (sg + 1)->length == 0)
1137			*chained = 1;
1138		sg = scatterwalk_sg_next(sg);
1139	}
1140
1141	return sg_nents;
1142}
1143
1144/**
1145 * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
1146 * @sgl:		 The SG list
1147 * @nents:		 Number of SG entries
1148 * @buf:		 Where to copy to
1149 * @buflen:		 The number of bytes to copy
1150 * @skip:		 The number of bytes to skip before copying.
1151 *                       Note: skip + buflen should equal SG total size.
1152 *
1153 * Returns the number of copied bytes.
1154 *
1155 **/
1156static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
1157				    void *buf, size_t buflen, unsigned int skip)
1158{
1159	unsigned int offset = 0;
1160	unsigned int boffset = 0;
1161	struct sg_mapping_iter miter;
1162	unsigned long flags;
1163	unsigned int sg_flags = SG_MITER_ATOMIC;
1164	size_t total_buffer = buflen + skip;
1165
1166	sg_flags |= SG_MITER_FROM_SG;
1167
1168	sg_miter_start(&miter, sgl, nents, sg_flags);
1169
1170	local_irq_save(flags);
1171
1172	while (sg_miter_next(&miter) && offset < total_buffer) {
1173		unsigned int len;
1174		unsigned int ignore;
1175
1176		if ((offset + miter.length) > skip) {
1177			if (offset < skip) {
1178				/* Copy part of this segment */
1179				ignore = skip - offset;
1180				len = miter.length - ignore;
1181				if (boffset + len > buflen)
1182					len = buflen - boffset;
1183				memcpy(buf + boffset, miter.addr + ignore, len);
1184			} else {
1185				/* Copy all of this segment (up to buflen) */
1186				len = miter.length;
1187				if (boffset + len > buflen)
1188					len = buflen - boffset;
1189				memcpy(buf + boffset, miter.addr, len);
1190			}
1191			boffset += len;
1192		}
1193		offset += miter.length;
1194	}
1195
1196	sg_miter_stop(&miter);
1197
1198	local_irq_restore(flags);
1199	return boffset;
1200}
1201
1202/*
1203 * allocate and map the extended descriptor
1204 */
1205static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1206						 struct scatterlist *src,
1207						 struct scatterlist *dst,
1208						 int hash_result,
 
1209						 unsigned int cryptlen,
1210						 unsigned int authsize,
 
1211						 int icv_stashing,
1212						 u32 cryptoflags)
 
1213{
1214	struct talitos_edesc *edesc;
1215	int src_nents, dst_nents, alloc_len, dma_len;
1216	int src_chained, dst_chained = 0;
1217	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1218		      GFP_ATOMIC;
 
 
 
1219
1220	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
1221		dev_err(dev, "length exceeds h/w max limit\n");
1222		return ERR_PTR(-EINVAL);
1223	}
1224
1225	src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1226	src_nents = (src_nents == 1) ? 0 : src_nents;
1227
1228	if (hash_result) {
1229		dst_nents = 0;
1230	} else {
1231		if (dst == src) {
1232			dst_nents = src_nents;
1233		} else {
1234			dst_nents = sg_count(dst, cryptlen + authsize,
1235					     &dst_chained);
1236			dst_nents = (dst_nents == 1) ? 0 : dst_nents;
 
 
 
 
 
 
 
 
 
 
 
1237		}
 
1238	}
1239
1240	/*
1241	 * allocate space for base edesc plus the link tables,
1242	 * allowing for two separate entries for ICV and generated ICV (+ 2),
1243	 * and the ICV data itself
1244	 */
1245	alloc_len = sizeof(struct talitos_edesc);
1246	if (src_nents || dst_nents) {
1247		dma_len = (src_nents + dst_nents + 2) *
1248				 sizeof(struct talitos_ptr) + authsize;
 
 
 
 
1249		alloc_len += dma_len;
1250	} else {
1251		dma_len = 0;
1252		alloc_len += icv_stashing ? authsize : 0;
1253	}
 
1254
1255	edesc = kmalloc(alloc_len, GFP_DMA | flags);
1256	if (!edesc) {
1257		dev_err(dev, "could not allocate edescriptor\n");
 
 
 
 
1258		return ERR_PTR(-ENOMEM);
 
 
 
1259	}
 
1260
1261	edesc->src_nents = src_nents;
1262	edesc->dst_nents = dst_nents;
1263	edesc->src_is_chained = src_chained;
1264	edesc->dst_is_chained = dst_chained;
1265	edesc->dma_len = dma_len;
1266	if (dma_len)
1267		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1268						     edesc->dma_len,
1269						     DMA_BIDIRECTIONAL);
1270
1271	return edesc;
1272}
1273
1274static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
1275					      int icv_stashing)
1276{
1277	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
 
1278	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 
 
1279
1280	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
1281				   areq->cryptlen, ctx->authsize, icv_stashing,
1282				   areq->base.flags);
 
1283}
1284
1285static int aead_encrypt(struct aead_request *req)
1286{
1287	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1288	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1289	struct talitos_edesc *edesc;
1290
1291	/* allocate extended descriptor */
1292	edesc = aead_edesc_alloc(req, 0);
1293	if (IS_ERR(edesc))
1294		return PTR_ERR(edesc);
1295
1296	/* set encrypt */
1297	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1298
1299	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
1300}
1301
1302static int aead_decrypt(struct aead_request *req)
1303{
1304	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
 
1305	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1306	unsigned int authsize = ctx->authsize;
1307	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1308	struct talitos_edesc *edesc;
1309	struct scatterlist *sg;
1310	void *icvdata;
1311
1312	req->cryptlen -= authsize;
1313
1314	/* allocate extended descriptor */
1315	edesc = aead_edesc_alloc(req, 1);
1316	if (IS_ERR(edesc))
1317		return PTR_ERR(edesc);
1318
1319	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
 
1320	    ((!edesc->src_nents && !edesc->dst_nents) ||
1321	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1322
1323		/* decrypt and check the ICV */
1324		edesc->desc.hdr = ctx->desc_hdr_template |
1325				  DESC_HDR_DIR_INBOUND |
1326				  DESC_HDR_MODE1_MDEU_CICV;
1327
1328		/* reset integrity check result bits */
1329		edesc->desc.hdr_lo = 0;
1330
1331		return ipsec_esp(edesc, req, NULL, 0,
1332				 ipsec_esp_decrypt_hwauth_done);
1333
1334	}
1335
1336	/* Have to check the ICV with software */
1337	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1338
1339	/* stash incoming ICV for later cmp with ICV generated by the h/w */
1340	if (edesc->dma_len)
1341		icvdata = &edesc->link_tbl[edesc->src_nents +
1342					   edesc->dst_nents + 2];
1343	else
1344		icvdata = &edesc->link_tbl[0];
1345
1346	sg = sg_last(req->src, edesc->src_nents ? : 1);
1347
1348	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1349	       ctx->authsize);
1350
1351	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
1352}
1353
1354static int aead_givencrypt(struct aead_givcrypt_request *req)
 
1355{
1356	struct aead_request *areq = &req->areq;
1357	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1358	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1359	struct talitos_edesc *edesc;
 
1360
1361	/* allocate extended descriptor */
1362	edesc = aead_edesc_alloc(areq, 0);
1363	if (IS_ERR(edesc))
1364		return PTR_ERR(edesc);
1365
1366	/* set encrypt */
1367	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1368
1369	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1370	/* avoid consecutive packets going out with same IV */
1371	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1372
1373	return ipsec_esp(edesc, areq, req->giv, req->seq,
1374			 ipsec_esp_encrypt_done);
 
 
 
1375}
1376
1377static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1378			     const u8 *key, unsigned int keylen)
1379{
1380	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
 
 
1381
1382	memcpy(&ctx->key, key, keylen);
1383	ctx->keylen = keylen;
 
 
 
 
1384
1385	return 0;
1386}
1387
1388static void common_nonsnoop_unmap(struct device *dev,
1389				  struct talitos_edesc *edesc,
1390				  struct ablkcipher_request *areq)
1391{
1392	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1393	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
 
1394	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1395
1396	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
1397
1398	if (edesc->dma_len)
1399		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1400				 DMA_BIDIRECTIONAL);
1401}
1402
1403static void ablkcipher_done(struct device *dev,
1404			    struct talitos_desc *desc, void *context,
1405			    int err)
1406{
1407	struct ablkcipher_request *areq = context;
 
 
 
1408	struct talitos_edesc *edesc;
1409
1410	edesc = container_of(desc, struct talitos_edesc, desc);
1411
1412	common_nonsnoop_unmap(dev, edesc, areq);
 
1413
1414	kfree(edesc);
1415
1416	areq->base.complete(&areq->base, err);
1417}
1418
1419static int common_nonsnoop(struct talitos_edesc *edesc,
1420			   struct ablkcipher_request *areq,
1421			   void (*callback) (struct device *dev,
1422					     struct talitos_desc *desc,
1423					     void *context, int error))
1424{
1425	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1426	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1427	struct device *dev = ctx->dev;
1428	struct talitos_desc *desc = &edesc->desc;
1429	unsigned int cryptlen = areq->nbytes;
1430	unsigned int ivsize;
1431	int sg_count, ret;
 
 
 
 
 
1432
1433	/* first DWORD empty */
1434	desc->ptr[0].len = 0;
1435	to_talitos_ptr(&desc->ptr[0], 0);
1436	desc->ptr[0].j_extent = 0;
1437
1438	/* cipher iv */
1439	ivsize = crypto_ablkcipher_ivsize(cipher);
1440	map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, areq->info, 0,
1441			       DMA_TO_DEVICE);
1442
1443	/* cipher key */
1444	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1445			       (char *)&ctx->key, 0, DMA_TO_DEVICE);
1446
 
 
 
 
 
 
 
 
1447	/*
1448	 * cipher in
1449	 */
1450	desc->ptr[3].len = cpu_to_be16(cryptlen);
1451	desc->ptr[3].j_extent = 0;
 
 
1452
1453	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1454				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1455							   : DMA_TO_DEVICE,
1456				  edesc->src_is_chained);
1457
1458	if (sg_count == 1) {
1459		to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
1460	} else {
1461		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
1462					  &edesc->link_tbl[0]);
1463		if (sg_count > 1) {
1464			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1465			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1466			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1467						   edesc->dma_len,
1468						   DMA_BIDIRECTIONAL);
1469		} else {
1470			/* Only one segment now, so no link tbl needed */
1471			to_talitos_ptr(&desc->ptr[3],
1472				       sg_dma_address(areq->src));
1473		}
1474	}
1475
1476	/* cipher out */
1477	desc->ptr[4].len = cpu_to_be16(cryptlen);
1478	desc->ptr[4].j_extent = 0;
1479
1480	if (areq->src != areq->dst)
1481		sg_count = talitos_map_sg(dev, areq->dst,
1482					  edesc->dst_nents ? : 1,
1483					  DMA_FROM_DEVICE,
1484					  edesc->dst_is_chained);
1485
1486	if (sg_count == 1) {
1487		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
1488	} else {
1489		struct talitos_ptr *link_tbl_ptr =
1490			&edesc->link_tbl[edesc->src_nents + 1];
1491
1492		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1493					      (edesc->src_nents + 1) *
1494					      sizeof(struct talitos_ptr));
1495		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1496		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1497					  link_tbl_ptr);
1498		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1499					   edesc->dma_len, DMA_BIDIRECTIONAL);
1500	}
1501
1502	/* iv out */
1503	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
1504			       DMA_FROM_DEVICE);
1505
1506	/* last DWORD empty */
1507	desc->ptr[6].len = 0;
1508	to_talitos_ptr(&desc->ptr[6], 0);
1509	desc->ptr[6].j_extent = 0;
 
1510
1511	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1512	if (ret != -EINPROGRESS) {
1513		common_nonsnoop_unmap(dev, edesc, areq);
1514		kfree(edesc);
1515	}
1516	return ret;
1517}
1518
1519static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1520						    areq)
1521{
1522	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1523	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
 
1524
1525	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
1526				   areq->nbytes, 0, 0, areq->base.flags);
 
1527}
1528
1529static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1530{
1531	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1532	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1533	struct talitos_edesc *edesc;
 
 
 
 
 
 
 
 
1534
1535	/* allocate extended descriptor */
1536	edesc = ablkcipher_edesc_alloc(areq);
1537	if (IS_ERR(edesc))
1538		return PTR_ERR(edesc);
1539
1540	/* set encrypt */
1541	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1542
1543	return common_nonsnoop(edesc, areq, ablkcipher_done);
1544}
1545
1546static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1547{
1548	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1549	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1550	struct talitos_edesc *edesc;
1551
1552	/* allocate extended descriptor */
1553	edesc = ablkcipher_edesc_alloc(areq);
1554	if (IS_ERR(edesc))
1555		return PTR_ERR(edesc);
1556
1557	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1558
1559	return common_nonsnoop(edesc, areq, ablkcipher_done);
1560}
1561
1562static void common_nonsnoop_hash_unmap(struct device *dev,
1563				       struct talitos_edesc *edesc,
1564				       struct ahash_request *areq)
1565{
1566	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1567
1568	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1569
1570	/* When using hashctx-in, must unmap it. */
1571	if (edesc->desc.ptr[1].len)
1572		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1573					 DMA_TO_DEVICE);
1574
1575	if (edesc->desc.ptr[2].len)
1576		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1577					 DMA_TO_DEVICE);
1578
1579	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
1580
1581	if (edesc->dma_len)
1582		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1583				 DMA_BIDIRECTIONAL);
1584
1585}
1586
1587static void ahash_done(struct device *dev,
1588		       struct talitos_desc *desc, void *context,
1589		       int err)
1590{
1591	struct ahash_request *areq = context;
1592	struct talitos_edesc *edesc =
1593		 container_of(desc, struct talitos_edesc, desc);
1594	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1595
1596	if (!req_ctx->last && req_ctx->to_hash_later) {
1597		/* Position any partial block for next update/final/finup */
1598		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1599		req_ctx->nbuf = req_ctx->to_hash_later;
1600	}
1601	common_nonsnoop_hash_unmap(dev, edesc, areq);
1602
1603	kfree(edesc);
1604
1605	areq->base.complete(&areq->base, err);
1606}
1607
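/*
 * build and submit the seven-dword hashing descriptor:
 *   ptr[0] - empty
 *   ptr[1] - hash context in (absent when the h/w inits the context)
 *   ptr[2] - HMAC key, when the tfm is keyed
 *   ptr[3] - data in, direct or via link table
 *   ptr[4] - empty
 *   ptr[5] - digest out on the last op, intermediate context out otherwise
 *   ptr[6] - empty
 */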
1608static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1609				struct ahash_request *areq, unsigned int length,
1610				void (*callback) (struct device *dev,
1611						  struct talitos_desc *desc,
1612						  void *context, int error))
1613{
1614	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1615	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1616	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1617	struct device *dev = ctx->dev;
1618	struct talitos_desc *desc = &edesc->desc;
1619	int sg_count, ret;
1620
1621	/* first DWORD empty */
1622	desc->ptr[0] = zero_entry;
1623
1624	/* hash context in */
1625	if (!req_ctx->first || req_ctx->swinit) {
1626		map_single_talitos_ptr(dev, &desc->ptr[1],
1627				       req_ctx->hw_context_size,
1628				       (char *)req_ctx->hw_context, 0,
1629				       DMA_TO_DEVICE);
1630		req_ctx->swinit = 0;
1631	} else {
1632		desc->ptr[1] = zero_entry;
1633		/* Indicate next op is not the first. */
1634		req_ctx->first = 0;
1635	}
1636
1637	/* HMAC key */
1638	if (ctx->keylen)
1639		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1640				       (char *)&ctx->key, 0, DMA_TO_DEVICE);
1641	else
1642		desc->ptr[2] = zero_entry;
1643
1644	/*
1645	 * data in
1646	 */
1647	desc->ptr[3].len = cpu_to_be16(length);
1648	desc->ptr[3].j_extent = 0;
1649
1650	sg_count = talitos_map_sg(dev, req_ctx->psrc,
1651				  edesc->src_nents ? : 1,
1652				  DMA_TO_DEVICE,
1653				  edesc->src_is_chained);
1654
1655	if (sg_count == 1) {
1656		to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
1657	} else {
1658		sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
1659					  &edesc->link_tbl[0]);
1660		if (sg_count > 1) {
1661			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1662			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1663			dma_sync_single_for_device(dev,
1664						   edesc->dma_link_tbl,
1665						   edesc->dma_len,
1666						   DMA_BIDIRECTIONAL);
1667		} else {
1668			/* Only one segment now, so no link tbl needed */
1669			to_talitos_ptr(&desc->ptr[3],
1670				       sg_dma_address(req_ctx->psrc));
1671		}
1672	}
1673
1674	/* fifth DWORD empty */
1675	desc->ptr[4] = zero_entry;
1676
1677	/* hash/HMAC out -or- hash context out */
1678	if (req_ctx->last)
1679		map_single_talitos_ptr(dev, &desc->ptr[5],
1680				       crypto_ahash_digestsize(tfm),
1681				       areq->result, 0, DMA_FROM_DEVICE);
1682	else
1683		map_single_talitos_ptr(dev, &desc->ptr[5],
1684				       req_ctx->hw_context_size,
1685				       req_ctx->hw_context, 0, DMA_FROM_DEVICE);
1686
1687	/* last DWORD empty */
1688	desc->ptr[6] = zero_entry;
1689
1690	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1691	if (ret != -EINPROGRESS) {
1692		common_nonsnoop_hash_unmap(dev, edesc, areq);
1693		kfree(edesc);
1694	}
1695	return ret;
1696}
1697
1698static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1699					       unsigned int nbytes)
1700{
1701	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1702	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1703	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1704
1705	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
1706				   nbytes, 0, 0, areq->base.flags);
1707}
1708
1709static int ahash_init(struct ahash_request *areq)
1710{
1711	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1712	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1713
1714	/* Initialize the context */
1715	req_ctx->nbuf = 0;
1716	req_ctx->first = 1; /* first indicates h/w must init its context */
1717	req_ctx->swinit = 0; /* assume h/w init of context */
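	/*
	 * the saved context holds the digest state plus the 64-bit message
	 * count; the wider sha384/sha512 state needs the larger size
	 */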
1718	req_ctx->hw_context_size =
1719		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1720			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1721			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1722
1723	return 0;
1724}
1725
1726/*
1727 * on h/w without explicit sha224 support, we initialize h/w context
1728 * manually with sha224 constants, and tell it to run sha256.
1729 */
1730static int ahash_init_sha224_swinit(struct ahash_request *areq)
1731{
1732	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1733
1734	ahash_init(areq);
1735	req_ctx->swinit = 1; /* prevent h/w from initializing context with sha256 values */
1736
1737	req_ctx->hw_context[0] = SHA224_H0;
1738	req_ctx->hw_context[1] = SHA224_H1;
1739	req_ctx->hw_context[2] = SHA224_H2;
1740	req_ctx->hw_context[3] = SHA224_H3;
1741	req_ctx->hw_context[4] = SHA224_H4;
1742	req_ctx->hw_context[5] = SHA224_H5;
1743	req_ctx->hw_context[6] = SHA224_H6;
1744	req_ctx->hw_context[7] = SHA224_H7;
1745
1746	/* init 64-bit count */
1747	req_ctx->hw_context[8] = 0;
1748	req_ctx->hw_context[9] = 0;
1749
1750	return 0;
1751}
1752
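/*
 * core update/final logic.  Sub-blocksize leftovers are buffered in the
 * request context, and on non-final ops the trailing partial block (or a
 * whole block, when the length divides evenly) is held back so the final
 * op is never empty.  Illustrative numbers only: with a 64-byte blocksize,
 * 10 bytes already buffered and a 100-byte update, nbytes_to_hash = 110
 * and to_hash_later = 110 & 63 = 46, so 64 bytes are hashed now and the
 * trailing 46 carry over to the next op.
 */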
1753static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1754{
1755	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1756	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1757	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1758	struct talitos_edesc *edesc;
1759	unsigned int blocksize =
1760			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1761	unsigned int nbytes_to_hash;
1762	unsigned int to_hash_later;
1763	unsigned int nsg;
1764	int chained;
1765
1766	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1767		/* Buffer up to one whole block */
1768		sg_copy_to_buffer(areq->src,
1769				  sg_count(areq->src, nbytes, &chained),
1770				  req_ctx->buf + req_ctx->nbuf, nbytes);
1771		req_ctx->nbuf += nbytes;
1772		return 0;
1773	}
1774
1775	/* At least (blocksize + 1) bytes are available to hash */
1776	nbytes_to_hash = nbytes + req_ctx->nbuf;
1777	to_hash_later = nbytes_to_hash & (blocksize - 1);
1778
1779	if (req_ctx->last)
1780		to_hash_later = 0;
1781	else if (to_hash_later)
1782		/* There is a partial block. Hash the full block(s) now */
1783		nbytes_to_hash -= to_hash_later;
1784	else {
1785		/* Keep one block buffered */
1786		nbytes_to_hash -= blocksize;
1787		to_hash_later = blocksize;
1788	}
1789
1790	/* Chain in any previously buffered data */
1791	if (req_ctx->nbuf) {
1792		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1793		sg_init_table(req_ctx->bufsl, nsg);
1794		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1795		if (nsg > 1)
1796			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
1797		req_ctx->psrc = req_ctx->bufsl;
1798	} else
1799		req_ctx->psrc = areq->src;
1800
1801	if (to_hash_later) {
1802		int nents = sg_count(areq->src, nbytes, &chained);
1803		sg_copy_end_to_buffer(areq->src, nents,
1804				      req_ctx->bufnext,
1805				      to_hash_later,
1806				      nbytes - to_hash_later);
1807	}
1808	req_ctx->to_hash_later = to_hash_later;
1809
1810	/* Allocate extended descriptor */
1811	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1812	if (IS_ERR(edesc))
1813		return PTR_ERR(edesc);
1814
1815	edesc->desc.hdr = ctx->desc_hdr_template;
1816
1817	/* On last one, request SEC to pad; otherwise continue */
1818	if (req_ctx->last)
1819		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1820	else
1821		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1822
1823	/* request SEC to INIT hash. */
1824	if (req_ctx->first && !req_ctx->swinit)
1825		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1826
1827	/* When the tfm context has a keylen, it's an HMAC.
1828	 * A first or last (i.e. not middle) descriptor must request HMAC.
1829	 */
1830	if (ctx->keylen && (req_ctx->first || req_ctx->last))
1831		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1832
1833	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1834				    ahash_done);
1835}
1836
1837static int ahash_update(struct ahash_request *areq)
1838{
1839	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1840
1841	req_ctx->last = 0;
1842
1843	return ahash_process_req(areq, areq->nbytes);
1844}
1845
1846static int ahash_final(struct ahash_request *areq)
1847{
1848	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1849
1850	req_ctx->last = 1;
1851
1852	return ahash_process_req(areq, 0);
1853}
1854
1855static int ahash_finup(struct ahash_request *areq)
1856{
1857	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1858
1859	req_ctx->last = 1;
1860
1861	return ahash_process_req(areq, areq->nbytes);
1862}
1863
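/* digest() is init() plus a one-shot, final pass over all of nbytes */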
1864static int ahash_digest(struct ahash_request *areq)
1865{
1866	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1867	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1868
1869	ahash->init(areq);
1870	req_ctx->last = 1;
1871
1872	return ahash_process_req(areq, areq->nbytes);
1873}
1874
1875struct talitos_alg_template {
1876	u32 type;
1877	union {
1878		struct crypto_alg crypto;
1879		struct ahash_alg hash;
1880	} alg;
1881	__be32 desc_hdr_template;
1882};
1883
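/*
 * algorithm templates: each entry pairs a crypto API definition with the
 * desc_hdr_template selecting the descriptor type, execution unit(s) and
 * modes.  At probe time, only entries whose header bits pass hw_supports()
 * are registered.
 */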
1884static struct talitos_alg_template driver_algs[] = {
1885	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
1886	{	.type = CRYPTO_ALG_TYPE_AEAD,
1887		.alg.crypto = {
1888			.cra_name = "authenc(hmac(sha1),cbc(aes))",
1889			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1890			.cra_blocksize = AES_BLOCK_SIZE,
1891			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1892			.cra_type = &crypto_aead_type,
1893			.cra_aead = {
1894				.setkey = aead_setkey,
1895				.setauthsize = aead_setauthsize,
1896				.encrypt = aead_encrypt,
1897				.decrypt = aead_decrypt,
1898				.givencrypt = aead_givencrypt,
1899				.geniv = "<built-in>",
1900				.ivsize = AES_BLOCK_SIZE,
1901				.maxauthsize = SHA1_DIGEST_SIZE,
1902			}
1903		},
1904		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1905			             DESC_HDR_SEL0_AESU |
1906		                     DESC_HDR_MODE0_AESU_CBC |
1907		                     DESC_HDR_SEL1_MDEUA |
1908		                     DESC_HDR_MODE1_MDEU_INIT |
1909		                     DESC_HDR_MODE1_MDEU_PAD |
1910		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1911	},
1912	{	.type = CRYPTO_ALG_TYPE_AEAD,
1913		.alg.crypto = {
1914			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1915			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1916			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1917			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1918			.cra_type = &crypto_aead_type,
1919			.cra_aead = {
1920				.setkey = aead_setkey,
1921				.setauthsize = aead_setauthsize,
1922				.encrypt = aead_encrypt,
1923				.decrypt = aead_decrypt,
1924				.givencrypt = aead_givencrypt,
1925				.geniv = "<built-in>",
1926				.ivsize = DES3_EDE_BLOCK_SIZE,
1927				.maxauthsize = SHA1_DIGEST_SIZE,
1928			}
1929		},
1930		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1931			             DESC_HDR_SEL0_DEU |
1932		                     DESC_HDR_MODE0_DEU_CBC |
1933		                     DESC_HDR_MODE0_DEU_3DES |
1934		                     DESC_HDR_SEL1_MDEUA |
1935		                     DESC_HDR_MODE1_MDEU_INIT |
1936		                     DESC_HDR_MODE1_MDEU_PAD |
1937		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1938	},
1939	{	.type = CRYPTO_ALG_TYPE_AEAD,
1940		.alg.crypto = {
1941			.cra_name = "authenc(hmac(sha256),cbc(aes))",
1942			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
1943			.cra_blocksize = AES_BLOCK_SIZE,
1944			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1945			.cra_type = &crypto_aead_type,
1946			.cra_aead = {
1947				.setkey = aead_setkey,
1948				.setauthsize = aead_setauthsize,
1949				.encrypt = aead_encrypt,
1950				.decrypt = aead_decrypt,
1951				.givencrypt = aead_givencrypt,
1952				.geniv = "<built-in>",
1953				.ivsize = AES_BLOCK_SIZE,
1954				.maxauthsize = SHA256_DIGEST_SIZE,
1955			}
1956		},
1957		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1958			             DESC_HDR_SEL0_AESU |
1959		                     DESC_HDR_MODE0_AESU_CBC |
1960		                     DESC_HDR_SEL1_MDEUA |
1961		                     DESC_HDR_MODE1_MDEU_INIT |
1962		                     DESC_HDR_MODE1_MDEU_PAD |
1963		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1964	},
1965	{	.type = CRYPTO_ALG_TYPE_AEAD,
1966		.alg.crypto = {
1967			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
1968			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
1969			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1970			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1971			.cra_type = &crypto_aead_type,
1972			.cra_aead = {
1973				.setkey = aead_setkey,
1974				.setauthsize = aead_setauthsize,
1975				.encrypt = aead_encrypt,
1976				.decrypt = aead_decrypt,
1977				.givencrypt = aead_givencrypt,
1978				.geniv = "<built-in>",
1979				.ivsize = DES3_EDE_BLOCK_SIZE,
1980				.maxauthsize = SHA256_DIGEST_SIZE,
1981			}
1982		},
1983		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1984			             DESC_HDR_SEL0_DEU |
1985		                     DESC_HDR_MODE0_DEU_CBC |
1986		                     DESC_HDR_MODE0_DEU_3DES |
1987		                     DESC_HDR_SEL1_MDEUA |
1988		                     DESC_HDR_MODE1_MDEU_INIT |
1989		                     DESC_HDR_MODE1_MDEU_PAD |
1990		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1991	},
1992	{	.type = CRYPTO_ALG_TYPE_AEAD,
1993		.alg.crypto = {
1994			.cra_name = "authenc(hmac(md5),cbc(aes))",
1995			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
1996			.cra_blocksize = AES_BLOCK_SIZE,
1997			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1998			.cra_type = &crypto_aead_type,
1999			.cra_aead = {
2000				.setkey = aead_setkey,
2001				.setauthsize = aead_setauthsize,
2002				.encrypt = aead_encrypt,
2003				.decrypt = aead_decrypt,
2004				.givencrypt = aead_givencrypt,
2005				.geniv = "<built-in>",
2006				.ivsize = AES_BLOCK_SIZE,
2007				.maxauthsize = MD5_DIGEST_SIZE,
2008			}
2009		},
2010		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2011			             DESC_HDR_SEL0_AESU |
2012		                     DESC_HDR_MODE0_AESU_CBC |
2013		                     DESC_HDR_SEL1_MDEUA |
2014		                     DESC_HDR_MODE1_MDEU_INIT |
2015		                     DESC_HDR_MODE1_MDEU_PAD |
2016		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2017	},
2018	{	.type = CRYPTO_ALG_TYPE_AEAD,
2019		.alg.crypto = {
2020			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2021			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
2022			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2023			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2024			.cra_type = &crypto_aead_type,
2025			.cra_aead = {
2026				.setkey = aead_setkey,
2027				.setauthsize = aead_setauthsize,
2028				.encrypt = aead_encrypt,
2029				.decrypt = aead_decrypt,
2030				.givencrypt = aead_givencrypt,
2031				.geniv = "<built-in>",
2032				.ivsize = DES3_EDE_BLOCK_SIZE,
2033				.maxauthsize = MD5_DIGEST_SIZE,
2034			}
2035		},
2036		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2037			             DESC_HDR_SEL0_DEU |
2038		                     DESC_HDR_MODE0_DEU_CBC |
2039		                     DESC_HDR_MODE0_DEU_3DES |
2040		                     DESC_HDR_SEL1_MDEUA |
2041		                     DESC_HDR_MODE1_MDEU_INIT |
2042		                     DESC_HDR_MODE1_MDEU_PAD |
2043		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2044	},
2045	/* ABLKCIPHER algorithms. */
2046	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2047		.alg.crypto = {
2048			.cra_name = "cbc(aes)",
2049			.cra_driver_name = "cbc-aes-talitos",
2050			.cra_blocksize = AES_BLOCK_SIZE,
2051			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2052                                     CRYPTO_ALG_ASYNC,
2053			.cra_type = &crypto_ablkcipher_type,
2054			.cra_ablkcipher = {
2055				.setkey = ablkcipher_setkey,
2056				.encrypt = ablkcipher_encrypt,
2057				.decrypt = ablkcipher_decrypt,
2058				.geniv = "eseqiv",
2059				.min_keysize = AES_MIN_KEY_SIZE,
2060				.max_keysize = AES_MAX_KEY_SIZE,
2061				.ivsize = AES_BLOCK_SIZE,
2062			}
2063		},
2064		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2065				     DESC_HDR_SEL0_AESU |
2066				     DESC_HDR_MODE0_AESU_CBC,
2067	},
2068	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2069		.alg.crypto = {
2070			.cra_name = "cbc(des3_ede)",
2071			.cra_driver_name = "cbc-3des-talitos",
2072			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2073			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2074                                     CRYPTO_ALG_ASYNC,
2075			.cra_type = &crypto_ablkcipher_type,
2076			.cra_ablkcipher = {
2077				.setkey = ablkcipher_setkey,
2078				.encrypt = ablkcipher_encrypt,
2079				.decrypt = ablkcipher_decrypt,
2080				.geniv = "eseqiv",
2081				.min_keysize = DES3_EDE_KEY_SIZE,
2082				.max_keysize = DES3_EDE_KEY_SIZE,
2083				.ivsize = DES3_EDE_BLOCK_SIZE,
2084			}
2085		},
2086		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2087			             DESC_HDR_SEL0_DEU |
2088		                     DESC_HDR_MODE0_DEU_CBC |
2089		                     DESC_HDR_MODE0_DEU_3DES,
2090	},
2091	/* AHASH algorithms. */
2092	{	.type = CRYPTO_ALG_TYPE_AHASH,
2093		.alg.hash = {
2094			.init = ahash_init,
2095			.update = ahash_update,
2096			.final = ahash_final,
2097			.finup = ahash_finup,
2098			.digest = ahash_digest,
2099			.halg.digestsize = MD5_DIGEST_SIZE,
2100			.halg.base = {
2101				.cra_name = "md5",
2102				.cra_driver_name = "md5-talitos",
2103				.cra_blocksize = MD5_BLOCK_SIZE,
2104				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2105					     CRYPTO_ALG_ASYNC,
2106				.cra_type = &crypto_ahash_type
2107			}
2108		},
2109		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2110				     DESC_HDR_SEL0_MDEUA |
2111				     DESC_HDR_MODE0_MDEU_MD5,
2112	},
2113	{	.type = CRYPTO_ALG_TYPE_AHASH,
2114		.alg.hash = {
2115			.init = ahash_init,
2116			.update = ahash_update,
2117			.final = ahash_final,
2118			.finup = ahash_finup,
2119			.digest = ahash_digest,
2120			.halg.digestsize = SHA1_DIGEST_SIZE,
2121			.halg.base = {
2122				.cra_name = "sha1",
2123				.cra_driver_name = "sha1-talitos",
2124				.cra_blocksize = SHA1_BLOCK_SIZE,
2125				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2126					     CRYPTO_ALG_ASYNC,
2127				.cra_type = &crypto_ahash_type
2128			}
2129		},
2130		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2131				     DESC_HDR_SEL0_MDEUA |
2132				     DESC_HDR_MODE0_MDEU_SHA1,
2133	},
2134	{	.type = CRYPTO_ALG_TYPE_AHASH,
2135		.alg.hash = {
2136			.init = ahash_init,
2137			.update = ahash_update,
2138			.final = ahash_final,
2139			.finup = ahash_finup,
2140			.digest = ahash_digest,
2141			.halg.digestsize = SHA224_DIGEST_SIZE,
2142			.halg.base = {
2143				.cra_name = "sha224",
2144				.cra_driver_name = "sha224-talitos",
2145				.cra_blocksize = SHA224_BLOCK_SIZE,
2146				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2147					     CRYPTO_ALG_ASYNC,
2148				.cra_type = &crypto_ahash_type
2149			}
2150		},
2151		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2152				     DESC_HDR_SEL0_MDEUA |
2153				     DESC_HDR_MODE0_MDEU_SHA224,
2154	},
2155	{	.type = CRYPTO_ALG_TYPE_AHASH,
2156		.alg.hash = {
2157			.init = ahash_init,
2158			.update = ahash_update,
2159			.final = ahash_final,
2160			.finup = ahash_finup,
2161			.digest = ahash_digest,
2162			.halg.digestsize = SHA256_DIGEST_SIZE,
2163			.halg.base = {
2164				.cra_name = "sha256",
2165				.cra_driver_name = "sha256-talitos",
2166				.cra_blocksize = SHA256_BLOCK_SIZE,
2167				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2168					     CRYPTO_ALG_ASYNC,
2169				.cra_type = &crypto_ahash_type
2170			}
2171		},
2172		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2173				     DESC_HDR_SEL0_MDEUA |
2174				     DESC_HDR_MODE0_MDEU_SHA256,
2175	},
2176	{	.type = CRYPTO_ALG_TYPE_AHASH,
2177		.alg.hash = {
2178			.init = ahash_init,
2179			.update = ahash_update,
2180			.final = ahash_final,
2181			.finup = ahash_finup,
2182			.digest = ahash_digest,
2183			.halg.digestsize = SHA384_DIGEST_SIZE,
2184			.halg.base = {
2185				.cra_name = "sha384",
2186				.cra_driver_name = "sha384-talitos",
2187				.cra_blocksize = SHA384_BLOCK_SIZE,
2188				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2189					     CRYPTO_ALG_ASYNC,
2190				.cra_type = &crypto_ahash_type
2191			}
2192		},
2193		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2194				     DESC_HDR_SEL0_MDEUB |
2195				     DESC_HDR_MODE0_MDEUB_SHA384,
2196	},
2197	{	.type = CRYPTO_ALG_TYPE_AHASH,
2198		.alg.hash = {
2199			.init = ahash_init,
2200			.update = ahash_update,
2201			.final = ahash_final,
2202			.finup = ahash_finup,
2203			.digest = ahash_digest,
2204			.halg.digestsize = SHA512_DIGEST_SIZE,
2205			.halg.base = {
2206				.cra_name = "sha512",
2207				.cra_driver_name = "sha512-talitos",
2208				.cra_blocksize = SHA512_BLOCK_SIZE,
2209				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2210					     CRYPTO_ALG_ASYNC,
2211				.cra_type = &crypto_ahash_type
2212			}
2213		},
2214		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2215				     DESC_HDR_SEL0_MDEUB |
2216				     DESC_HDR_MODE0_MDEUB_SHA512,
2217	},
2218};
2219
2220struct talitos_crypto_alg {
2221	struct list_head entry;
2222	struct device *dev;
2223	struct talitos_alg_template algt;
2224};
2225
2226static int talitos_cra_init(struct crypto_tfm *tfm)
2227{
2228	struct crypto_alg *alg = tfm->__crt_alg;
2229	struct talitos_crypto_alg *talitos_alg;
2230	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2231	struct talitos_private *priv;
2232
2233	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2234		talitos_alg = container_of(__crypto_ahash_alg(alg),
2235					   struct talitos_crypto_alg,
2236					   algt.alg.hash);
2237	else
2238		talitos_alg = container_of(alg, struct talitos_crypto_alg,
2239					   algt.alg.crypto);
2240
2241	/* update context with ptr to dev */
2242	ctx->dev = talitos_alg->dev;
2243
2244	/* assign SEC channel to tfm in round-robin fashion */
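	/*
	 * the mask below is valid because talitos_probe rejects any
	 * fsl,num-channels value that is not a power of 2
	 */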
2245	priv = dev_get_drvdata(ctx->dev);
2246	ctx->ch = atomic_inc_return(&priv->last_chan) &
2247		  (priv->num_channels - 1);
2248
2249	/* copy descriptor header template value */
2250	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2251
2252	/* select done notification */
2253	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2254
2255	return 0;
2256}
2257
2258static int talitos_cra_init_aead(struct crypto_tfm *tfm)
2259{
2260	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2261
2262	talitos_cra_init(tfm);
2263
2264	/* random first IV */
2265	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
2266
2267	return 0;
2268}
2269
2270static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2271{
2272	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2273
2274	talitos_cra_init(tfm);
2275
2276	ctx->keylen = 0;
2277	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2278				 sizeof(struct talitos_ahash_req_ctx));
2279
2280	return 0;
2281}
2282
2283/*
2284 * given the alg's descriptor header template, determine whether descriptor
2285 * type and primary/secondary execution units required match the hw
2286 * capabilities description provided in the device tree node.
2287 */
2288static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2289{
2290	struct talitos_private *priv = dev_get_drvdata(dev);
2291	int ret;
2292
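	/*
	 * each capability is a bitmask read from the device tree; since <<
	 * binds tighter than &, these test (1 << unit) & mask
	 */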
2293	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2294	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2295
2296	if (SECONDARY_EU(desc_hdr_template))
2297		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2298		              & priv->exec_units);
2299
2300	return ret;
2301}
2302
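/*
 * teardown, also used as the probe error path: unregister whatever algs
 * made it onto alg_list, drop the RNG, free per-channel state, release
 * the IRQ, kill the tasklet and unmap the registers
 */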
2303static int talitos_remove(struct platform_device *ofdev)
2304{
2305	struct device *dev = &ofdev->dev;
2306	struct talitos_private *priv = dev_get_drvdata(dev);
2307	struct talitos_crypto_alg *t_alg, *n;
2308	int i;
2309
2310	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2311		switch (t_alg->algt.type) {
2312		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2313		case CRYPTO_ALG_TYPE_AEAD:
2314			crypto_unregister_alg(&t_alg->algt.alg.crypto);
2315			break;
2316		case CRYPTO_ALG_TYPE_AHASH:
2317			crypto_unregister_ahash(&t_alg->algt.alg.hash);
2318			break;
2319		}
2320		list_del(&t_alg->entry);
2321		kfree(t_alg);
2322	}
2323
2324	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2325		talitos_unregister_rng(dev);
2326
2327	for (i = 0; i < priv->num_channels; i++)
2328		kfree(priv->chan[i].fifo);
2329
2330	kfree(priv->chan);
2331
2332	if (priv->irq != NO_IRQ) {
2333		free_irq(priv->irq, dev);
2334		irq_dispose_mapping(priv->irq);
2335	}
2336
2337	tasklet_kill(&priv->done_task);
2338
2339	iounmap(priv->reg);
2340
2341	dev_set_drvdata(dev, NULL);
2342
2343	kfree(priv);
2344
2345	return 0;
2346}
2347
2348static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2349						    struct talitos_alg_template
2350						           *template)
2351{
2352	struct talitos_private *priv = dev_get_drvdata(dev);
2353	struct talitos_crypto_alg *t_alg;
2354	struct crypto_alg *alg;
2355
2356	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2357	if (!t_alg)
2358		return ERR_PTR(-ENOMEM);
2359
2360	t_alg->algt = *template;
2361
2362	switch (t_alg->algt.type) {
2363	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2364		alg = &t_alg->algt.alg.crypto;
2365		alg->cra_init = talitos_cra_init;
2366		break;
2367	case CRYPTO_ALG_TYPE_AEAD:
2368		alg = &t_alg->algt.alg.crypto;
2369		alg->cra_init = talitos_cra_init_aead;
2370		break;
2371	case CRYPTO_ALG_TYPE_AHASH:
2372		alg = &t_alg->algt.alg.hash.halg.base;
2373		alg->cra_init = talitos_cra_init_ahash;
2374		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2375		    !strcmp(alg->cra_name, "sha224")) {
2376			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2377			t_alg->algt.desc_hdr_template =
2378					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2379					DESC_HDR_SEL0_MDEUA |
2380					DESC_HDR_MODE0_MDEU_SHA256;
2381		}
2382		break;
2383	default:
2384		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
2385		return ERR_PTR(-EINVAL);
2386	}
2387
2388	alg->cra_module = THIS_MODULE;
2389	alg->cra_priority = TALITOS_CRA_PRIORITY;
2390	alg->cra_alignmask = 0;
2391	alg->cra_ctxsize = sizeof(struct talitos_ctx);
2392
2393	t_alg->dev = dev;
2394
2395	return t_alg;
2396}
2397
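/*
 * probe: map and request the IRQ, iomap the registers, read the SEC's
 * capabilities out of the device tree, set up per-channel state, reset
 * and initialize the h/w, then register the RNG and every algorithm the
 * h/w supports.  All failure paths funnel through err_out, letting
 * talitos_remove() undo whatever had been set up.
 */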
2398static int talitos_probe(struct platform_device *ofdev)
2399{
2400	struct device *dev = &ofdev->dev;
2401	struct device_node *np = ofdev->dev.of_node;
2402	struct talitos_private *priv;
2403	const unsigned int *prop;
2404	int i, err;
2405
2406	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2407	if (!priv)
2408		return -ENOMEM;
2409
2410	dev_set_drvdata(dev, priv);
2411
2412	priv->ofdev = ofdev;
2413
2414	tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
2415
2416	INIT_LIST_HEAD(&priv->alg_list);
2417
2418	priv->irq = irq_of_parse_and_map(np, 0);
2419
2420	if (priv->irq == NO_IRQ) {
2421		dev_err(dev, "failed to map irq\n");
2422		err = -EINVAL;
2423		goto err_out;
2424	}
2425
2426	/* get the irq line */
2427	err = request_irq(priv->irq, talitos_interrupt, 0,
2428			  dev_driver_string(dev), dev);
2429	if (err) {
2430		dev_err(dev, "failed to request irq %d\n", priv->irq);
2431		irq_dispose_mapping(priv->irq);
2432		priv->irq = NO_IRQ;
2433		goto err_out;
2434	}
2435
2436	priv->reg = of_iomap(np, 0);
2437	if (!priv->reg) {
2438		dev_err(dev, "failed to of_iomap\n");
2439		err = -ENOMEM;
2440		goto err_out;
2441	}
2442
2443	/* get SEC version capabilities from device tree */
2444	prop = of_get_property(np, "fsl,num-channels", NULL);
2445	if (prop)
2446		priv->num_channels = *prop;
2447
2448	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2449	if (prop)
2450		priv->chfifo_len = *prop;
2451
2452	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2453	if (prop)
2454		priv->exec_units = *prop;
2455
2456	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2457	if (prop)
2458		priv->desc_types = *prop;
2459
2460	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2461	    !priv->exec_units || !priv->desc_types) {
2462		dev_err(dev, "invalid property data in device tree node\n");
2463		err = -EINVAL;
2464		goto err_out;
2465	}
2466
2467	if (of_device_is_compatible(np, "fsl,sec3.0"))
2468		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2469
2470	if (of_device_is_compatible(np, "fsl,sec2.1"))
2471		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
2472				  TALITOS_FTR_SHA224_HWINIT;
2473
2474	priv->chan = kcalloc(priv->num_channels,
2475			     sizeof(struct talitos_channel), GFP_KERNEL);
2476	if (!priv->chan) {
2477		dev_err(dev, "failed to allocate channel management space\n");
2478		err = -ENOMEM;
2479		goto err_out;
2480	}
2481
2482	for (i = 0; i < priv->num_channels; i++) {
2483		spin_lock_init(&priv->chan[i].head_lock);
2484		spin_lock_init(&priv->chan[i].tail_lock);
2485	}
2486
2487	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
2488
2489	for (i = 0; i < priv->num_channels; i++) {
2490		priv->chan[i].fifo = kcalloc(priv->fifo_len,
2491			sizeof(struct talitos_request), GFP_KERNEL);
2492		if (!priv->chan[i].fifo) {
2493			dev_err(dev, "failed to allocate request fifo %d\n", i);
2494			err = -ENOMEM;
2495			goto err_out;
2496		}
2497	}
2498
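	/*
	 * submit_count starts negative so that, presumably, a simple sign
	 * test in the submit path can refuse work once chfifo_len - 1
	 * requests are outstanding on a channel
	 */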
2499	for (i = 0; i < priv->num_channels; i++)
2500		atomic_set(&priv->chan[i].submit_count,
2501			   -(priv->chfifo_len - 1));
2502
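	/*
	 * the SEC generates 36-bit physical addresses (the upper bits ride
	 * in each descriptor pointer's eptr field), hence the 36-bit mask
	 */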
2503	dma_set_mask(dev, DMA_BIT_MASK(36));
2504
2505	/* reset and initialize the h/w */
2506	err = init_device(dev);
2507	if (err) {
2508		dev_err(dev, "failed to initialize device\n");
2509		goto err_out;
2510	}
2511
2512	/* register the RNG, if available */
2513	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
2514		err = talitos_register_rng(dev);
2515		if (err) {
2516			dev_err(dev, "failed to register hwrng: %d\n", err);
2517			goto err_out;
2518		} else
2519			dev_info(dev, "hwrng\n");
2520	}
2521
2522	/* register crypto algorithms the device supports */
2523	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2524		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
2525			struct talitos_crypto_alg *t_alg;
2526			char *name = NULL;
2527
2528			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
2529			if (IS_ERR(t_alg)) {
2530				err = PTR_ERR(t_alg);
2531				goto err_out;
2532			}
2533
2534			switch (t_alg->algt.type) {
2535			case CRYPTO_ALG_TYPE_ABLKCIPHER:
2536			case CRYPTO_ALG_TYPE_AEAD:
2537				err = crypto_register_alg(
2538						&t_alg->algt.alg.crypto);
2539				name = t_alg->algt.alg.crypto.cra_driver_name;
2540				break;
2541			case CRYPTO_ALG_TYPE_AHASH:
2542				err = crypto_register_ahash(
2543						&t_alg->algt.alg.hash);
2544				name =
2545				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
2546				break;
2547			}
2548			if (err) {
2549				dev_err(dev, "%s alg registration failed\n",
2550					name);
2551				kfree(t_alg);
2552			} else {
2553				list_add_tail(&t_alg->entry, &priv->alg_list);
2554				dev_info(dev, "%s\n", name);
2555			}
2556		}
2557	}
2558
2559	return 0;
2560
2561err_out:
2562	talitos_remove(ofdev);
2563
2564	return err;
2565}
2566
2567static const struct of_device_id talitos_match[] = {
2568	{
2569		.compatible = "fsl,sec2.0",
2570	},
2571	{},
2572};
2573MODULE_DEVICE_TABLE(of, talitos_match);
2574
2575static struct platform_driver talitos_driver = {
2576	.driver = {
2577		.name = "talitos",
2578		.owner = THIS_MODULE,
2579		.of_match_table = talitos_match,
2580	},
2581	.probe = talitos_probe,
2582	.remove = talitos_remove,
2583};
2584
2585static int __init talitos_init(void)
2586{
2587	return platform_driver_register(&talitos_driver);
2588}
2589module_init(talitos_init);
2590
2591static void __exit talitos_exit(void)
2592{
2593	platform_driver_unregister(&talitos_driver);
2594}
2595module_exit(talitos_exit);
2596
2597MODULE_LICENSE("GPL");
2598MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
2599MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");