// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Keem Bay OCS HCU Crypto Driver.
 *
 * Copyright (C) 2018-2020 Intel Corporation
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>

#include <crypto/sha2.h>

#include "ocs-hcu.h"

/* Registers. */
#define OCS_HCU_MODE			0x00
#define OCS_HCU_CHAIN			0x04
#define OCS_HCU_OPERATION		0x08
#define OCS_HCU_KEY_0			0x0C
#define OCS_HCU_ISR			0x50
#define OCS_HCU_IER			0x54
#define OCS_HCU_STATUS			0x58
#define OCS_HCU_MSG_LEN_LO		0x60
#define OCS_HCU_MSG_LEN_HI		0x64
#define OCS_HCU_KEY_BYTE_ORDER_CFG	0x80
#define OCS_HCU_DMA_SRC_ADDR		0x400
#define OCS_HCU_DMA_SRC_SIZE		0x408
#define OCS_HCU_DMA_DST_SIZE		0x40C
#define OCS_HCU_DMA_DMA_MODE		0x410
#define OCS_HCU_DMA_NEXT_SRC_DESCR	0x418
#define OCS_HCU_DMA_MSI_ISR		0x480
#define OCS_HCU_DMA_MSI_IER		0x484
#define OCS_HCU_DMA_MSI_MASK		0x488

/* Register bit definitions. */
#define HCU_MODE_ALGO_SHIFT		16
#define HCU_MODE_HMAC_SHIFT		22

#define HCU_STATUS_BUSY			BIT(0)

#define HCU_BYTE_ORDER_SWAP		BIT(0)

#define HCU_IRQ_HASH_DONE		BIT(2)
#define HCU_IRQ_HASH_ERR_MASK		(BIT(3) | BIT(1) | BIT(0))

#define HCU_DMA_IRQ_SRC_DONE		BIT(0)
#define HCU_DMA_IRQ_SAI_ERR		BIT(2)
#define HCU_DMA_IRQ_BAD_COMP_ERR	BIT(3)
#define HCU_DMA_IRQ_INBUF_RD_ERR	BIT(4)
#define HCU_DMA_IRQ_INBUF_WD_ERR	BIT(5)
#define HCU_DMA_IRQ_OUTBUF_WR_ERR	BIT(6)
#define HCU_DMA_IRQ_OUTBUF_RD_ERR	BIT(7)
#define HCU_DMA_IRQ_CRD_ERR		BIT(8)
#define HCU_DMA_IRQ_ERR_MASK		(HCU_DMA_IRQ_SAI_ERR | \
					 HCU_DMA_IRQ_BAD_COMP_ERR | \
					 HCU_DMA_IRQ_INBUF_RD_ERR | \
					 HCU_DMA_IRQ_INBUF_WD_ERR | \
					 HCU_DMA_IRQ_OUTBUF_WR_ERR | \
					 HCU_DMA_IRQ_OUTBUF_RD_ERR | \
					 HCU_DMA_IRQ_CRD_ERR)

#define HCU_DMA_SNOOP_MASK		(0x7 << 28)
#define HCU_DMA_SRC_LL_EN		BIT(25)
#define HCU_DMA_EN			BIT(31)

#define OCS_HCU_ENDIANNESS_VALUE	0x2A

#define HCU_DMA_MSI_UNMASK		BIT(0)
#define HCU_DMA_MSI_DISABLE		0
#define HCU_IRQ_DISABLE			0

#define OCS_HCU_START			BIT(0)
#define OCS_HCU_TERMINATE		BIT(1)

#define OCS_LL_DMA_FLAG_TERMINATE	BIT(31)

#define OCS_HCU_HW_KEY_LEN_U32		(OCS_HCU_HW_KEY_LEN / sizeof(u32))

#define HCU_DATA_WRITE_ENDIANNESS_OFFSET	26

#define OCS_HCU_NUM_CHAINS_SHA256_224_SM3	(SHA256_DIGEST_SIZE / sizeof(u32))
#define OCS_HCU_NUM_CHAINS_SHA384_512		(SHA512_DIGEST_SIZE / sizeof(u32))

/*
 * While polling on a busy HCU, wait at most 200us between consecutive checks.
 */
#define OCS_HCU_WAIT_BUSY_RETRY_DELAY_US	200
/* Wait on a busy HCU for at most 1 second. */
#define OCS_HCU_WAIT_BUSY_TIMEOUT_US		1000000

/**
 * struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list.
 * @src_addr:  Source address of the data.
 * @src_len:   Length of data to be fetched.
 * @nxt_desc:  Next descriptor to fetch.
 * @ll_flags:  Flags (Freeze & terminate) for the DMA engine.
 */
struct ocs_hcu_dma_entry {
	u32 src_addr;
	u32 src_len;
	u32 nxt_desc;
	u32 ll_flags;
};

/**
 * struct ocs_hcu_dma_list - OCS-specific DMA linked list.
 * @head:	The head of the list (points to the array backing the list).
 * @tail:	The current tail of the list; NULL if the list is empty.
 * @dma_addr:	The DMA address of @head (i.e., the DMA address of the backing
 *		array).
 * @max_nents:	Maximum number of entries in the list (i.e., number of elements
 *		in the backing array).
 *
 * The OCS DMA list is an array-backed list of OCS DMA descriptors. The array
 * backing the list is allocated with dma_alloc_coherent() and pointed to by
 * @head.
 */
struct ocs_hcu_dma_list {
	struct ocs_hcu_dma_entry	*head;
	struct ocs_hcu_dma_entry	*tail;
	dma_addr_t			dma_addr;
	size_t				max_nents;
};
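
/*
 * Illustrative sketch (editor's addition, not part of the original driver):
 * memory layout of an ocs_hcu_dma_list with max_nents == 3 after two calls
 * to ocs_hcu_dma_list_add_tail() (defined below). Offsets are relative to
 * @dma_addr; buf0/len0 and buf1/len1 are hypothetical inputs.
 *
 *   head[0]: src_addr = buf0, src_len = len0, ll_flags = 0,
 *            nxt_desc = dma_addr + sizeof(struct ocs_hcu_dma_entry)
 *   head[1]: src_addr = buf1, src_len = len1,
 *            ll_flags = OCS_LL_DMA_FLAG_TERMINATE, nxt_desc = 0   <-- tail
 *   head[2]: unused
 *
 * The DMA engine is pointed at @dma_addr and follows nxt_desc until it hits
 * an entry with OCS_LL_DMA_FLAG_TERMINATE set.
 */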

static inline u32 ocs_hcu_num_chains(enum ocs_hcu_algo algo)
{
	switch (algo) {
	case OCS_HCU_ALGO_SHA224:
	case OCS_HCU_ALGO_SHA256:
	case OCS_HCU_ALGO_SM3:
		return OCS_HCU_NUM_CHAINS_SHA256_224_SM3;
	case OCS_HCU_ALGO_SHA384:
	case OCS_HCU_ALGO_SHA512:
		return OCS_HCU_NUM_CHAINS_SHA384_512;
	default:
		return 0;
	}
}

static inline u32 ocs_hcu_digest_size(enum ocs_hcu_algo algo)
{
	switch (algo) {
	case OCS_HCU_ALGO_SHA224:
		return SHA224_DIGEST_SIZE;
	case OCS_HCU_ALGO_SHA256:
	case OCS_HCU_ALGO_SM3:
		/* SM3 shares the same digest size. */
		return SHA256_DIGEST_SIZE;
	case OCS_HCU_ALGO_SHA384:
		return SHA384_DIGEST_SIZE;
	case OCS_HCU_ALGO_SHA512:
		return SHA512_DIGEST_SIZE;
	default:
		return 0;
	}
}

/**
 * ocs_hcu_wait_busy() - Wait for the OCS HCU hardware to become available.
 * @hcu_dev:	OCS HCU device to wait for.
 *
 * Return: 0 if the device is free, -ETIMEDOUT if the device is busy and the
 *	   internal timeout has expired.
 */
static int ocs_hcu_wait_busy(struct ocs_hcu_dev *hcu_dev)
{
	long val;

	return readl_poll_timeout(hcu_dev->io_base + OCS_HCU_STATUS, val,
				  !(val & HCU_STATUS_BUSY),
				  OCS_HCU_WAIT_BUSY_RETRY_DELAY_US,
				  OCS_HCU_WAIT_BUSY_TIMEOUT_US);
}

static void ocs_hcu_done_irq_en(struct ocs_hcu_dev *hcu_dev)
{
	/* Clear any pending interrupts. */
	writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_ISR);
	hcu_dev->irq_err = false;
	/* Enable error and HCU done interrupts. */
	writel(HCU_IRQ_HASH_DONE | HCU_IRQ_HASH_ERR_MASK,
	       hcu_dev->io_base + OCS_HCU_IER);
}

static void ocs_hcu_dma_irq_en(struct ocs_hcu_dev *hcu_dev)
{
	/* Clear any pending interrupts. */
	writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
	hcu_dev->irq_err = false;
	/* Enable only the DMA source-done and error interrupts. */
	writel(HCU_DMA_IRQ_ERR_MASK | HCU_DMA_IRQ_SRC_DONE,
	       hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
	/* Unmask the DMA MSI interrupt. */
	writel(HCU_DMA_MSI_UNMASK, hcu_dev->io_base + OCS_HCU_DMA_MSI_MASK);
}

static void ocs_hcu_irq_dis(struct ocs_hcu_dev *hcu_dev)
{
	writel(HCU_IRQ_DISABLE, hcu_dev->io_base + OCS_HCU_IER);
	writel(HCU_DMA_MSI_DISABLE, hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
}

static int ocs_hcu_wait_and_disable_irq(struct ocs_hcu_dev *hcu_dev)
{
	int rc;

	rc = wait_for_completion_interruptible(&hcu_dev->irq_done);
	if (rc)
		goto exit;

	if (hcu_dev->irq_err) {
		/* Unset flag and return error. */
		hcu_dev->irq_err = false;
		rc = -EIO;
		goto exit;
	}

exit:
	ocs_hcu_irq_dis(hcu_dev);

	return rc;
}

/**
 * ocs_hcu_get_intermediate_data() - Get intermediate data.
 * @hcu_dev:	The target HCU device.
 * @data:	Where to store the intermediate data.
 * @algo:	The algorithm being used.
 *
 * This function is used to save the current hashing process state in order to
 * continue it in the future.
 *
 * Note: once all data has been processed, the intermediate data actually
 * contains the hashing result. So this function is also used to retrieve the
 * final result of a hashing process.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_get_intermediate_data(struct ocs_hcu_dev *hcu_dev,
					 struct ocs_hcu_idata *data,
					 enum ocs_hcu_algo algo)
{
	const int n = ocs_hcu_num_chains(algo);
	u32 *chain;
	int rc;
	int i;

	/* Data not requested. */
	if (!data)
		return -EINVAL;

	chain = (u32 *)data->digest;

	/* Ensure that the OCS is no longer busy before reading the chains. */
	rc = ocs_hcu_wait_busy(hcu_dev);
	if (rc)
		return rc;

	/*
	 * This loop is safe because data->digest is an array of
	 * SHA512_DIGEST_SIZE bytes and the maximum value returned by
	 * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512, which is
	 * equal to SHA512_DIGEST_SIZE / sizeof(u32).
	 */
	for (i = 0; i < n; i++)
		chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);

	data->msg_len_lo = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
	data->msg_len_hi = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);

	return 0;
}

/**
 * ocs_hcu_set_intermediate_data() - Set intermediate data.
 * @hcu_dev:	The target HCU device.
 * @data:	The intermediate data to be set.
 * @algo:	The algorithm being used.
 *
 * This function is used to continue a previous hashing process.
 */
static void ocs_hcu_set_intermediate_data(struct ocs_hcu_dev *hcu_dev,
					  const struct ocs_hcu_idata *data,
					  enum ocs_hcu_algo algo)
{
	const int n = ocs_hcu_num_chains(algo);
	u32 *chain = (u32 *)data->digest;
	int i;

	/*
	 * This loop is safe because data->digest is an array of
	 * SHA512_DIGEST_SIZE bytes and the maximum value returned by
	 * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512, which is
	 * equal to SHA512_DIGEST_SIZE / sizeof(u32).
	 */
	for (i = 0; i < n; i++)
		writel(chain[i], hcu_dev->io_base + OCS_HCU_CHAIN);

	writel(data->msg_len_lo, hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
	writel(data->msg_len_hi, hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
}

static int ocs_hcu_get_digest(struct ocs_hcu_dev *hcu_dev,
			      enum ocs_hcu_algo algo, u8 *dgst, size_t dgst_len)
{
	u32 *chain;
	int rc;
	int i;

	if (!dgst)
		return -EINVAL;

	/* Length of the output buffer must match the algo digest size. */
	if (dgst_len != ocs_hcu_digest_size(algo))
		return -EINVAL;

	/* Ensure that the OCS is no longer busy before reading the chains. */
	rc = ocs_hcu_wait_busy(hcu_dev);
	if (rc)
		return rc;

	chain = (u32 *)dgst;
	for (i = 0; i < dgst_len / sizeof(u32); i++)
		chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);

	return 0;
}

/**
 * ocs_hcu_hw_cfg() - Configure the HCU hardware.
 * @hcu_dev:	The HCU device to configure.
 * @algo:	The algorithm to be used by the HCU device.
 * @use_hmac:	Whether or not HW HMAC should be used.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_hw_cfg(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
			  bool use_hmac)
{
	u32 cfg;
	int rc;

	if (algo != OCS_HCU_ALGO_SHA256 && algo != OCS_HCU_ALGO_SHA224 &&
	    algo != OCS_HCU_ALGO_SHA384 && algo != OCS_HCU_ALGO_SHA512 &&
	    algo != OCS_HCU_ALGO_SM3)
		return -EINVAL;

	rc = ocs_hcu_wait_busy(hcu_dev);
	if (rc)
		return rc;

	/* Ensure interrupts are disabled. */
	ocs_hcu_irq_dis(hcu_dev);

	/* Configure endianness, hashing algorithm and HW HMAC (if needed) */
	cfg = OCS_HCU_ENDIANNESS_VALUE << HCU_DATA_WRITE_ENDIANNESS_OFFSET;
	cfg |= algo << HCU_MODE_ALGO_SHIFT;
	if (use_hmac)
		cfg |= BIT(HCU_MODE_HMAC_SHIFT);

	writel(cfg, hcu_dev->io_base + OCS_HCU_MODE);

	return 0;
}
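
/*
 * Worked example (editor's illustration, not part of the original driver):
 * for an algorithm identifier 'algo' with HW HMAC enabled, the MODE word
 * programmed above decomposes as:
 *
 *   cfg = (0x2A << 26)	OCS_HCU_ENDIANNESS_VALUE, data-write endianness
 *       | (algo << 16)	hashing algorithm (HCU_MODE_ALGO_SHIFT)
 *       | BIT(22)	HW HMAC enable (HCU_MODE_HMAC_SHIFT)
 *
 * The numeric values of the ocs_hcu_algo enum are defined in ocs-hcu.h.
 */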

/**
 * ocs_hcu_clear_key() - Clear key stored in OCS HMAC KEY registers.
 * @hcu_dev:	The OCS HCU device whose key registers should be cleared.
 */
static void ocs_hcu_clear_key(struct ocs_hcu_dev *hcu_dev)
{
	int reg_off;

	/* Clear OCS_HCU_KEY_[0..15] */
	for (reg_off = 0; reg_off < OCS_HCU_HW_KEY_LEN; reg_off += sizeof(u32))
		writel(0, hcu_dev->io_base + OCS_HCU_KEY_0 + reg_off);
}

/**
 * ocs_hcu_write_key() - Write key to OCS HMAC KEY registers.
 * @hcu_dev:	The OCS HCU device the key should be written to.
 * @key:	The key to be written.
 * @len:	The size of the key to write. It must not exceed
 *		OCS_HCU_HW_KEY_LEN; shorter keys are zero-padded.
 *
 * Return:	0 on success, negative error code otherwise.
 */
static int ocs_hcu_write_key(struct ocs_hcu_dev *hcu_dev, const u8 *key, size_t len)
{
	u32 key_u32[OCS_HCU_HW_KEY_LEN_U32];
	int i;

	if (len > OCS_HCU_HW_KEY_LEN)
		return -EINVAL;

	/* Copy key into temporary u32 array. */
	memcpy(key_u32, key, len);

	/*
	 * The hardware requires all the bytes of the HW key vector to be
	 * written, so pad with zeroes until we reach OCS_HCU_HW_KEY_LEN.
	 */
	memzero_explicit((u8 *)key_u32 + len, OCS_HCU_HW_KEY_LEN - len);

	/*
	 * OCS hardware expects the MSB of the key to be written at the highest
	 * address of the HCU key vector; in other words, the key must be
	 * written in reverse order.
	 *
	 * Therefore, we first enable byte swapping for the HCU key vector, so
	 * that the bytes of each 32-bit word written to OCS_HCU_KEY_[0..15]
	 * are swapped:
	 * 3 <---> 0, 2 <---> 1.
	 */
	writel(HCU_BYTE_ORDER_SWAP,
	       hcu_dev->io_base + OCS_HCU_KEY_BYTE_ORDER_CFG);
	/*
	 * Then write the 32-bit words composing the key, starting from the
	 * end of the key.
	 */
	for (i = 0; i < OCS_HCU_HW_KEY_LEN_U32; i++)
		writel(key_u32[OCS_HCU_HW_KEY_LEN_U32 - 1 - i],
		       hcu_dev->io_base + OCS_HCU_KEY_0 + (sizeof(u32) * i));

	memzero_explicit(key_u32, OCS_HCU_HW_KEY_LEN);

	return 0;
}
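
/*
 * Worked example (editor's illustration): the combination of byte swapping
 * and reverse word order above means that, for a full OCS_HCU_HW_KEY_LEN-byte
 * key, key byte k ends up at byte offset (OCS_HCU_HW_KEY_LEN - 1 - k) of the
 * HW key vector, i.e., key[0] (the MSB) lands at the highest address, as the
 * hardware expects.
 */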

/**
 * ocs_hcu_ll_dma_start() - Start OCS HCU hashing via DMA
 * @hcu_dev:	The OCS HCU device to use.
 * @dma_list:	The OCS DMA list mapping the data to hash.
 * @finalize:	Whether or not this is the last hashing operation and therefore
 *		the final hash should be computed even if the data is not
 *		block-aligned.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_ll_dma_start(struct ocs_hcu_dev *hcu_dev,
				const struct ocs_hcu_dma_list *dma_list,
				bool finalize)
{
	u32 cfg = HCU_DMA_SNOOP_MASK | HCU_DMA_SRC_LL_EN | HCU_DMA_EN;
	int rc;

	if (!dma_list)
		return -EINVAL;

	/*
	 * For final requests we use the HCU_DONE IRQ to be notified when all
	 * input data has been processed by the HCU; however, we cannot do so
	 * for non-final requests, because we don't get a HCU_DONE IRQ when we
	 * don't terminate the operation.
	 *
	 * Therefore, for non-final requests, we use the DMA IRQ, which
	 * triggers when the DMA has finished feeding all the input data to
	 * the HCU, but the HCU may still be processing it. This is fine,
	 * since we will wait for the HCU processing to be completed when we
	 * try to read intermediate results, in
	 * ocs_hcu_get_intermediate_data().
	 */
	if (finalize)
		ocs_hcu_done_irq_en(hcu_dev);
	else
		ocs_hcu_dma_irq_en(hcu_dev);

	reinit_completion(&hcu_dev->irq_done);
	writel(dma_list->dma_addr, hcu_dev->io_base + OCS_HCU_DMA_NEXT_SRC_DESCR);
	writel(0, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
	writel(0, hcu_dev->io_base + OCS_HCU_DMA_DST_SIZE);

	writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);

	writel(cfg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);

	if (finalize)
		writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

	rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
	if (rc)
		return rc;

	return 0;
}

struct ocs_hcu_dma_list *ocs_hcu_dma_list_alloc(struct ocs_hcu_dev *hcu_dev,
						int max_nents)
{
	struct ocs_hcu_dma_list *dma_list;

	dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
	if (!dma_list)
		return NULL;

	/* Allocate the array backing the DMA list. */
	dma_list->head = dma_alloc_coherent(hcu_dev->dev,
					    sizeof(*dma_list->head) * max_nents,
					    &dma_list->dma_addr, GFP_KERNEL);
	if (!dma_list->head) {
		kfree(dma_list);
		return NULL;
	}
	dma_list->max_nents = max_nents;
	dma_list->tail = NULL;

	return dma_list;
}

void ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev,
			   struct ocs_hcu_dma_list *dma_list)
{
	if (!dma_list)
		return;

	dma_free_coherent(hcu_dev->dev,
			  sizeof(*dma_list->head) * dma_list->max_nents,
			  dma_list->head, dma_list->dma_addr);

	kfree(dma_list);
}

/* Add a new DMA entry at the end of the OCS DMA list. */
int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
			      struct ocs_hcu_dma_list *dma_list,
			      dma_addr_t addr, u32 len)
{
	struct device *dev = hcu_dev->dev;
	struct ocs_hcu_dma_entry *old_tail;
	struct ocs_hcu_dma_entry *new_tail;

	if (!len)
		return 0;

	if (!dma_list)
		return -EINVAL;

	if (addr & ~OCS_HCU_DMA_BIT_MASK) {
		dev_err(dev,
			"Unexpected error: Invalid DMA address for OCS HCU\n");
		return -EINVAL;
	}

	old_tail = dma_list->tail;
	new_tail = old_tail ? old_tail + 1 : dma_list->head;

	/* Check if list is full. */
	if (new_tail - dma_list->head >= dma_list->max_nents)
		return -ENOMEM;

	/*
	 * If there was an old tail (i.e., this is not the first element we are
	 * adding), un-terminate the old tail and make it point to the new one.
	 */
	if (old_tail) {
		old_tail->ll_flags &= ~OCS_LL_DMA_FLAG_TERMINATE;
		/*
		 * The old tail 'nxt_desc' must point to the DMA address of the
		 * new tail.
		 */
		old_tail->nxt_desc = dma_list->dma_addr +
				     sizeof(*dma_list->tail) * (new_tail -
								dma_list->head);
	}

	new_tail->src_addr = (u32)addr;
	new_tail->src_len = (u32)len;
	new_tail->ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
	new_tail->nxt_desc = 0;

	/* Update list tail with new tail. */
	dma_list->tail = new_tail;

	return 0;
}
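
/*
 * Usage sketch (editor's illustration, not part of the original driver):
 * build a two-entry DMA list from a pair of buffers that the caller has
 * already DMA-mapped. The function and parameter names are hypothetical.
 */
static int example_build_dma_list(struct ocs_hcu_dev *hcu_dev,
				  dma_addr_t buf0, u32 len0,
				  dma_addr_t buf1, u32 len1,
				  struct ocs_hcu_dma_list **out)
{
	struct ocs_hcu_dma_list *dma_list;
	int rc;

	/* Allocate a coherent backing array with room for two entries. */
	dma_list = ocs_hcu_dma_list_alloc(hcu_dev, 2);
	if (!dma_list)
		return -ENOMEM;

	rc = ocs_hcu_dma_list_add_tail(hcu_dev, dma_list, buf0, len0);
	if (!rc)
		rc = ocs_hcu_dma_list_add_tail(hcu_dev, dma_list, buf1, len1);
	if (rc) {
		ocs_hcu_dma_list_free(hcu_dev, dma_list);
		return rc;
	}

	*out = dma_list;
	return 0;
}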

/**
 * ocs_hcu_hash_init() - Initialize hash operation context.
 * @ctx:	The context to initialize.
 * @algo:	The hashing algorithm to use.
 *
 * Return:	0 on success, negative error code otherwise.
 */
int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo)
{
	if (!ctx)
		return -EINVAL;

	ctx->algo = algo;
	ctx->idata.msg_len_lo = 0;
	ctx->idata.msg_len_hi = 0;
	/* No need to set idata.digest to 0. */

	return 0;
}

/**
 * ocs_hcu_hash_update() - Perform a hashing iteration.
 * @hcu_dev:	The OCS HCU device to use.
 * @ctx:	The OCS HCU hashing context.
 * @dma_list:	The OCS DMA list mapping the input data to process.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
			struct ocs_hcu_hash_ctx *ctx,
			const struct ocs_hcu_dma_list *dma_list)
{
	int rc;

	if (!hcu_dev || !ctx)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
	if (rc)
		return rc;

	/* If we already processed some data, idata needs to be set. */
	if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
		ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

	/* Start linked-list DMA hashing. */
	rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, false);
	if (rc)
		return rc;

	/* Update idata and return. */
	return ocs_hcu_get_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
}

/**
 * ocs_hcu_hash_finup() - Update and finalize hash computation.
 * @hcu_dev:	The OCS HCU device to use.
 * @ctx:	The OCS HCU hashing context.
 * @dma_list:	The OCS DMA list mapping the input data to process.
 * @dgst:	The buffer where to save the computed digest.
 * @dgst_len:	The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
		       const struct ocs_hcu_hash_ctx *ctx,
		       const struct ocs_hcu_dma_list *dma_list,
		       u8 *dgst, size_t dgst_len)
{
	int rc;

	if (!hcu_dev || !ctx)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
	if (rc)
		return rc;

	/* If we already processed some data, idata needs to be set. */
	if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
		ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

	/* Start linked-list DMA hashing. */
	rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
	if (rc)
		return rc;

	/* Get digest and return. */
	return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
}

/**
 * ocs_hcu_hash_final() - Finalize hash computation.
 * @hcu_dev:		The OCS HCU device to use.
 * @ctx:		The OCS HCU hashing context.
 * @dgst:		The buffer where to save the computed digest.
 * @dgst_len:		The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
		       const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
		       size_t dgst_len)
{
	int rc;

	if (!hcu_dev || !ctx)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
	if (rc)
		return rc;

	/* If we already processed some data, idata needs to be set. */
	if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
		ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

	/*
	 * Enable HCU interrupts, so that HCU_DONE will be triggered once the
	 * final hash is computed.
	 */
	ocs_hcu_done_irq_en(hcu_dev);
	reinit_completion(&hcu_dev->irq_done);
	writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

	rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
	if (rc)
		return rc;

	/* Get digest and return. */
	return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
}
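
/*
 * Usage sketch (editor's illustration, not part of the original driver): a
 * two-step hashing session. Each chunk of input must already be mapped into
 * an OCS DMA list (see example_build_dma_list() above); the intermediate
 * state is carried between calls in ctx.idata. @dgst_len must match the
 * digest size of the chosen algorithm (here SHA256_DIGEST_SIZE).
 */
static int example_hash_session(struct ocs_hcu_dev *hcu_dev,
				struct ocs_hcu_dma_list *chunk1,
				struct ocs_hcu_dma_list *chunk2,
				u8 *dgst, size_t dgst_len)
{
	struct ocs_hcu_hash_ctx ctx;
	int rc;

	rc = ocs_hcu_hash_init(&ctx, OCS_HCU_ALGO_SHA256);
	if (rc)
		return rc;

	/* Non-final chunk: state is saved/restored via ctx.idata. */
	rc = ocs_hcu_hash_update(hcu_dev, &ctx, chunk1);
	if (rc)
		return rc;

	/* Final chunk: process the remaining data and read out the digest. */
	return ocs_hcu_hash_finup(hcu_dev, &ctx, chunk2, dgst, dgst_len);
}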

/**
 * ocs_hcu_digest() - Compute hash digest.
 * @hcu_dev:		The OCS HCU device to use.
 * @algo:		The hash algorithm to use.
 * @data:		The input data to process.
 * @data_len:		The length of @data.
 * @dgst:		The buffer where to save the computed digest.
 * @dgst_len:		The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
		   void *data, size_t data_len, u8 *dgst, size_t dgst_len)
{
	struct device *dev = hcu_dev->dev;
	dma_addr_t dma_handle;
	u32 reg;
	int rc;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, algo, false);
	if (rc)
		return rc;

	dma_handle = dma_map_single(dev, data, data_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_handle))
		return -EIO;

	reg = HCU_DMA_SNOOP_MASK | HCU_DMA_EN;

	ocs_hcu_done_irq_en(hcu_dev);

	reinit_completion(&hcu_dev->irq_done);

	writel(dma_handle, hcu_dev->io_base + OCS_HCU_DMA_SRC_ADDR);
	writel(data_len, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
	writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
	writel(reg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);

	writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

	rc = ocs_hcu_wait_and_disable_irq(hcu_dev);

	/* Unmap the input buffer even if the hashing operation failed. */
	dma_unmap_single(dev, dma_handle, data_len, DMA_TO_DEVICE);

	if (rc)
		return rc;

	return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
}
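
/*
 * Usage note (editor's illustration): ocs_hcu_digest() is the one-shot path
 * for a single linear buffer, e.g.:
 *
 *	u8 dgst[SHA256_DIGEST_SIZE];
 *
 *	rc = ocs_hcu_digest(hcu_dev, OCS_HCU_ALGO_SHA256, buf, buf_len,
 *			    dgst, sizeof(dgst));
 *
 * Since the buffer is passed to dma_map_single(), it must be DMA-capable
 * (e.g., not a stack or vmalloc() buffer).
 */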

/**
 * ocs_hcu_hmac() - Compute HMAC.
 * @hcu_dev:		The OCS HCU device to use.
 * @algo:		The hash algorithm to use with HMAC.
 * @key:		The key to use.
 * @key_len:		The length of @key.
 * @dma_list:		The OCS DMA list mapping the input data to process.
 * @dgst:		The buffer where to save the computed HMAC.
 * @dgst_len:		The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
		 const u8 *key, size_t key_len,
		 const struct ocs_hcu_dma_list *dma_list,
		 u8 *dgst, size_t dgst_len)
{
	int rc;

	/* Ensure the key is non-NULL and its length is non-zero. */
	if (!key || key_len == 0)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, algo, true);
	if (rc)
		return rc;

	rc = ocs_hcu_write_key(hcu_dev, key, key_len);
	if (rc)
		return rc;

	rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);

	/* Clear HW key before processing return code. */
	ocs_hcu_clear_key(hcu_dev);

	if (rc)
		return rc;

	return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
}

irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id)
{
	struct ocs_hcu_dev *hcu_dev = dev_id;
	u32 hcu_irq;
	u32 dma_irq;

	/* Read and clear the HCU interrupt. */
	hcu_irq = readl(hcu_dev->io_base + OCS_HCU_ISR);
	writel(hcu_irq, hcu_dev->io_base + OCS_HCU_ISR);

	/* Read and clear the HCU DMA interrupt. */
	dma_irq = readl(hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
	writel(dma_irq, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);

	/* Check for errors. */
	if (hcu_irq & HCU_IRQ_HASH_ERR_MASK || dma_irq & HCU_DMA_IRQ_ERR_MASK) {
		hcu_dev->irq_err = true;
		goto complete;
	}

	/* Check for DONE IRQs. */
	if (hcu_irq & HCU_IRQ_HASH_DONE || dma_irq & HCU_DMA_IRQ_SRC_DONE)
		goto complete;

	return IRQ_NONE;

complete:
	complete(&hcu_dev->irq_done);

	return IRQ_HANDLED;
}
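
/*
 * Registration sketch (editor's illustration; the actual hook-up lives in
 * the platform driver code, not in this file): ocs_hcu_irq_handler() is
 * suitable for a plain (devm_)request_irq(), with the ocs_hcu_dev as the
 * cookie. The function name below is hypothetical.
 */
static int example_register_irq(struct device *dev, int irq,
				struct ocs_hcu_dev *hcu_dev)
{
	return devm_request_irq(dev, irq, ocs_hcu_irq_handler, 0,
				"ocs-hcu-example", hcu_dev);
}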

MODULE_LICENSE("GPL");