v6.8 (drivers/crypto/ccree/cc_request_mgr.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
  3
  4#include <linux/kernel.h>
  5#include <linux/nospec.h>
  6#include "cc_driver.h"
  7#include "cc_buffer_mgr.h"
  8#include "cc_request_mgr.h"
  9#include "cc_pm.h"
 10
 11#define CC_MAX_POLL_ITER	10
  12/* The highest descriptor count in use */
 13#define CC_MAX_DESC_SEQ_LEN	23
 14
 15struct cc_req_mgr_handle {
 16	/* Request manager resources */
 17	unsigned int hw_queue_size; /* HW capability */
 18	unsigned int min_free_hw_slots;
 19	unsigned int max_used_sw_slots;
 20	struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
 21	u32 req_queue_head;
 22	u32 req_queue_tail;
 23	u32 axi_completed;
 24	u32 q_free_slots;
 25	/* This lock protects access to HW register
 26	 * that must be single request at a time
 27	 */
 28	spinlock_t hw_lock;
 29	struct cc_hw_desc compl_desc;
 30	u8 *dummy_comp_buff;
 31	dma_addr_t dummy_comp_buff_dma;
 32
 33	/* backlog queue */
 34	struct list_head backlog;
 35	unsigned int bl_len;
 36	spinlock_t bl_lock; /* protect backlog queue */
 37
 38#ifdef COMP_IN_WQ
 39	struct workqueue_struct *workq;
 40	struct delayed_work compwork;
 41#else
 42	struct tasklet_struct comptask;
 43#endif
 44};
 45
 46struct cc_bl_item {
 47	struct cc_crypto_req creq;
 48	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
 49	unsigned int len;
 50	struct list_head list;
 51	bool notif;
 52};
 53
 54static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
 55	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
 56	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
 57	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
 58	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
 59	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
 60	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
 61	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
 62	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
 63	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
 64	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
 65	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
 66	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
 67	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
 68	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
 69	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
 70	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
 71};
 72
 73static void comp_handler(unsigned long devarg);
 74#ifdef COMP_IN_WQ
 75static void comp_work_handler(struct work_struct *work);
 76#endif
 77
 78static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
 79{
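	/*
	 * array_index_nospec() clamps alg and slot under speculative execution,
	 * so a mispredicted bounds check cannot be used to read outside the
	 * cc_cpp_int_masks table (Spectre-v1 hardening).
	 */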
 80	alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
 81	slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);
 82
 83	return cc_cpp_int_masks[alg][slot];
 84}
 85
 86void cc_req_mgr_fini(struct cc_drvdata *drvdata)
 87{
 88	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
 89	struct device *dev = drvdata_to_dev(drvdata);
 90
 91	if (!req_mgr_h)
 92		return; /* Not allocated */
 93
 94	if (req_mgr_h->dummy_comp_buff_dma) {
 95		dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
 96				  req_mgr_h->dummy_comp_buff_dma);
 97	}
 98
 99	dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
100						req_mgr_h->min_free_hw_slots));
101	dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
102
103#ifdef COMP_IN_WQ
104	destroy_workqueue(req_mgr_h->workq);
105#else
106	/* Kill tasklet */
107	tasklet_kill(&req_mgr_h->comptask);
108#endif
109	kfree_sensitive(req_mgr_h);
110	drvdata->request_mgr_handle = NULL;
111}
112
113int cc_req_mgr_init(struct cc_drvdata *drvdata)
114{
115	struct cc_req_mgr_handle *req_mgr_h;
116	struct device *dev = drvdata_to_dev(drvdata);
117	int rc = 0;
118
119	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
120	if (!req_mgr_h) {
121		rc = -ENOMEM;
122		goto req_mgr_init_err;
123	}
124
125	drvdata->request_mgr_handle = req_mgr_h;
126
127	spin_lock_init(&req_mgr_h->hw_lock);
128	spin_lock_init(&req_mgr_h->bl_lock);
129	INIT_LIST_HEAD(&req_mgr_h->backlog);
130
131#ifdef COMP_IN_WQ
132	dev_dbg(dev, "Initializing completion workqueue\n");
133	req_mgr_h->workq = create_singlethread_workqueue("ccree");
134	if (!req_mgr_h->workq) {
135		dev_err(dev, "Failed creating work queue\n");
136		rc = -ENOMEM;
137		goto req_mgr_init_err;
138	}
139	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
140#else
141	dev_dbg(dev, "Initializing completion tasklet\n");
142	tasklet_init(&req_mgr_h->comptask, comp_handler,
143		     (unsigned long)drvdata);
144#endif
145	req_mgr_h->hw_queue_size = cc_ioread(drvdata,
146					     CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
147	dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
148	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
149		dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
150			req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
151		rc = -ENOMEM;
152		goto req_mgr_init_err;
153	}
154	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
155	req_mgr_h->max_used_sw_slots = 0;
156
157	/* Allocate DMA word for "dummy" completion descriptor use */
158	req_mgr_h->dummy_comp_buff =
159		dma_alloc_coherent(dev, sizeof(u32),
160				   &req_mgr_h->dummy_comp_buff_dma,
161				   GFP_KERNEL);
162	if (!req_mgr_h->dummy_comp_buff) {
163		dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
164			sizeof(u32));
165		rc = -ENOMEM;
166		goto req_mgr_init_err;
167	}
168
169	/* Init. "dummy" completion descriptor */
170	hw_desc_init(&req_mgr_h->compl_desc);
171	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
172	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
173		      sizeof(u32), NS_BIT, 1);
174	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
175	set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);
176
177	return 0;
178
179req_mgr_init_err:
180	cc_req_mgr_fini(drvdata);
181	return rc;
182}
183
184static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
185			unsigned int seq_len)
186{
187	int i, w;
188	void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
189	struct device *dev = drvdata_to_dev(drvdata);
190
191	/*
192	 * We do indeed write all 6 command words to the same
193	 * register. The HW supports this.
194	 */
195
196	for (i = 0; i < seq_len; i++) {
197		for (w = 0; w <= 5; w++)
198			writel_relaxed(seq[i].word[w], reg);
199
200		if (cc_dump_desc)
201			dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
202				i, seq[i].word[0], seq[i].word[1],
203				seq[i].word[2], seq[i].word[3],
204				seq[i].word[4], seq[i].word[5]);
205	}
206}
207
208/**
209 * request_mgr_complete() - Completion will take place if and only if user
210 * requested completion by cc_send_sync_request().
211 *
212 * @dev: Device pointer
213 * @dx_compl_h: The completion event to signal
214 * @dummy: unused error code
215 */
216static void request_mgr_complete(struct device *dev, void *dx_compl_h,
217				 int dummy)
218{
219	struct completion *this_compl = dx_compl_h;
220
221	complete(this_compl);
222}
223
224static int cc_queues_status(struct cc_drvdata *drvdata,
225			    struct cc_req_mgr_handle *req_mgr_h,
226			    unsigned int total_seq_len)
227{
228	unsigned long poll_queue;
229	struct device *dev = drvdata_to_dev(drvdata);
230
231	/* SW queue is checked only once as it will not
232	 * be changed during the poll because the spinlock_bh
233	 * is held by the thread
234	 */
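	/*
	 * Note: head/tail are masked with (MAX_REQUEST_QUEUE_SIZE - 1), which
	 * only works because the queue size is a power of two. One slot is
	 * deliberately left unused so that "full" (head + 1 == tail) can be
	 * told apart from "empty" (head == tail).
	 */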
235	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
236	    req_mgr_h->req_queue_tail) {
237		dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
238			req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
239		return -ENOSPC;
240	}
241
242	if (req_mgr_h->q_free_slots >= total_seq_len)
243		return 0;
244
245	/* Wait for space in HW queue. Poll constant num of iterations. */
246	for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER ; poll_queue++) {
247		req_mgr_h->q_free_slots =
248			cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
249		if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
250			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
251
252		if (req_mgr_h->q_free_slots >= total_seq_len) {
253			/* If there is enough place return */
254			return 0;
255		}
256
257		dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
258			req_mgr_h->q_free_slots, total_seq_len);
259	}
260	/* No room in the HW queue try again later */
261	dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
262		req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
263		req_mgr_h->q_free_slots, total_seq_len);
264	return -ENOSPC;
265}
266
267/**
268 * cc_do_send_request() - Enqueue caller request to crypto hardware.
269 * Need to be called with HW lock held and PM running
270 *
271 * @drvdata: Associated device driver context
272 * @cc_req: The request to enqueue
273 * @desc: The crypto sequence
274 * @len: The crypto sequence length
275 * @add_comp: If "true": add an artificial dout DMA to mark completion
276 *
277 */
278static void cc_do_send_request(struct cc_drvdata *drvdata,
279			       struct cc_crypto_req *cc_req,
280			       struct cc_hw_desc *desc, unsigned int len,
281			       bool add_comp)
282{
283	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
284	unsigned int used_sw_slots;
285	unsigned int total_seq_len = len; /*initial sequence length*/
286	struct device *dev = drvdata_to_dev(drvdata);
287
288	used_sw_slots = ((req_mgr_h->req_queue_head -
289			  req_mgr_h->req_queue_tail) &
290			 (MAX_REQUEST_QUEUE_SIZE - 1));
291	if (used_sw_slots > req_mgr_h->max_used_sw_slots)
292		req_mgr_h->max_used_sw_slots = used_sw_slots;
293
294	/* Enqueue request - must be locked with HW lock*/
295	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
296	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
297				    (MAX_REQUEST_QUEUE_SIZE - 1);
298
299	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
300
301	/*
302	 * We are about to push command to the HW via the command registers
303	 * that may reference host memory. We need to issue a memory barrier
304	 * to make sure there are no outstanding memory writes
305	 */
306	wmb();
307
308	/* STAT_PHASE_4: Push sequence */
309
310	enqueue_seq(drvdata, desc, len);
311
312	if (add_comp) {
313		enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
314		total_seq_len++;
315	}
316
317	if (req_mgr_h->q_free_slots < total_seq_len) {
318		/* This situation should never occur. Maybe indicating problem
319		 * with resuming power. Set the free slot count to 0 and hope
320		 * for the best.
321		 */
322		dev_err(dev, "HW free slot count mismatch.");
323		req_mgr_h->q_free_slots = 0;
324	} else {
325		/* Update the free slots in HW queue */
326		req_mgr_h->q_free_slots -= total_seq_len;
327	}
328}
329
330static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
331			       struct cc_bl_item *bli)
332{
333	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
334	struct device *dev = drvdata_to_dev(drvdata);
335
336	spin_lock_bh(&mgr->bl_lock);
337	list_add_tail(&bli->list, &mgr->backlog);
338	++mgr->bl_len;
339	dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
340	spin_unlock_bh(&mgr->bl_lock);
341	tasklet_schedule(&mgr->comptask);
342}
343
344static void cc_proc_backlog(struct cc_drvdata *drvdata)
345{
346	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
347	struct cc_bl_item *bli;
348	struct cc_crypto_req *creq;
349	void *req;
350	struct device *dev = drvdata_to_dev(drvdata);
351	int rc;
352
353	spin_lock(&mgr->bl_lock);
354
355	while (mgr->bl_len) {
356		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
357		dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);
358
359		spin_unlock(&mgr->bl_lock);
360
361
362		creq = &bli->creq;
363		req = creq->user_arg;
364
365		/*
366		 * Notify the request we're moving out of the backlog
367		 * but only if we haven't done so already.
368		 */
369		if (!bli->notif) {
370			creq->user_cb(dev, req, -EINPROGRESS);
371			bli->notif = true;
372		}
373
374		spin_lock(&mgr->hw_lock);
375
376		rc = cc_queues_status(drvdata, mgr, bli->len);
377		if (rc) {
378			/*
379			 * There is still no room in the FIFO for
380			 * this request. Bail out. We'll return here
381			 * on the next completion irq.
382			 */
383			spin_unlock(&mgr->hw_lock);
384			return;
385		}
386
387		cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len,
388				   false);
389		spin_unlock(&mgr->hw_lock);
390
391		/* Remove ourselves from the backlog list */
392		spin_lock(&mgr->bl_lock);
393		list_del(&bli->list);
394		--mgr->bl_len;
395		kfree(bli);
396	}
397
398	spin_unlock(&mgr->bl_lock);
399}
400
401int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
402		    struct cc_hw_desc *desc, unsigned int len,
403		    struct crypto_async_request *req)
404{
405	int rc;
406	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
407	struct device *dev = drvdata_to_dev(drvdata);
408	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
409	gfp_t flags = cc_gfp_flags(req);
410	struct cc_bl_item *bli;
411
412	rc = cc_pm_get(dev);
413	if (rc) {
414		dev_err(dev, "cc_pm_get returned %x\n", rc);
415		return rc;
416	}
417
418	spin_lock_bh(&mgr->hw_lock);
419	rc = cc_queues_status(drvdata, mgr, len);
420
421#ifdef CC_DEBUG_FORCE_BACKLOG
422	if (backlog_ok)
423		rc = -ENOSPC;
424#endif /* CC_DEBUG_FORCE_BACKLOG */
425
426	if (rc == -ENOSPC && backlog_ok) {
427		spin_unlock_bh(&mgr->hw_lock);
428
429		bli = kmalloc(sizeof(*bli), flags);
430		if (!bli) {
431			cc_pm_put_suspend(dev);
432			return -ENOMEM;
433		}
434
435		memcpy(&bli->creq, cc_req, sizeof(*cc_req));
436		memcpy(&bli->desc, desc, len * sizeof(*desc));
437		bli->len = len;
438		bli->notif = false;
439		cc_enqueue_backlog(drvdata, bli);
440		return -EBUSY;
441	}
442
443	if (!rc) {
444		cc_do_send_request(drvdata, cc_req, desc, len, false);
445		rc = -EINPROGRESS;
446	}
447
448	spin_unlock_bh(&mgr->hw_lock);
449	return rc;
450}
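/*
 * Minimal caller sketch (hypothetical names, not part of this file): an
 * algorithm driver fills a cc_crypto_req with its completion callback, builds
 * its HW descriptor sequence and submits both from its async path. A return of
 * -EINPROGRESS means the request was pushed to HW, -EBUSY that it was
 * backlogged; anything else is an error.
 *
 *	struct cc_crypto_req cc_req = {};
 *	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
 *	unsigned int seq_len = 0;
 *	int rc;
 *
 *	cc_req.user_cb = my_complete;                 // hypothetical callback
 *	cc_req.user_arg = req;                        // the crypto_async_request
 *	my_build_desc_seq(ctx, req, desc, &seq_len);  // hypothetical builder
 *
 *	rc = cc_send_request(drvdata, &cc_req, desc, seq_len, req);
 *	if (rc != -EINPROGRESS && rc != -EBUSY)
 *		my_cleanup(req, rc);                  // hypothetical error path
 */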
451
452int cc_send_sync_request(struct cc_drvdata *drvdata,
453			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
454			 unsigned int len)
455{
456	int rc;
457	struct device *dev = drvdata_to_dev(drvdata);
458	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
459
460	init_completion(&cc_req->seq_compl);
461	cc_req->user_cb = request_mgr_complete;
462	cc_req->user_arg = &cc_req->seq_compl;
463
464	rc = cc_pm_get(dev);
465	if (rc) {
466		dev_err(dev, "cc_pm_get returned %x\n", rc);
467		return rc;
468	}
469
470	while (true) {
471		spin_lock_bh(&mgr->hw_lock);
472		rc = cc_queues_status(drvdata, mgr, len + 1);
473
474		if (!rc)
475			break;
476
477		spin_unlock_bh(&mgr->hw_lock);
478		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
479		reinit_completion(&drvdata->hw_queue_avail);
480	}
481
482	cc_do_send_request(drvdata, cc_req, desc, len, true);
483	spin_unlock_bh(&mgr->hw_lock);
484	wait_for_completion(&cc_req->seq_compl);
485	return 0;
486}
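/*
 * Synchronous use, minimal sketch (hypothetical caller): cc_send_sync_request()
 * wires up cc_req->seq_compl and its callback itself, so the caller only
 * provides the descriptor sequence and blocks until the HW has executed it
 * (plus the artificial completion descriptor).
 *
 *	struct cc_crypto_req cc_req = {};
 *	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
 *	unsigned int seq_len = 0;
 *
 *	my_build_setkey_seq(ctx, desc, &seq_len);     // hypothetical builder
 *	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
 */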
487
488/**
489 * send_request_init() - Enqueue caller request to crypto hardware during init
490 * process.
491 * Assume this function is not called in the middle of a flow,
492 * since we set QUEUE_LAST_IND flag in the last descriptor.
493 *
494 * @drvdata: Associated device driver context
495 * @desc: The crypto sequence
496 * @len: The crypto sequence length
497 *
498 * Return:
499 * Returns "0" upon success
500 */
501int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
502		      unsigned int len)
503{
504	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
505	unsigned int total_seq_len = len; /*initial sequence length*/
506	int rc = 0;
507
508	/* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT.
509	 */
510	rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
511	if (rc)
512		return rc;
513
514	set_queue_last_ind(drvdata, &desc[(len - 1)]);
515
516	/*
517	 * We are about to push command to the HW via the command registers
518	 * that may reference host memory. We need to issue a memory barrier
519	 * to make sure there are no outstanding memory writes
520	 */
521	wmb();
522	enqueue_seq(drvdata, desc, len);
523
524	/* Update the free slots in HW queue */
525	req_mgr_h->q_free_slots =
526		cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
527
528	return 0;
529}
530
531void complete_request(struct cc_drvdata *drvdata)
532{
533	struct cc_req_mgr_handle *request_mgr_handle =
534						drvdata->request_mgr_handle;
535
536	complete(&drvdata->hw_queue_avail);
537#ifdef COMP_IN_WQ
538	queue_delayed_work(request_mgr_handle->workq,
539			   &request_mgr_handle->compwork, 0);
540#else
541	tasklet_schedule(&request_mgr_handle->comptask);
542#endif
543}
544
545#ifdef COMP_IN_WQ
546static void comp_work_handler(struct work_struct *work)
547{
548	struct cc_drvdata *drvdata =
549		container_of(work, struct cc_drvdata, compwork.work);
550
551	comp_handler((unsigned long)drvdata);
552}
553#endif
554
555static void proc_completions(struct cc_drvdata *drvdata)
556{
557	struct cc_crypto_req *cc_req;
558	struct device *dev = drvdata_to_dev(drvdata);
559	struct cc_req_mgr_handle *request_mgr_handle =
560						drvdata->request_mgr_handle;
561	unsigned int *tail = &request_mgr_handle->req_queue_tail;
562	unsigned int *head = &request_mgr_handle->req_queue_head;
563	int rc;
564	u32 mask;
565
566	while (request_mgr_handle->axi_completed) {
567		request_mgr_handle->axi_completed--;
568
569		/* Dequeue request */
570		if (*head == *tail) {
571			/* We are supposed to handle a completion but our
572			 * queue is empty. This is not normal. Return and
573			 * hope for the best.
574			 */
575			dev_err(dev, "Request queue is empty head == tail %u\n",
576				*head);
577			break;
578		}
579
580		cc_req = &request_mgr_handle->req_queue[*tail];
581
582		if (cc_req->cpp.is_cpp) {
583
584			dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
585				cc_req->cpp.slot, cc_req->cpp.alg);
586			mask = cc_cpp_int_mask(cc_req->cpp.alg,
587					       cc_req->cpp.slot);
588			rc = (drvdata->irq & mask ? -EPERM : 0);
589			dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
590				drvdata->irq, rc);
591		} else {
592			dev_dbg(dev, "None CPP request completion\n");
593			rc = 0;
594		}
595
596		if (cc_req->user_cb)
597			cc_req->user_cb(dev, cc_req->user_arg, rc);
598		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
599		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
600		dev_dbg(dev, "Request completed. axi_completed=%d\n",
601			request_mgr_handle->axi_completed);
602		cc_pm_put_suspend(dev);
603	}
604}
605
606static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
607{
608	return FIELD_GET(AXIM_MON_COMP_VALUE,
609			 cc_ioread(drvdata, drvdata->axim_mon_offset));
610}
611
612/* Deferred service handler, run as interrupt-fired tasklet */
613static void comp_handler(unsigned long devarg)
614{
615	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
616	struct cc_req_mgr_handle *request_mgr_handle =
617						drvdata->request_mgr_handle;
618	struct device *dev = drvdata_to_dev(drvdata);
619	u32 irq;
620
621	dev_dbg(dev, "Completion handler called!\n");
622	irq = (drvdata->irq & drvdata->comp_mask);
623
624	/* To avoid the interrupt from firing as we unmask it,
625	 * we clear it now
626	 */
627	cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
628
629	/* Avoid race with above clear: Test completion counter once more */
630
631	request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
632
633	dev_dbg(dev, "AXI completion after updated: %d\n",
634		request_mgr_handle->axi_completed);
635
636	while (request_mgr_handle->axi_completed) {
637		do {
638			drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
639			irq = (drvdata->irq & drvdata->comp_mask);
640			proc_completions(drvdata);
641
642			/* At this point (after proc_completions()),
643			 * request_mgr_handle->axi_completed is 0.
644			 */
645			request_mgr_handle->axi_completed +=
646						cc_axi_comp_count(drvdata);
647		} while (request_mgr_handle->axi_completed > 0);
648
649		cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
650
651		request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
652	}
653
654	/* after verifying that there is nothing to do,
655	 * unmask AXI completion interrupt
656	 */
657	cc_iowrite(drvdata, CC_REG(HOST_IMR),
658		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);
659
660	cc_proc_backlog(drvdata);
661	dev_dbg(dev, "Comp. handler done.\n");
662}
v4.17 (drivers/crypto/ccree/cc_request_mgr.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
  3
  4#include <linux/kernel.h>
  5#include "cc_driver.h"
  6#include "cc_buffer_mgr.h"
  7#include "cc_request_mgr.h"
  8#include "cc_ivgen.h"
  9#include "cc_pm.h"
 10
 11#define CC_MAX_POLL_ITER	10
 12/* The highest descriptor count in use */
 13#define CC_MAX_DESC_SEQ_LEN	23
 14
 15struct cc_req_mgr_handle {
 16	/* Request manager resources */
 17	unsigned int hw_queue_size; /* HW capability */
 18	unsigned int min_free_hw_slots;
 19	unsigned int max_used_sw_slots;
 20	struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
 21	u32 req_queue_head;
 22	u32 req_queue_tail;
 23	u32 axi_completed;
 24	u32 q_free_slots;
 25	/* This lock protects access to HW register
 26	 * that must be single request at a time
 27	 */
 28	spinlock_t hw_lock;
 29	struct cc_hw_desc compl_desc;
 30	u8 *dummy_comp_buff;
 31	dma_addr_t dummy_comp_buff_dma;
 32
 33	/* backlog queue */
 34	struct list_head backlog;
 35	unsigned int bl_len;
 36	spinlock_t bl_lock; /* protect backlog queue */
 37
 38#ifdef COMP_IN_WQ
 39	struct workqueue_struct *workq;
 40	struct delayed_work compwork;
 41#else
 42	struct tasklet_struct comptask;
 43#endif
 44	bool is_runtime_suspended;
 45};
 46
 47struct cc_bl_item {
 48	struct cc_crypto_req creq;
 49	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
 50	unsigned int len;
 51	struct list_head list;
 52	bool notif;
 53};
 54
 55static void comp_handler(unsigned long devarg);
 56#ifdef COMP_IN_WQ
 57static void comp_work_handler(struct work_struct *work);
 58#endif
 59
 60void cc_req_mgr_fini(struct cc_drvdata *drvdata)
 61{
 62	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
 63	struct device *dev = drvdata_to_dev(drvdata);
 64
 65	if (!req_mgr_h)
 66		return; /* Not allocated */
 67
 68	if (req_mgr_h->dummy_comp_buff_dma) {
 69		dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
 70				  req_mgr_h->dummy_comp_buff_dma);
 71	}
 72
 73	dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
 74						req_mgr_h->min_free_hw_slots));
 75	dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
 76
 77#ifdef COMP_IN_WQ
 78	flush_workqueue(req_mgr_h->workq);
 79	destroy_workqueue(req_mgr_h->workq);
 80#else
 81	/* Kill tasklet */
 82	tasklet_kill(&req_mgr_h->comptask);
 83#endif
 84	kzfree(req_mgr_h);
 85	drvdata->request_mgr_handle = NULL;
 86}
 87
 88int cc_req_mgr_init(struct cc_drvdata *drvdata)
 89{
 90	struct cc_req_mgr_handle *req_mgr_h;
 91	struct device *dev = drvdata_to_dev(drvdata);
 92	int rc = 0;
 93
 94	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
 95	if (!req_mgr_h) {
 96		rc = -ENOMEM;
 97		goto req_mgr_init_err;
 98	}
 99
100	drvdata->request_mgr_handle = req_mgr_h;
101
102	spin_lock_init(&req_mgr_h->hw_lock);
103	spin_lock_init(&req_mgr_h->bl_lock);
104	INIT_LIST_HEAD(&req_mgr_h->backlog);
105
106#ifdef COMP_IN_WQ
107	dev_dbg(dev, "Initializing completion workqueue\n");
108	req_mgr_h->workq = create_singlethread_workqueue("ccree");
109	if (!req_mgr_h->workq) {
110		dev_err(dev, "Failed creating work queue\n");
111		rc = -ENOMEM;
112		goto req_mgr_init_err;
113	}
114	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
115#else
116	dev_dbg(dev, "Initializing completion tasklet\n");
117	tasklet_init(&req_mgr_h->comptask, comp_handler,
118		     (unsigned long)drvdata);
119#endif
120	req_mgr_h->hw_queue_size = cc_ioread(drvdata,
121					     CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
122	dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
123	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
124		dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
125			req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
126		rc = -ENOMEM;
127		goto req_mgr_init_err;
128	}
129	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
130	req_mgr_h->max_used_sw_slots = 0;
131
132	/* Allocate DMA word for "dummy" completion descriptor use */
133	req_mgr_h->dummy_comp_buff =
134		dma_alloc_coherent(dev, sizeof(u32),
135				   &req_mgr_h->dummy_comp_buff_dma,
136				   GFP_KERNEL);
137	if (!req_mgr_h->dummy_comp_buff) {
138		dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
139			sizeof(u32));
140		rc = -ENOMEM;
141		goto req_mgr_init_err;
142	}
143
144	/* Init. "dummy" completion descriptor */
145	hw_desc_init(&req_mgr_h->compl_desc);
146	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
147	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
148		      sizeof(u32), NS_BIT, 1);
149	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
150	set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);
151
152	return 0;
153
154req_mgr_init_err:
155	cc_req_mgr_fini(drvdata);
156	return rc;
157}
158
159static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
160			unsigned int seq_len)
161{
162	int i, w;
163	void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
164	struct device *dev = drvdata_to_dev(drvdata);
165
166	/*
167	 * We do indeed write all 6 command words to the same
168	 * register. The HW supports this.
169	 */
170
171	for (i = 0; i < seq_len; i++) {
172		for (w = 0; w <= 5; w++)
173			writel_relaxed(seq[i].word[w], reg);
174
175		if (cc_dump_desc)
176			dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
177				i, seq[i].word[0], seq[i].word[1],
178				seq[i].word[2], seq[i].word[3],
179				seq[i].word[4], seq[i].word[5]);
180	}
181}
182
183/*!
184 * Completion will take place if and only if user requested completion
185 * by cc_send_sync_request().
186 *
187 * \param dev
188 * \param dx_compl_h The completion event to signal
189 */
190static void request_mgr_complete(struct device *dev, void *dx_compl_h,
191				 int dummy)
192{
193	struct completion *this_compl = dx_compl_h;
194
195	complete(this_compl);
196}
197
198static int cc_queues_status(struct cc_drvdata *drvdata,
199			    struct cc_req_mgr_handle *req_mgr_h,
200			    unsigned int total_seq_len)
201{
202	unsigned long poll_queue;
203	struct device *dev = drvdata_to_dev(drvdata);
204
205	/* SW queue is checked only once as it will not
206	 * be changed during the poll because the spinlock_bh
207	 * is held by the thread
208	 */
209	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
210	    req_mgr_h->req_queue_tail) {
211		dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
212			req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
213		return -ENOSPC;
214	}
215
216	if (req_mgr_h->q_free_slots >= total_seq_len)
217		return 0;
218
219	/* Wait for space in HW queue. Poll constant num of iterations. */
220	for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER ; poll_queue++) {
221		req_mgr_h->q_free_slots =
222			cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
223		if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
224			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
225
226		if (req_mgr_h->q_free_slots >= total_seq_len) {
227			/* If there is enough place return */
228			return 0;
229		}
230
231		dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
232			req_mgr_h->q_free_slots, total_seq_len);
233	}
234	/* No room in the HW queue try again later */
235	dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
236		req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
237		req_mgr_h->q_free_slots, total_seq_len);
238	return -ENOSPC;
239}
240
241/*!
242 * Enqueue caller request to crypto hardware.
243 * Need to be called with HW lock held and PM running
244 *
245 * \param drvdata
246 * \param cc_req The request to enqueue
247 * \param desc The crypto sequence
248 * \param len The crypto sequence length
249 * \param add_comp If "true": add an artificial dout DMA to mark completion
250 *
251 * \return int Returns -EINPROGRESS or error code
252 */
253static int cc_do_send_request(struct cc_drvdata *drvdata,
254			      struct cc_crypto_req *cc_req,
255			      struct cc_hw_desc *desc, unsigned int len,
256				bool add_comp, bool ivgen)
257{
258	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
259	unsigned int used_sw_slots;
260	unsigned int iv_seq_len = 0;
261	unsigned int total_seq_len = len; /*initial sequence length*/
262	struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
263	struct device *dev = drvdata_to_dev(drvdata);
264	int rc;
265
266	if (ivgen) {
267		dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
268			cc_req->ivgen_dma_addr_len,
269			&cc_req->ivgen_dma_addr[0],
270			&cc_req->ivgen_dma_addr[1],
271			&cc_req->ivgen_dma_addr[2],
272			cc_req->ivgen_size);
273
274		/* Acquire IV from pool */
275		rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
276			       cc_req->ivgen_dma_addr_len,
277			       cc_req->ivgen_size, iv_seq, &iv_seq_len);
278
279		if (rc) {
280			dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
281			return rc;
282		}
283
284		total_seq_len += iv_seq_len;
285	}
286
287	used_sw_slots = ((req_mgr_h->req_queue_head -
288			  req_mgr_h->req_queue_tail) &
289			 (MAX_REQUEST_QUEUE_SIZE - 1));
290	if (used_sw_slots > req_mgr_h->max_used_sw_slots)
291		req_mgr_h->max_used_sw_slots = used_sw_slots;
292
293	/* Enqueue request - must be locked with HW lock*/
294	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
295	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
296				    (MAX_REQUEST_QUEUE_SIZE - 1);
297	/* TODO: Use circ_buf.h ? */
298
299	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
300
301	/*
302	 * We are about to push command to the HW via the command registers
303	 * that may reference host memory. We need to issue a memory barrier
304	 * to make sure there are no outstanding memory writes
305	 */
306	wmb();
307
308	/* STAT_PHASE_4: Push sequence */
309	if (ivgen)
310		enqueue_seq(drvdata, iv_seq, iv_seq_len);
311
312	enqueue_seq(drvdata, desc, len);
313
314	if (add_comp) {
315		enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
316		total_seq_len++;
317	}
318
319	if (req_mgr_h->q_free_slots < total_seq_len) {
320		/* This situation should never occur. Maybe indicating problem
321		 * with resuming power. Set the free slot count to 0 and hope
322		 * for the best.
323		 */
324		dev_err(dev, "HW free slot count mismatch.");
325		req_mgr_h->q_free_slots = 0;
326	} else {
327		/* Update the free slots in HW queue */
328		req_mgr_h->q_free_slots -= total_seq_len;
329	}
330
331	/* Operation still in process */
332	return -EINPROGRESS;
333}
334
335static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
336			       struct cc_bl_item *bli)
337{
338	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
339
340	spin_lock_bh(&mgr->bl_lock);
341	list_add_tail(&bli->list, &mgr->backlog);
342	++mgr->bl_len;
343	spin_unlock_bh(&mgr->bl_lock);
344	tasklet_schedule(&mgr->comptask);
345}
346
347static void cc_proc_backlog(struct cc_drvdata *drvdata)
348{
349	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
350	struct cc_bl_item *bli;
351	struct cc_crypto_req *creq;
352	struct crypto_async_request *req;
353	bool ivgen;
354	unsigned int total_len;
355	struct device *dev = drvdata_to_dev(drvdata);
356	int rc;
357
358	spin_lock(&mgr->bl_lock);
359
360	while (mgr->bl_len) {
361		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
362		spin_unlock(&mgr->bl_lock);
363
364		creq = &bli->creq;
365		req = (struct crypto_async_request *)creq->user_arg;
366
367		/*
368		 * Notify the request we're moving out of the backlog
369		 * but only if we haven't done so already.
370		 */
371		if (!bli->notif) {
372			req->complete(req, -EINPROGRESS);
373			bli->notif = true;
374		}
375
376		ivgen = !!creq->ivgen_dma_addr_len;
377		total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
378
379		spin_lock(&mgr->hw_lock);
380
381		rc = cc_queues_status(drvdata, mgr, total_len);
382		if (rc) {
383			/*
384			 * There is still no room in the FIFO for
385			 * this request. Bail out. We'll return here
386			 * on the next completion irq.
387			 */
388			spin_unlock(&mgr->hw_lock);
389			return;
390		}
391
392		rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
393					bli->len, false, ivgen);
394
395		spin_unlock(&mgr->hw_lock);
396
397		if (rc != -EINPROGRESS) {
398			cc_pm_put_suspend(dev);
399			creq->user_cb(dev, req, rc);
400		}
401
402		/* Remove ourselves from the backlog list */
403		spin_lock(&mgr->bl_lock);
404		list_del(&bli->list);
405		--mgr->bl_len;
406	}
407
408	spin_unlock(&mgr->bl_lock);
409}
410
411int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
412		    struct cc_hw_desc *desc, unsigned int len,
413		    struct crypto_async_request *req)
414{
415	int rc;
416	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
417	bool ivgen = !!cc_req->ivgen_dma_addr_len;
418	unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
419	struct device *dev = drvdata_to_dev(drvdata);
420	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
421	gfp_t flags = cc_gfp_flags(req);
422	struct cc_bl_item *bli;
423
424	rc = cc_pm_get(dev);
425	if (rc) {
426		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
427		return rc;
428	}
429
430	spin_lock_bh(&mgr->hw_lock);
431	rc = cc_queues_status(drvdata, mgr, total_len);
432
433#ifdef CC_DEBUG_FORCE_BACKLOG
434	if (backlog_ok)
435		rc = -ENOSPC;
436#endif /* CC_DEBUG_FORCE_BACKLOG */
437
438	if (rc == -ENOSPC && backlog_ok) {
439		spin_unlock_bh(&mgr->hw_lock);
440
441		bli = kmalloc(sizeof(*bli), flags);
442		if (!bli) {
443			cc_pm_put_suspend(dev);
444			return -ENOMEM;
445		}
446
447		memcpy(&bli->creq, cc_req, sizeof(*cc_req));
448		memcpy(&bli->desc, desc, len * sizeof(*desc));
449		bli->len = len;
450		bli->notif = false;
451		cc_enqueue_backlog(drvdata, bli);
452		return -EBUSY;
453	}
454
455	if (!rc)
456		rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
457					ivgen);
458
459	spin_unlock_bh(&mgr->hw_lock);
460	return rc;
461}
462
463int cc_send_sync_request(struct cc_drvdata *drvdata,
464			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
465			 unsigned int len)
466{
467	int rc;
468	struct device *dev = drvdata_to_dev(drvdata);
469	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
470
471	init_completion(&cc_req->seq_compl);
472	cc_req->user_cb = request_mgr_complete;
473	cc_req->user_arg = &cc_req->seq_compl;
474
475	rc = cc_pm_get(dev);
476	if (rc) {
477		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
478		return rc;
479	}
480
481	while (true) {
482		spin_lock_bh(&mgr->hw_lock);
483		rc = cc_queues_status(drvdata, mgr, len + 1);
484
485		if (!rc)
486			break;
487
488		spin_unlock_bh(&mgr->hw_lock);
489		if (rc != -EAGAIN) {
490			cc_pm_put_suspend(dev);
491			return rc;
492		}
493		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
494		reinit_completion(&drvdata->hw_queue_avail);
495	}
496
497	rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false);
498	spin_unlock_bh(&mgr->hw_lock);
499
500	if (rc != -EINPROGRESS) {
501		cc_pm_put_suspend(dev);
502		return rc;
503	}
504
505	wait_for_completion(&cc_req->seq_compl);
506	return 0;
507}
508
509/*!
510 * Enqueue caller request to crypto hardware during init process.
511 * Assume this function is not called in the middle of a flow,
512 * since we set QUEUE_LAST_IND flag in the last descriptor.
513 *
514 * \param drvdata
515 * \param desc The crypto sequence
516 * \param len The crypto sequence length
517 *
518 * \return int Returns "0" upon success
519 */
520int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
521		      unsigned int len)
522{
523	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
524	unsigned int total_seq_len = len; /*initial sequence length*/
525	int rc = 0;
526
527	/* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT.
528	 */
529	rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
530	if (rc)
531		return rc;
532
533	set_queue_last_ind(drvdata, &desc[(len - 1)]);
534
535	/*
536	 * We are about to push command to the HW via the command registers
537	 * that may reference host memory. We need to issue a memory barrier
538	 * to make sure there are no outstanding memory writes
539	 */
540	wmb();
541	enqueue_seq(drvdata, desc, len);
542
543	/* Update the free slots in HW queue */
544	req_mgr_h->q_free_slots =
545		cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
546
547	return 0;
548}
549
550void complete_request(struct cc_drvdata *drvdata)
551{
552	struct cc_req_mgr_handle *request_mgr_handle =
553						drvdata->request_mgr_handle;
554
555	complete(&drvdata->hw_queue_avail);
556#ifdef COMP_IN_WQ
557	queue_delayed_work(request_mgr_handle->workq,
558			   &request_mgr_handle->compwork, 0);
559#else
560	tasklet_schedule(&request_mgr_handle->comptask);
561#endif
562}
563
564#ifdef COMP_IN_WQ
565static void comp_work_handler(struct work_struct *work)
566{
567	struct cc_drvdata *drvdata =
568		container_of(work, struct cc_drvdata, compwork.work);
569
570	comp_handler((unsigned long)drvdata);
571}
572#endif
573
574static void proc_completions(struct cc_drvdata *drvdata)
575{
576	struct cc_crypto_req *cc_req;
577	struct device *dev = drvdata_to_dev(drvdata);
578	struct cc_req_mgr_handle *request_mgr_handle =
579						drvdata->request_mgr_handle;
580	unsigned int *tail = &request_mgr_handle->req_queue_tail;
581	unsigned int *head = &request_mgr_handle->req_queue_head;
582
583	while (request_mgr_handle->axi_completed) {
584		request_mgr_handle->axi_completed--;
585
586		/* Dequeue request */
587		if (*head == *tail) {
588			/* We are supposed to handle a completion but our
589			 * queue is empty. This is not normal. Return and
590			 * hope for the best.
591			 */
592			dev_err(dev, "Request queue is empty head == tail %u\n",
593				*head);
594			break;
595		}
596
597		cc_req = &request_mgr_handle->req_queue[*tail];
598
599		if (cc_req->user_cb)
600			cc_req->user_cb(dev, cc_req->user_arg, 0);
601		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
602		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
603		dev_dbg(dev, "Request completed. axi_completed=%d\n",
604			request_mgr_handle->axi_completed);
605		cc_pm_put_suspend(dev);
606	}
607}
608
609static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
610{
611	return FIELD_GET(AXIM_MON_COMP_VALUE,
612			 cc_ioread(drvdata, drvdata->axim_mon_offset));
613}
614
615/* Deferred service handler, run as interrupt-fired tasklet */
616static void comp_handler(unsigned long devarg)
617{
618	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
619	struct cc_req_mgr_handle *request_mgr_handle =
620						drvdata->request_mgr_handle;
621
622	u32 irq;
623
624	irq = (drvdata->irq & CC_COMP_IRQ_MASK);
625
626	if (irq & CC_COMP_IRQ_MASK) {
627		/* To avoid the interrupt from firing as we unmask it,
628		 * we clear it now
629		 */
630		cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
631
632		/* Avoid race with above clear: Test completion counter
633		 * once more
634		 */
635		request_mgr_handle->axi_completed +=
636				cc_axi_comp_count(drvdata);
637
638		while (request_mgr_handle->axi_completed) {
639			do {
640				proc_completions(drvdata);
641				/* At this point (after proc_completions()),
642				 * request_mgr_handle->axi_completed is 0.
643				 */
644				request_mgr_handle->axi_completed =
645						cc_axi_comp_count(drvdata);
646			} while (request_mgr_handle->axi_completed > 0);
647
648			cc_iowrite(drvdata, CC_REG(HOST_ICR),
649				   CC_COMP_IRQ_MASK);
650
651			request_mgr_handle->axi_completed +=
652					cc_axi_comp_count(drvdata);
653		}
654	}
655	/* after verifying that there is nothing to do,
656	 * unmask AXI completion interrupt
657	 */
658	cc_iowrite(drvdata, CC_REG(HOST_IMR),
659		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
660
661	cc_proc_backlog(drvdata);
662}
663
664/*
665 * resume the queue configuration - no need to take the lock as this happens
666 * inside the spin lock protection
667 */
668#if defined(CONFIG_PM)
669int cc_resume_req_queue(struct cc_drvdata *drvdata)
670{
671	struct cc_req_mgr_handle *request_mgr_handle =
672		drvdata->request_mgr_handle;
673
674	spin_lock_bh(&request_mgr_handle->hw_lock);
675	request_mgr_handle->is_runtime_suspended = false;
676	spin_unlock_bh(&request_mgr_handle->hw_lock);
677
678	return 0;
679}
680
681/*
682 * suspend the queue configuration. Since it is used for the runtime suspend
683 * only verify that the queue can be suspended.
684 */
685int cc_suspend_req_queue(struct cc_drvdata *drvdata)
686{
687	struct cc_req_mgr_handle *request_mgr_handle =
688						drvdata->request_mgr_handle;
689
690	/* lock the send_request */
691	spin_lock_bh(&request_mgr_handle->hw_lock);
692	if (request_mgr_handle->req_queue_head !=
693	    request_mgr_handle->req_queue_tail) {
694		spin_unlock_bh(&request_mgr_handle->hw_lock);
695		return -EBUSY;
696	}
697	request_mgr_handle->is_runtime_suspended = true;
698	spin_unlock_bh(&request_mgr_handle->hw_lock);
699
700	return 0;
701}
702
703bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
704{
705	struct cc_req_mgr_handle *request_mgr_handle =
706						drvdata->request_mgr_handle;
707
708	return	request_mgr_handle->is_runtime_suspended;
709}
710
711#endif