v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
  3
  4#include <linux/kernel.h>
  5#include <linux/nospec.h>
  6#include "cc_driver.h"
  7#include "cc_buffer_mgr.h"
  8#include "cc_request_mgr.h"
  9#include "cc_pm.h"
 10
 11#define CC_MAX_POLL_ITER	10
 12/* The highest descriptor count in use */
 13#define CC_MAX_DESC_SEQ_LEN	23
 14
 15struct cc_req_mgr_handle {
 16	/* Request manager resources */
 17	unsigned int hw_queue_size; /* HW capability */
 18	unsigned int min_free_hw_slots;
 19	unsigned int max_used_sw_slots;
 20	struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
 21	u32 req_queue_head;
 22	u32 req_queue_tail;
 23	u32 axi_completed;
 24	u32 q_free_slots;
 25	/* This lock protects access to HW register
 26	 * that must be single request at a time
 27	 */
 28	spinlock_t hw_lock;
 29	struct cc_hw_desc compl_desc;
 30	u8 *dummy_comp_buff;
 31	dma_addr_t dummy_comp_buff_dma;
 32
 33	/* backlog queue */
 34	struct list_head backlog;
 35	unsigned int bl_len;
 36	spinlock_t bl_lock; /* protect backlog queue */
 37
 38#ifdef COMP_IN_WQ
 39	struct workqueue_struct *workq;
 40	struct delayed_work compwork;
 41#else
 42	struct tasklet_struct comptask;
 43#endif
 44};
 45
 46struct cc_bl_item {
 47	struct cc_crypto_req creq;
 48	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
 49	unsigned int len;
 50	struct list_head list;
 51	bool notif;
 52};
 53
 54static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
 55	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
 56	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
 57	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
 58	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
 59	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
 60	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
 61	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
 62	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
 63	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
 64	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
 65	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
 66	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
 67	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
 68	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
 69	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
 70	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
 71};
 72
 73static void comp_handler(unsigned long devarg);
 74#ifdef COMP_IN_WQ
 75static void comp_work_handler(struct work_struct *work);
 76#endif
 77
 78static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
 79{
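	/* Sanitize both indices against speculative out-of-bounds use (Spectre v1) before indexing the mask table */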
 80	alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
 81	slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);
 82
 83	return cc_cpp_int_masks[alg][slot];
 84}
 85
 86void cc_req_mgr_fini(struct cc_drvdata *drvdata)
 87{
 88	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
 89	struct device *dev = drvdata_to_dev(drvdata);
 90
 91	if (!req_mgr_h)
 92		return; /* Not allocated */
 93
 94	if (req_mgr_h->dummy_comp_buff_dma) {
 95		dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
 96				  req_mgr_h->dummy_comp_buff_dma);
 97	}
 98
 99	dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
100						req_mgr_h->min_free_hw_slots));
101	dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
102
103#ifdef COMP_IN_WQ
104	destroy_workqueue(req_mgr_h->workq);
105#else
106	/* Kill tasklet */
107	tasklet_kill(&req_mgr_h->comptask);
108#endif
109	kfree_sensitive(req_mgr_h);
110	drvdata->request_mgr_handle = NULL;
111}
112
113int cc_req_mgr_init(struct cc_drvdata *drvdata)
114{
115	struct cc_req_mgr_handle *req_mgr_h;
116	struct device *dev = drvdata_to_dev(drvdata);
117	int rc = 0;
118
119	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
120	if (!req_mgr_h) {
121		rc = -ENOMEM;
122		goto req_mgr_init_err;
123	}
124
125	drvdata->request_mgr_handle = req_mgr_h;
126
127	spin_lock_init(&req_mgr_h->hw_lock);
128	spin_lock_init(&req_mgr_h->bl_lock);
129	INIT_LIST_HEAD(&req_mgr_h->backlog);
130
131#ifdef COMP_IN_WQ
132	dev_dbg(dev, "Initializing completion workqueue\n");
133	req_mgr_h->workq = create_singlethread_workqueue("ccree");
134	if (!req_mgr_h->workq) {
135		dev_err(dev, "Failed creating work queue\n");
136		rc = -ENOMEM;
137		goto req_mgr_init_err;
138	}
139	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
140#else
141	dev_dbg(dev, "Initializing completion tasklet\n");
142	tasklet_init(&req_mgr_h->comptask, comp_handler,
143		     (unsigned long)drvdata);
144#endif
145	req_mgr_h->hw_queue_size = cc_ioread(drvdata,
146					     CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
147	dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
148	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
149		dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
150			req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
151		rc = -ENOMEM;
152		goto req_mgr_init_err;
153	}
154	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
155	req_mgr_h->max_used_sw_slots = 0;
156
157	/* Allocate DMA word for "dummy" completion descriptor use */
158	req_mgr_h->dummy_comp_buff =
159		dma_alloc_coherent(dev, sizeof(u32),
160				   &req_mgr_h->dummy_comp_buff_dma,
161				   GFP_KERNEL);
162	if (!req_mgr_h->dummy_comp_buff) {
163		dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
164			sizeof(u32));
165		rc = -ENOMEM;
166		goto req_mgr_init_err;
167	}
168
169	/* Init. "dummy" completion descriptor */
170	hw_desc_init(&req_mgr_h->compl_desc);
171	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
172	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
173		      sizeof(u32), NS_BIT, 1);
174	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
175	set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);
176
177	return 0;
178
179req_mgr_init_err:
180	cc_req_mgr_fini(drvdata);
181	return rc;
182}
183
184static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
185			unsigned int seq_len)
186{
187	int i, w;
188	void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
189	struct device *dev = drvdata_to_dev(drvdata);
190
191	/*
192	 * We do indeed write all 6 command words to the same
193	 * register. The HW supports this.
194	 */
195
196	for (i = 0; i < seq_len; i++) {
197		for (w = 0; w <= 5; w++)
198			writel_relaxed(seq[i].word[w], reg);
199
200		if (cc_dump_desc)
201			dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
202				i, seq[i].word[0], seq[i].word[1],
203				seq[i].word[2], seq[i].word[3],
204				seq[i].word[4], seq[i].word[5]);
205	}
206}
207
208/**
209 * request_mgr_complete() - Completion will take place if and only if user
210 * requested completion by cc_send_sync_request().
211 *
212 * @dev: Device pointer
213 * @dx_compl_h: The completion event to signal
214 * @dummy: unused error code
215 */
216static void request_mgr_complete(struct device *dev, void *dx_compl_h,
217				 int dummy)
218{
219	struct completion *this_compl = dx_compl_h;
220
221	complete(this_compl);
222}
223
224static int cc_queues_status(struct cc_drvdata *drvdata,
225			    struct cc_req_mgr_handle *req_mgr_h,
226			    unsigned int total_seq_len)
227{
228	unsigned long poll_queue;
229	struct device *dev = drvdata_to_dev(drvdata);
230
231	/* SW queue is checked only once as it will not
232	 * be changed during the poll because the spinlock_bh
233	 * is held by the thread
234	 */
235	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
236	    req_mgr_h->req_queue_tail) {
237		dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
238			req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
239		return -ENOSPC;
240	}
241
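	/* Fast path: trust the cached free-slot count; only poll the HW register below if it looks too small */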
242	if (req_mgr_h->q_free_slots >= total_seq_len)
243		return 0;
244
245	/* Wait for space in HW queue. Poll constant num of iterations. */
246	for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER ; poll_queue++) {
247		req_mgr_h->q_free_slots =
248			cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
249		if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
250			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
251
252		if (req_mgr_h->q_free_slots >= total_seq_len) {
253			/* If there is enough place return */
254			return 0;
255		}
256
257		dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
258			req_mgr_h->q_free_slots, total_seq_len);
259	}
260	/* No room in the HW queue, try again later */
261	dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
262		req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
263		req_mgr_h->q_free_slots, total_seq_len);
264	return -ENOSPC;
265}
266
267/**
268 * cc_do_send_request() - Enqueue caller request to crypto hardware.
269 * Need to be called with HW lock held and PM running
270 *
271 * @drvdata: Associated device driver context
272 * @cc_req: The request to enqueue
273 * @desc: The crypto sequence
274 * @len: The crypto sequence length
275 * @add_comp: If "true": add an artificial dout DMA to mark completion
276 *
277 */
278static void cc_do_send_request(struct cc_drvdata *drvdata,
279			       struct cc_crypto_req *cc_req,
280			       struct cc_hw_desc *desc, unsigned int len,
281			       bool add_comp)
282{
283	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
284	unsigned int used_sw_slots;
285	unsigned int total_seq_len = len; /*initial sequence length*/
286	struct device *dev = drvdata_to_dev(drvdata);
287
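	/* Track SW queue occupancy: head/tail are circular indices masked by the power-of-two MAX_REQUEST_QUEUE_SIZE */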
288	used_sw_slots = ((req_mgr_h->req_queue_head -
289			  req_mgr_h->req_queue_tail) &
290			 (MAX_REQUEST_QUEUE_SIZE - 1));
291	if (used_sw_slots > req_mgr_h->max_used_sw_slots)
292		req_mgr_h->max_used_sw_slots = used_sw_slots;
293
294	/* Enqueue request - must be locked with HW lock*/
295	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
296	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
297				    (MAX_REQUEST_QUEUE_SIZE - 1);
298
299	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
300
301	/*
302	 * We are about to push command to the HW via the command registers
303	 * that may reference host memory. We need to issue a memory barrier
304	 * to make sure there are no outstanding memory writes
305	 */
306	wmb();
307
308	/* STAT_PHASE_4: Push sequence */
309
310	enqueue_seq(drvdata, desc, len);
311
312	if (add_comp) {
313		enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
314		total_seq_len++;
315	}
316
317	if (req_mgr_h->q_free_slots < total_seq_len) {
318		/* This situation should never occur. Maybe indicating problem
319		 * with resuming power. Set the free slot count to 0 and hope
320		 * for the best.
321		 */
322		dev_err(dev, "HW free slot count mismatch.");
323		req_mgr_h->q_free_slots = 0;
324	} else {
325		/* Update the free slots in HW queue */
326		req_mgr_h->q_free_slots -= total_seq_len;
327	}
328}
329
330static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
331			       struct cc_bl_item *bli)
332{
333	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
334	struct device *dev = drvdata_to_dev(drvdata);
335
336	spin_lock_bh(&mgr->bl_lock);
337	list_add_tail(&bli->list, &mgr->backlog);
338	++mgr->bl_len;
339	dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
340	spin_unlock_bh(&mgr->bl_lock);
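	/* Kick the completion tasklet so cc_proc_backlog() gets a chance to push the new entry to HW */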
341	tasklet_schedule(&mgr->comptask);
342}
343
344static void cc_proc_backlog(struct cc_drvdata *drvdata)
345{
346	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
347	struct cc_bl_item *bli;
348	struct cc_crypto_req *creq;
349	void *req;
350	struct device *dev = drvdata_to_dev(drvdata);
351	int rc;
352
353	spin_lock(&mgr->bl_lock);
354
355	while (mgr->bl_len) {
356		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
357		dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);
358
359		spin_unlock(&mgr->bl_lock);
360
361
362		creq = &bli->creq;
363		req = creq->user_arg;
364
365		/*
366		 * Notify the request we're moving out of the backlog
367		 * but only if we haven't done so already.
368		 */
369		if (!bli->notif) {
370			creq->user_cb(dev, req, -EINPROGRESS);
371			bli->notif = true;
372		}
373
374		spin_lock(&mgr->hw_lock);
375
376		rc = cc_queues_status(drvdata, mgr, bli->len);
377		if (rc) {
378			/*
379			 * There is still no room in the FIFO for
380			 * this request. Bail out. We'll return here
381			 * on the next completion irq.
382			 */
383			spin_unlock(&mgr->hw_lock);
384			return;
385		}
386
387		cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len,
388				   false);
389		spin_unlock(&mgr->hw_lock);
390
391		/* Remove ourselves from the backlog list */
392		spin_lock(&mgr->bl_lock);
393		list_del(&bli->list);
394		--mgr->bl_len;
395		kfree(bli);
396	}
397
398	spin_unlock(&mgr->bl_lock);
399}
400
401int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
402		    struct cc_hw_desc *desc, unsigned int len,
403		    struct crypto_async_request *req)
404{
405	int rc;
406	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
407	struct device *dev = drvdata_to_dev(drvdata);
408	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
409	gfp_t flags = cc_gfp_flags(req);
410	struct cc_bl_item *bli;
411
412	rc = cc_pm_get(dev);
413	if (rc) {
414		dev_err(dev, "cc_pm_get returned %x\n", rc);
415		return rc;
416	}
417
418	spin_lock_bh(&mgr->hw_lock);
419	rc = cc_queues_status(drvdata, mgr, len);
420
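	/* Debug aid: when CC_DEBUG_FORCE_BACKLOG is defined, every backlog-capable request is forced through the backlog path */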
421#ifdef CC_DEBUG_FORCE_BACKLOG
422	if (backlog_ok)
423		rc = -ENOSPC;
424#endif /* CC_DEBUG_FORCE_BACKLOG */
425
426	if (rc == -ENOSPC && backlog_ok) {
427		spin_unlock_bh(&mgr->hw_lock);
428
429		bli = kmalloc(sizeof(*bli), flags);
430		if (!bli) {
431			cc_pm_put_suspend(dev);
432			return -ENOMEM;
433		}
434
435		memcpy(&bli->creq, cc_req, sizeof(*cc_req));
436		memcpy(&bli->desc, desc, len * sizeof(*desc));
437		bli->len = len;
438		bli->notif = false;
439		cc_enqueue_backlog(drvdata, bli);
440		return -EBUSY;
441	}
442
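	/* Queued to HW: report -EINPROGRESS, the asynchronous-accept convention of the crypto API */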
443	if (!rc) {
444		cc_do_send_request(drvdata, cc_req, desc, len, false);
445		rc = -EINPROGRESS;
446	}
447
448	spin_unlock_bh(&mgr->hw_lock);
449	return rc;
450}
451
452int cc_send_sync_request(struct cc_drvdata *drvdata,
453			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
454			 unsigned int len)
455{
456	int rc;
457	struct device *dev = drvdata_to_dev(drvdata);
458	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
459
460	init_completion(&cc_req->seq_compl);
461	cc_req->user_cb = request_mgr_complete;
462	cc_req->user_arg = &cc_req->seq_compl;
463
464	rc = cc_pm_get(dev);
465	if (rc) {
466		dev_err(dev, "cc_pm_get returned %x\n", rc);
467		return rc;
468	}
469
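	/* Retry until the HW queue has room; hw_queue_avail is signalled from complete_request() */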
470	while (true) {
471		spin_lock_bh(&mgr->hw_lock);
472		rc = cc_queues_status(drvdata, mgr, len + 1);
473
474		if (!rc)
475			break;
476
477		spin_unlock_bh(&mgr->hw_lock);
478		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
479		reinit_completion(&drvdata->hw_queue_avail);
480	}
481
482	cc_do_send_request(drvdata, cc_req, desc, len, true);
483	spin_unlock_bh(&mgr->hw_lock);
484	wait_for_completion(&cc_req->seq_compl);
485	return 0;
486}
487
488/**
489 * send_request_init() - Enqueue caller request to crypto hardware during init
490 * process.
491 * Assume this function is not called in the middle of a flow,
492 * since we set QUEUE_LAST_IND flag in the last descriptor.
493 *
494 * @drvdata: Associated device driver context
495 * @desc: The crypto sequence
496 * @len: The crypto sequence length
497 *
498 * Return:
499 * Returns "0" upon success
500 */
501int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
502		      unsigned int len)
503{
504	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
505	unsigned int total_seq_len = len; /*initial sequence length*/
506	int rc = 0;
507
508	/* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT.
509	 */
510	rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
511	if (rc)
512		return rc;
513
514	set_queue_last_ind(drvdata, &desc[(len - 1)]);
515
516	/*
517	 * We are about to push command to the HW via the command registers
518	 * that may reference host memory. We need to issue a memory barrier
519	 * to make sure there are no outstanding memory writes
520	 */
521	wmb();
522	enqueue_seq(drvdata, desc, len);
523
524	/* Update the free slots in HW queue */
525	req_mgr_h->q_free_slots =
526		cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
527
528	return 0;
529}
530
531void complete_request(struct cc_drvdata *drvdata)
532{
533	struct cc_req_mgr_handle *request_mgr_handle =
534						drvdata->request_mgr_handle;
535
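	/* Wake any waiter blocked in cc_send_sync_request(), then defer completion processing to the workqueue or tasklet */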
536	complete(&drvdata->hw_queue_avail);
537#ifdef COMP_IN_WQ
538	queue_delayed_work(request_mgr_handle->workq,
539			   &request_mgr_handle->compwork, 0);
540#else
541	tasklet_schedule(&request_mgr_handle->comptask);
542#endif
543}
544
545#ifdef COMP_IN_WQ
546static void comp_work_handler(struct work_struct *work)
547{
548	struct cc_drvdata *drvdata =
549		container_of(work, struct cc_drvdata, compwork.work);
550
551	comp_handler((unsigned long)drvdata);
552}
553#endif
554
555static void proc_completions(struct cc_drvdata *drvdata)
556{
557	struct cc_crypto_req *cc_req;
558	struct device *dev = drvdata_to_dev(drvdata);
559	struct cc_req_mgr_handle *request_mgr_handle =
560						drvdata->request_mgr_handle;
561	unsigned int *tail = &request_mgr_handle->req_queue_tail;
562	unsigned int *head = &request_mgr_handle->req_queue_head;
563	int rc;
564	u32 mask;
565
566	while (request_mgr_handle->axi_completed) {
567		request_mgr_handle->axi_completed--;
568
569		/* Dequeue request */
570		if (*head == *tail) {
571			/* We are supposed to handle a completion but our
572			 * queue is empty. This is not normal. Return and
573			 * hope for the best.
574			 */
575			dev_err(dev, "Request queue is empty head == tail %u\n",
576				*head);
577			break;
578		}
579
580		cc_req = &request_mgr_handle->req_queue[*tail];
581
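		/* CPP requests: a pending REE_OP_ABORTED interrupt bit for this alg/slot means the HW aborted the operation, reported as -EPERM */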
582		if (cc_req->cpp.is_cpp) {
583
584			dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
585				cc_req->cpp.slot, cc_req->cpp.alg);
586			mask = cc_cpp_int_mask(cc_req->cpp.alg,
587					       cc_req->cpp.slot);
588			rc = (drvdata->irq & mask ? -EPERM : 0);
589			dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
590				drvdata->irq, rc);
591		} else {
592			dev_dbg(dev, "None CPP request completion\n");
593			rc = 0;
594		}
595
596		if (cc_req->user_cb)
597			cc_req->user_cb(dev, cc_req->user_arg, rc);
598		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
599		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
600		dev_dbg(dev, "Request completed. axi_completed=%d\n",
601			request_mgr_handle->axi_completed);
602		cc_pm_put_suspend(dev);
603	}
604}
605
606static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
607{
608	return FIELD_GET(AXIM_MON_COMP_VALUE,
609			 cc_ioread(drvdata, drvdata->axim_mon_offset));
610}
611
612/* Deferred service handler, run as interrupt-fired tasklet */
613static void comp_handler(unsigned long devarg)
614{
615	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
616	struct cc_req_mgr_handle *request_mgr_handle =
617						drvdata->request_mgr_handle;
618	struct device *dev = drvdata_to_dev(drvdata);
619	u32 irq;
620
621	dev_dbg(dev, "Completion handler called!\n");
622	irq = (drvdata->irq & drvdata->comp_mask);
623
624	/* To avoid the interrupt from firing as we unmask it,
625	 * we clear it now
626	 */
627	cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
628
629	/* Avoid race with above clear: Test completion counter once more */
630
631	request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
632
633	dev_dbg(dev, "AXI completion after updated: %d\n",
634		request_mgr_handle->axi_completed);
635
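	/* Drain loop: keep re-reading IRR and the AXI completion counter so completions that race with the ICR clear are not lost */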
636	while (request_mgr_handle->axi_completed) {
637		do {
638			drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
639			irq = (drvdata->irq & drvdata->comp_mask);
640			proc_completions(drvdata);
641
642			/* At this point (after proc_completions()),
643			 * request_mgr_handle->axi_completed is 0.
644			 */
645			request_mgr_handle->axi_completed +=
646						cc_axi_comp_count(drvdata);
647		} while (request_mgr_handle->axi_completed > 0);
648
649		cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
650
651		request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
652	}
653
654	/* after verifying that there is nothing to do,
655	 * unmask AXI completion interrupt
656	 */
657	cc_iowrite(drvdata, CC_REG(HOST_IMR),
658		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);
659
660	cc_proc_backlog(drvdata);
661	dev_dbg(dev, "Comp. handler done.\n");
662}
v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
  3
  4#include <linux/kernel.h>
  5#include <linux/nospec.h>
  6#include "cc_driver.h"
  7#include "cc_buffer_mgr.h"
  8#include "cc_request_mgr.h"
  9#include "cc_pm.h"
 10
 11#define CC_MAX_POLL_ITER	10
 12/* The highest descriptor count in use */
 13#define CC_MAX_DESC_SEQ_LEN	23
 14
 15struct cc_req_mgr_handle {
 16	/* Request manager resources */
 17	unsigned int hw_queue_size; /* HW capability */
 18	unsigned int min_free_hw_slots;
 19	unsigned int max_used_sw_slots;
 20	struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
 21	u32 req_queue_head;
 22	u32 req_queue_tail;
 23	u32 axi_completed;
 24	u32 q_free_slots;
 25	/* This lock protects access to HW register
 26	 * that must be single request at a time
 27	 */
 28	spinlock_t hw_lock;
 29	struct cc_hw_desc compl_desc;
 30	u8 *dummy_comp_buff;
 31	dma_addr_t dummy_comp_buff_dma;
 32
 33	/* backlog queue */
 34	struct list_head backlog;
 35	unsigned int bl_len;
 36	spinlock_t bl_lock; /* protect backlog queue */
 37
 38#ifdef COMP_IN_WQ
 39	struct workqueue_struct *workq;
 40	struct delayed_work compwork;
 41#else
 42	struct tasklet_struct comptask;
 43#endif
 44	bool is_runtime_suspended;
 45};
 46
 47struct cc_bl_item {
 48	struct cc_crypto_req creq;
 49	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
 50	unsigned int len;
 51	struct list_head list;
 52	bool notif;
 53};
 54
 55static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
 56	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
 57	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
 58	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
 59	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
 60	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
 61	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
 62	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
 63	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
 64	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
 65	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
 66	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
 67	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
 68	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
 69	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
 70	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
 71	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
 72};
 73
 74static void comp_handler(unsigned long devarg);
 75#ifdef COMP_IN_WQ
 76static void comp_work_handler(struct work_struct *work);
 77#endif
 78
 79static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
 80{
 81	alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
 82	slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);
 83
 84	return cc_cpp_int_masks[alg][slot];
 85}
 86
 87void cc_req_mgr_fini(struct cc_drvdata *drvdata)
 88{
 89	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
 90	struct device *dev = drvdata_to_dev(drvdata);
 91
 92	if (!req_mgr_h)
 93		return; /* Not allocated */
 94
 95	if (req_mgr_h->dummy_comp_buff_dma) {
 96		dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
 97				  req_mgr_h->dummy_comp_buff_dma);
 98	}
 99
100	dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
101						req_mgr_h->min_free_hw_slots));
102	dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
103
104#ifdef COMP_IN_WQ
105	flush_workqueue(req_mgr_h->workq);
106	destroy_workqueue(req_mgr_h->workq);
107#else
108	/* Kill tasklet */
109	tasklet_kill(&req_mgr_h->comptask);
110#endif
111	kzfree(req_mgr_h);
112	drvdata->request_mgr_handle = NULL;
113}
114
115int cc_req_mgr_init(struct cc_drvdata *drvdata)
116{
117	struct cc_req_mgr_handle *req_mgr_h;
118	struct device *dev = drvdata_to_dev(drvdata);
119	int rc = 0;
120
121	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
122	if (!req_mgr_h) {
123		rc = -ENOMEM;
124		goto req_mgr_init_err;
125	}
126
127	drvdata->request_mgr_handle = req_mgr_h;
128
129	spin_lock_init(&req_mgr_h->hw_lock);
130	spin_lock_init(&req_mgr_h->bl_lock);
131	INIT_LIST_HEAD(&req_mgr_h->backlog);
132
133#ifdef COMP_IN_WQ
134	dev_dbg(dev, "Initializing completion workqueue\n");
135	req_mgr_h->workq = create_singlethread_workqueue("ccree");
136	if (!req_mgr_h->workq) {
137		dev_err(dev, "Failed creating work queue\n");
138		rc = -ENOMEM;
139		goto req_mgr_init_err;
140	}
141	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
142#else
143	dev_dbg(dev, "Initializing completion tasklet\n");
144	tasklet_init(&req_mgr_h->comptask, comp_handler,
145		     (unsigned long)drvdata);
146#endif
147	req_mgr_h->hw_queue_size = cc_ioread(drvdata,
148					     CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
149	dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
150	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
151		dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
152			req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
153		rc = -ENOMEM;
154		goto req_mgr_init_err;
155	}
156	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
157	req_mgr_h->max_used_sw_slots = 0;
158
159	/* Allocate DMA word for "dummy" completion descriptor use */
160	req_mgr_h->dummy_comp_buff =
161		dma_alloc_coherent(dev, sizeof(u32),
162				   &req_mgr_h->dummy_comp_buff_dma,
163				   GFP_KERNEL);
164	if (!req_mgr_h->dummy_comp_buff) {
165		dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
166			sizeof(u32));
167		rc = -ENOMEM;
168		goto req_mgr_init_err;
169	}
170
171	/* Init. "dummy" completion descriptor */
172	hw_desc_init(&req_mgr_h->compl_desc);
173	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
174	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
175		      sizeof(u32), NS_BIT, 1);
176	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
177	set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);
178
179	return 0;
180
181req_mgr_init_err:
182	cc_req_mgr_fini(drvdata);
183	return rc;
184}
185
186static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
187			unsigned int seq_len)
188{
189	int i, w;
190	void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
191	struct device *dev = drvdata_to_dev(drvdata);
192
193	/*
194	 * We do indeed write all 6 command words to the same
195	 * register. The HW supports this.
196	 */
197
198	for (i = 0; i < seq_len; i++) {
199		for (w = 0; w <= 5; w++)
200			writel_relaxed(seq[i].word[w], reg);
201
202		if (cc_dump_desc)
203			dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
204				i, seq[i].word[0], seq[i].word[1],
205				seq[i].word[2], seq[i].word[3],
206				seq[i].word[4], seq[i].word[5]);
207	}
208}
209
210/*!
211 * Completion will take place if and only if user requested completion
212 * by cc_send_sync_request().
213 *
214 * \param dev
215 * \param dx_compl_h The completion event to signal
216 */
217static void request_mgr_complete(struct device *dev, void *dx_compl_h,
218				 int dummy)
219{
220	struct completion *this_compl = dx_compl_h;
221
222	complete(this_compl);
223}
224
225static int cc_queues_status(struct cc_drvdata *drvdata,
226			    struct cc_req_mgr_handle *req_mgr_h,
227			    unsigned int total_seq_len)
228{
229	unsigned long poll_queue;
230	struct device *dev = drvdata_to_dev(drvdata);
231
232	/* SW queue is checked only once as it will not
233	 * be changed during the poll because the spinlock_bh
234	 * is held by the thread
235	 */
236	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
237	    req_mgr_h->req_queue_tail) {
238		dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
239			req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
240		return -ENOSPC;
241	}
242
243	if (req_mgr_h->q_free_slots >= total_seq_len)
244		return 0;
245
246	/* Wait for space in HW queue. Poll constant num of iterations. */
247	for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER ; poll_queue++) {
248		req_mgr_h->q_free_slots =
249			cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
250		if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
251			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
252
253		if (req_mgr_h->q_free_slots >= total_seq_len) {
254			/* If there is enough place return */
255			return 0;
256		}
257
258		dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
259			req_mgr_h->q_free_slots, total_seq_len);
260	}
261	/* No room in the HW queue, try again later */
262	dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
263		req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
264		req_mgr_h->q_free_slots, total_seq_len);
265	return -ENOSPC;
266}
267
268/*!
269 * Enqueue caller request to crypto hardware.
270 * Need to be called with HW lock held and PM running
271 *
272 * \param drvdata
273 * \param cc_req The request to enqueue
274 * \param desc The crypto sequence
275 * \param len The crypto sequence length
276 * \param add_comp If "true": add an artificial dout DMA to mark completion
277 *
278 * \return int Returns -EINPROGRESS or error code
279 */
280static int cc_do_send_request(struct cc_drvdata *drvdata,
281			      struct cc_crypto_req *cc_req,
282			      struct cc_hw_desc *desc, unsigned int len,
283				bool add_comp)
284{
285	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
286	unsigned int used_sw_slots;
287	unsigned int total_seq_len = len; /*initial sequence length*/
288	struct device *dev = drvdata_to_dev(drvdata);
289
290	used_sw_slots = ((req_mgr_h->req_queue_head -
291			  req_mgr_h->req_queue_tail) &
292			 (MAX_REQUEST_QUEUE_SIZE - 1));
293	if (used_sw_slots > req_mgr_h->max_used_sw_slots)
294		req_mgr_h->max_used_sw_slots = used_sw_slots;
295
296	/* Enqueue request - must be locked with HW lock*/
297	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
298	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
299				    (MAX_REQUEST_QUEUE_SIZE - 1);
300	/* TODO: Use circ_buf.h ? */
301
302	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
303
304	/*
305	 * We are about to push command to the HW via the command registers
306	 * that may reference host memory. We need to issue a memory barrier
307	 * to make sure there are no outstanding memory writes
308	 */
309	wmb();
310
311	/* STAT_PHASE_4: Push sequence */
312
313	enqueue_seq(drvdata, desc, len);
314
315	if (add_comp) {
316		enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
317		total_seq_len++;
318	}
319
320	if (req_mgr_h->q_free_slots < total_seq_len) {
321		/* This situation should never occur. Maybe indicating problem
322		 * with resuming power. Set the free slot count to 0 and hope
323		 * for the best.
324		 */
325		dev_err(dev, "HW free slot count mismatch.");
326		req_mgr_h->q_free_slots = 0;
327	} else {
328		/* Update the free slots in HW queue */
329		req_mgr_h->q_free_slots -= total_seq_len;
330	}
331
332	/* Operation still in process */
333	return -EINPROGRESS;
334}
335
336static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
337			       struct cc_bl_item *bli)
338{
339	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
340	struct device *dev = drvdata_to_dev(drvdata);
341
342	spin_lock_bh(&mgr->bl_lock);
343	list_add_tail(&bli->list, &mgr->backlog);
344	++mgr->bl_len;
345	dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
346	spin_unlock_bh(&mgr->bl_lock);
347	tasklet_schedule(&mgr->comptask);
348}
349
350static void cc_proc_backlog(struct cc_drvdata *drvdata)
351{
352	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
353	struct cc_bl_item *bli;
354	struct cc_crypto_req *creq;
355	void *req;
356	struct device *dev = drvdata_to_dev(drvdata);
357	int rc;
358
359	spin_lock(&mgr->bl_lock);
360
361	while (mgr->bl_len) {
362		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
363		dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);
364
365		spin_unlock(&mgr->bl_lock);
366
367
368		creq = &bli->creq;
369		req = creq->user_arg;
370
371		/*
372		 * Notify the request we're moving out of the backlog
373		 * but only if we haven't done so already.
374		 */
375		if (!bli->notif) {
376			creq->user_cb(dev, req, -EINPROGRESS);
377			bli->notif = true;
378		}
379
380		spin_lock(&mgr->hw_lock);
381
382		rc = cc_queues_status(drvdata, mgr, bli->len);
383		if (rc) {
384			/*
385			 * There is still no room in the FIFO for
386			 * this request. Bail out. We'll return here
387			 * on the next completion irq.
388			 */
389			spin_unlock(&mgr->hw_lock);
390			return;
391		}
392
393		rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
394					bli->len, false);
395
396		spin_unlock(&mgr->hw_lock);
397
398		if (rc != -EINPROGRESS) {
399			cc_pm_put_suspend(dev);
400			creq->user_cb(dev, req, rc);
401		}
402
403		/* Remove ourselves from the backlog list */
404		spin_lock(&mgr->bl_lock);
405		list_del(&bli->list);
406		--mgr->bl_len;
407	}
408
409	spin_unlock(&mgr->bl_lock);
410}
411
412int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
413		    struct cc_hw_desc *desc, unsigned int len,
414		    struct crypto_async_request *req)
415{
416	int rc;
417	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
418	struct device *dev = drvdata_to_dev(drvdata);
419	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
420	gfp_t flags = cc_gfp_flags(req);
421	struct cc_bl_item *bli;
422
423	rc = cc_pm_get(dev);
424	if (rc) {
425		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
426		return rc;
427	}
428
429	spin_lock_bh(&mgr->hw_lock);
430	rc = cc_queues_status(drvdata, mgr, len);
431
432#ifdef CC_DEBUG_FORCE_BACKLOG
433	if (backlog_ok)
434		rc = -ENOSPC;
435#endif /* CC_DEBUG_FORCE_BACKLOG */
436
437	if (rc == -ENOSPC && backlog_ok) {
438		spin_unlock_bh(&mgr->hw_lock);
439
440		bli = kmalloc(sizeof(*bli), flags);
441		if (!bli) {
442			cc_pm_put_suspend(dev);
443			return -ENOMEM;
444		}
445
446		memcpy(&bli->creq, cc_req, sizeof(*cc_req));
447		memcpy(&bli->desc, desc, len * sizeof(*desc));
448		bli->len = len;
449		bli->notif = false;
450		cc_enqueue_backlog(drvdata, bli);
451		return -EBUSY;
452	}
453
454	if (!rc)
455		rc = cc_do_send_request(drvdata, cc_req, desc, len, false);
456
457	spin_unlock_bh(&mgr->hw_lock);
458	return rc;
459}
460
461int cc_send_sync_request(struct cc_drvdata *drvdata,
462			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
463			 unsigned int len)
464{
465	int rc;
466	struct device *dev = drvdata_to_dev(drvdata);
467	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
468
469	init_completion(&cc_req->seq_compl);
470	cc_req->user_cb = request_mgr_complete;
471	cc_req->user_arg = &cc_req->seq_compl;
472
473	rc = cc_pm_get(dev);
474	if (rc) {
475		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
476		return rc;
477	}
478
479	while (true) {
480		spin_lock_bh(&mgr->hw_lock);
481		rc = cc_queues_status(drvdata, mgr, len + 1);
482
483		if (!rc)
484			break;
485
486		spin_unlock_bh(&mgr->hw_lock);
487		if (rc != -EAGAIN) {
488			cc_pm_put_suspend(dev);
489			return rc;
490		}
491		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
492		reinit_completion(&drvdata->hw_queue_avail);
493	}
494
495	rc = cc_do_send_request(drvdata, cc_req, desc, len, true);
496	spin_unlock_bh(&mgr->hw_lock);
497
498	if (rc != -EINPROGRESS) {
499		cc_pm_put_suspend(dev);
500		return rc;
501	}
502
503	wait_for_completion(&cc_req->seq_compl);
504	return 0;
505}
506
507/*!
508 * Enqueue caller request to crypto hardware during init process.
509 * Assume this function is not called in the middle of a flow,
510 * since we set QUEUE_LAST_IND flag in the last descriptor.
511 *
512 * \param drvdata
513 * \param desc The crypto sequence
514 * \param len The crypto sequence length
515 *
516 * \return int Returns "0" upon success
517 */
518int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
519		      unsigned int len)
520{
521	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
522	unsigned int total_seq_len = len; /*initial sequence length*/
523	int rc = 0;
524
525	/* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT.
526	 */
527	rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
528	if (rc)
529		return rc;
530
531	set_queue_last_ind(drvdata, &desc[(len - 1)]);
532
533	/*
534	 * We are about to push command to the HW via the command registers
535	 * that may reference host memory. We need to issue a memory barrier
536	 * to make sure there are no outstanding memory writes
537	 */
538	wmb();
539	enqueue_seq(drvdata, desc, len);
540
541	/* Update the free slots in HW queue */
542	req_mgr_h->q_free_slots =
543		cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
544
545	return 0;
546}
547
548void complete_request(struct cc_drvdata *drvdata)
549{
550	struct cc_req_mgr_handle *request_mgr_handle =
551						drvdata->request_mgr_handle;
552
553	complete(&drvdata->hw_queue_avail);
554#ifdef COMP_IN_WQ
555	queue_delayed_work(request_mgr_handle->workq,
556			   &request_mgr_handle->compwork, 0);
557#else
558	tasklet_schedule(&request_mgr_handle->comptask);
559#endif
560}
561
562#ifdef COMP_IN_WQ
563static void comp_work_handler(struct work_struct *work)
564{
565	struct cc_drvdata *drvdata =
566		container_of(work, struct cc_drvdata, compwork.work);
567
568	comp_handler((unsigned long)drvdata);
569}
570#endif
571
572static void proc_completions(struct cc_drvdata *drvdata)
573{
574	struct cc_crypto_req *cc_req;
575	struct device *dev = drvdata_to_dev(drvdata);
576	struct cc_req_mgr_handle *request_mgr_handle =
577						drvdata->request_mgr_handle;
578	unsigned int *tail = &request_mgr_handle->req_queue_tail;
579	unsigned int *head = &request_mgr_handle->req_queue_head;
580	int rc;
581	u32 mask;
582
583	while (request_mgr_handle->axi_completed) {
584		request_mgr_handle->axi_completed--;
585
586		/* Dequeue request */
587		if (*head == *tail) {
588			/* We are supposed to handle a completion but our
589			 * queue is empty. This is not normal. Return and
590			 * hope for the best.
591			 */
592			dev_err(dev, "Request queue is empty head == tail %u\n",
593				*head);
594			break;
595		}
596
597		cc_req = &request_mgr_handle->req_queue[*tail];
598
599		if (cc_req->cpp.is_cpp) {
600
601			dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
602				cc_req->cpp.slot, cc_req->cpp.alg);
603			mask = cc_cpp_int_mask(cc_req->cpp.alg,
604					       cc_req->cpp.slot);
605			rc = (drvdata->irq & mask ? -EPERM : 0);
606			dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
607				drvdata->irq, rc);
608		} else {
609			dev_dbg(dev, "None CPP request completion\n");
610			rc = 0;
611		}
612
613		if (cc_req->user_cb)
614			cc_req->user_cb(dev, cc_req->user_arg, rc);
615		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
616		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
617		dev_dbg(dev, "Request completed. axi_completed=%d\n",
618			request_mgr_handle->axi_completed);
619		cc_pm_put_suspend(dev);
620	}
621}
622
623static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
624{
625	return FIELD_GET(AXIM_MON_COMP_VALUE,
626			 cc_ioread(drvdata, drvdata->axim_mon_offset));
627}
628
629/* Deferred service handler, run as interrupt-fired tasklet */
630static void comp_handler(unsigned long devarg)
631{
632	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
633	struct cc_req_mgr_handle *request_mgr_handle =
634						drvdata->request_mgr_handle;
635	struct device *dev = drvdata_to_dev(drvdata);
636	u32 irq;
637
638	dev_dbg(dev, "Completion handler called!\n");
639	irq = (drvdata->irq & drvdata->comp_mask);
640
641	/* To avoid the interrupt from firing as we unmask it,
642	 * we clear it now
643	 */
644	cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
645
646	/* Avoid race with above clear: Test completion counter once more */
647
648	request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
649
650	dev_dbg(dev, "AXI completion after updated: %d\n",
651		request_mgr_handle->axi_completed);
652
653	while (request_mgr_handle->axi_completed) {
654		do {
655			drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
656			irq = (drvdata->irq & drvdata->comp_mask);
657			proc_completions(drvdata);
658
659			/* At this point (after proc_completions()),
660			 * request_mgr_handle->axi_completed is 0.
661			 */
662			request_mgr_handle->axi_completed +=
663						cc_axi_comp_count(drvdata);
664		} while (request_mgr_handle->axi_completed > 0);
665
666		cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
667
668		request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
669	}
670
671	/* after verifying that there is nothing to do,
672	 * unmask AXI completion interrupt
673	 */
674	cc_iowrite(drvdata, CC_REG(HOST_IMR),
675		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);
676
677	cc_proc_backlog(drvdata);
678	dev_dbg(dev, "Comp. handler done.\n");
679}
680
681/*
682 * resume the queue configuration - no need to take the lock as this happens
683 * inside the spin lock protection
684 */
685#if defined(CONFIG_PM)
686int cc_resume_req_queue(struct cc_drvdata *drvdata)
687{
688	struct cc_req_mgr_handle *request_mgr_handle =
689		drvdata->request_mgr_handle;
690
691	spin_lock_bh(&request_mgr_handle->hw_lock);
692	request_mgr_handle->is_runtime_suspended = false;
693	spin_unlock_bh(&request_mgr_handle->hw_lock);
694
695	return 0;
696}
697
698/*
699 * suspend the queue configuration. Since it is used for the runtime suspend
700 * only verify that the queue can be suspended.
701 */
702int cc_suspend_req_queue(struct cc_drvdata *drvdata)
703{
704	struct cc_req_mgr_handle *request_mgr_handle =
705						drvdata->request_mgr_handle;
706
707	/* lock the send_request */
708	spin_lock_bh(&request_mgr_handle->hw_lock);
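	/* Refuse to runtime-suspend while requests are still pending in the SW queue (head != tail) */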
709	if (request_mgr_handle->req_queue_head !=
710	    request_mgr_handle->req_queue_tail) {
711		spin_unlock_bh(&request_mgr_handle->hw_lock);
712		return -EBUSY;
713	}
714	request_mgr_handle->is_runtime_suspended = true;
715	spin_unlock_bh(&request_mgr_handle->hw_lock);
716
717	return 0;
718}
719
720bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
721{
722	struct cc_req_mgr_handle *request_mgr_handle =
723						drvdata->request_mgr_handle;
724
725	return	request_mgr_handle->is_runtime_suspended;
726}
727
728#endif