v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * CAAM/SEC 4.x QI transport/backend driver
  4 * Queue Interface backend functionality
  5 *
  6 * Copyright 2013-2016 Freescale Semiconductor, Inc.
  7 * Copyright 2016-2017, 2019-2020 NXP
  8 */
  9
 10#include <linux/cpumask.h>
 11#include <linux/kthread.h>
 12#include <soc/fsl/qman.h>
 13
 14#include "regs.h"
 15#include "qi.h"
 16#include "desc.h"
 17#include "intern.h"
 18#include "desc_constr.h"
 19
 20#define PREHDR_RSLS_SHIFT	31
 21#define PREHDR_ABS		BIT(25)
 22
 23/*
 24 * Use a reasonable backlog of frames (per CPU) as congestion threshold,
 25 * so that resources used by the in-flight buffers do not become a memory hog.
 26 */
 27#define MAX_RSP_FQ_BACKLOG_PER_CPU	256
 28
 29#define CAAM_QI_ENQUEUE_RETRIES	10000
 30
 31#define CAAM_NAPI_WEIGHT	63
 32
 33/*
 34 * caam_napi - struct holding CAAM NAPI-related params
 35 * @irqtask: IRQ task for QI backend
 36 * @p: QMan portal
 37 */
 38struct caam_napi {
 39	struct napi_struct irqtask;
 40	struct qman_portal *p;
 41};
 42
 43/*
 44 * caam_qi_pcpu_priv - percpu private data structure to maintain the list
 45 *                     of pending responses expected on each cpu.
 46 * @caam_napi: CAAM NAPI params
 47 * @net_dev: netdev used by NAPI
 48 * @rsp_fq: response FQ from CAAM
 49 */
 50struct caam_qi_pcpu_priv {
 51	struct caam_napi caam_napi;
 52	struct net_device net_dev;
 53	struct qman_fq *rsp_fq;
 54} ____cacheline_aligned;
 55
 56static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
 57static DEFINE_PER_CPU(int, last_cpu);
 58
 59/*
 60 * caam_qi_priv - CAAM QI backend private params
 61 * @cgr: QMan congestion group
 62 */
 63struct caam_qi_priv {
 64	struct qman_cgr cgr;
 65};
 66
 67static struct caam_qi_priv qipriv ____cacheline_aligned;
 68
 69/*
 70 * This is written by only one core - the one that initialized the CGR - and
 71 * read by multiple cores (all the others).
 72 */
 73bool caam_congested __read_mostly;
 74EXPORT_SYMBOL(caam_congested);
 75
 76#ifdef CONFIG_DEBUG_FS
 77/*
 78 * This is a counter for the number of times the congestion group (where all
 79 * the request and response queues are) reached congestion. Incremented
 80 * each time the congestion callback is called with congested == true.
 81 */
 82static u64 times_congested;
 83#endif
 84
 85/*
 86 * This is a cache of buffers, from which the users of CAAM QI driver
 87 * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
 88 * doing malloc on the hotpath.
 89 * NOTE: A more elegant solution would be to have some headroom in the frames
 90 *       being processed. This could be added by the dpaa-ethernet driver.
 91 *       This would pose a problem for userspace application processing which
 92 *       cannot know of this limitation. So for now, this will work.
 93 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
 94 */
 95static struct kmem_cache *qi_cache;
 96
 97static void *caam_iova_to_virt(struct iommu_domain *domain,
 98			       dma_addr_t iova_addr)
 99{
100	phys_addr_t phys_addr;
101
102	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
103
104	return phys_to_virt(phys_addr);
105}
106
107int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
108{
109	struct qm_fd fd;
110	dma_addr_t addr;
111	int ret;
112	int num_retries = 0;
113
114	qm_fd_clear_fd(&fd);
115	qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
116
117	addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
118			      DMA_BIDIRECTIONAL);
119	if (dma_mapping_error(qidev, addr)) {
120		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
121		return -EIO;
122	}
123	qm_fd_addr_set64(&fd, addr);
124
125	do {
126		ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
127		if (likely(!ret)) {
128			refcount_inc(&req->drv_ctx->refcnt);
129			return 0;
130		}
131
132		if (ret != -EBUSY)
133			break;
134		num_retries++;
135	} while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
136
137	dev_err(qidev, "qman_enqueue failed: %d\n", ret);
138
139	return ret;
140}
141EXPORT_SYMBOL(caam_qi_enqueue);
142
143static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
144			   const union qm_mr_entry *msg)
145{
146	const struct qm_fd *fd;
147	struct caam_drv_req *drv_req;
148	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
149	struct caam_drv_private *priv = dev_get_drvdata(qidev);
150
151	fd = &msg->ern.fd;
152
153	drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
154	if (!drv_req) {
155		dev_err(qidev,
156			"Can't find original request for CAAM response\n");
157		return;
158	}
159
160	refcount_dec(&drv_req->drv_ctx->refcnt);
161
162	if (qm_fd_get_format(fd) != qm_fd_compound) {
163		dev_err(qidev, "Non-compound FD from CAAM\n");
164		return;
165	}
166
167	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
168			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
169
170	if (fd->status)
171		drv_req->cbk(drv_req, be32_to_cpu(fd->status));
172	else
173		drv_req->cbk(drv_req, JRSTA_SSRC_QI);
174}
175
176static struct qman_fq *create_caam_req_fq(struct device *qidev,
177					  struct qman_fq *rsp_fq,
178					  dma_addr_t hwdesc,
179					  int fq_sched_flag)
180{
181	int ret;
182	struct qman_fq *req_fq;
183	struct qm_mcc_initfq opts;
184
185	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
186	if (!req_fq)
187		return ERR_PTR(-ENOMEM);
188
189	req_fq->cb.ern = caam_fq_ern_cb;
190	req_fq->cb.fqs = NULL;
191
192	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
193				QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
194	if (ret) {
195		dev_err(qidev, "Failed to create session req FQ\n");
196		goto create_req_fq_fail;
197	}
198
199	memset(&opts, 0, sizeof(opts));
200	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
201				   QM_INITFQ_WE_CONTEXTB |
202				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
203	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
204	qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
205	opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
206	qm_fqd_context_a_set64(&opts.fqd, hwdesc);
207	opts.fqd.cgid = qipriv.cgr.cgrid;
208
209	ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
210	if (ret) {
211		dev_err(qidev, "Failed to init session req FQ\n");
212		goto init_req_fq_fail;
213	}
214
215	dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
216		smp_processor_id());
217	return req_fq;
218
219init_req_fq_fail:
220	qman_destroy_fq(req_fq);
221create_req_fq_fail:
222	kfree(req_fq);
223	return ERR_PTR(ret);
224}
225
226static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
227{
228	int ret;
229
230	ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
231				    QMAN_VOLATILE_FLAG_FINISH,
232				    QM_VDQCR_PRECEDENCE_VDQCR |
233				    QM_VDQCR_NUMFRAMES_TILLEMPTY);
234	if (ret) {
235		dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
236		return ret;
237	}
238
239	do {
240		struct qman_portal *p;
241
242		p = qman_get_affine_portal(smp_processor_id());
243		qman_p_poll_dqrr(p, 16);
244	} while (fq->flags & QMAN_FQ_STATE_NE);
245
246	return 0;
247}
248
249static int kill_fq(struct device *qidev, struct qman_fq *fq)
250{
251	u32 flags;
252	int ret;
253
254	ret = qman_retire_fq(fq, &flags);
255	if (ret < 0) {
256		dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
257		return ret;
258	}
259
260	if (!ret)
261		goto empty_fq;
262
263	/* Async FQ retirement condition */
264	if (ret == 1) {
265		/* Retry till FQ gets in retired state */
266		do {
267			msleep(20);
268		} while (fq->state != qman_fq_state_retired);
269
270		WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
271		WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
272	}
273
274empty_fq:
275	if (fq->flags & QMAN_FQ_STATE_NE) {
276		ret = empty_retired_fq(qidev, fq);
277		if (ret) {
278			dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
279				fq->fqid);
280			return ret;
281		}
282	}
283
284	ret = qman_oos_fq(fq);
285	if (ret)
286		dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
287
288	qman_destroy_fq(fq);
289	kfree(fq);
290
291	return ret;
292}
293
294static int empty_caam_fq(struct qman_fq *fq, struct caam_drv_ctx *drv_ctx)
295{
296	int ret;
297	int retries = 10;
298	struct qm_mcr_queryfq_np np;
299
 300	/* Wait till the older CAAM FQ gets empty */
301	do {
302		ret = qman_query_fq_np(fq, &np);
303		if (ret)
304			return ret;
305
306		if (!qm_mcr_np_get(&np, frm_cnt))
307			break;
308
309		msleep(20);
310	} while (1);
311
312	/* Wait until pending jobs from this FQ are processed by CAAM */
313	do {
314		if (refcount_read(&drv_ctx->refcnt) == 1)
315			break;
316
317		msleep(20);
318	} while (--retries);
319
320	if (!retries)
321		dev_warn_once(drv_ctx->qidev, "%d frames from FQID %u still pending in CAAM\n",
322			      refcount_read(&drv_ctx->refcnt), fq->fqid);
323
324	return 0;
325}
326
327int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
328{
329	int ret;
330	u32 num_words;
331	struct qman_fq *new_fq, *old_fq;
332	struct device *qidev = drv_ctx->qidev;
333
334	num_words = desc_len(sh_desc);
335	if (num_words > MAX_SDLEN) {
336		dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
337		return -EINVAL;
338	}
339
340	/* Note down older req FQ */
341	old_fq = drv_ctx->req_fq;
342
343	/* Create a new req FQ in parked state */
344	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
345				    drv_ctx->context_a, 0);
346	if (IS_ERR(new_fq)) {
347		dev_err(qidev, "FQ allocation for shdesc update failed\n");
348		return PTR_ERR(new_fq);
349	}
350
351	/* Hook up new FQ to context so that new requests keep queuing */
352	drv_ctx->req_fq = new_fq;
353
354	/* Empty and remove the older FQ */
355	ret = empty_caam_fq(old_fq, drv_ctx);
356	if (ret) {
357		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
358
359		/* We can revert to older FQ */
360		drv_ctx->req_fq = old_fq;
361
362		if (kill_fq(qidev, new_fq))
363			dev_warn(qidev, "New CAAM FQ kill failed\n");
364
365		return ret;
366	}
367
368	/*
369	 * Re-initialise pre-header. Set RSLS and SDLEN.
370	 * Update the shared descriptor for driver context.
371	 */
372	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
373					   num_words);
374	drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
375	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
376	dma_sync_single_for_device(qidev, drv_ctx->context_a,
377				   sizeof(drv_ctx->sh_desc) +
378				   sizeof(drv_ctx->prehdr),
379				   DMA_BIDIRECTIONAL);
380
381	/* Put the new FQ in scheduled state */
382	ret = qman_schedule_fq(new_fq);
383	if (ret) {
384		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
385
386		/*
387		 * We can kill new FQ and revert to old FQ.
 388		 * Since the desc is already modified, it is a success case
389		 */
390
391		drv_ctx->req_fq = old_fq;
392
393		if (kill_fq(qidev, new_fq))
394			dev_warn(qidev, "New CAAM FQ kill failed\n");
395	} else if (kill_fq(qidev, old_fq)) {
396		dev_warn(qidev, "Old CAAM FQ kill failed\n");
397	}
398
399	return 0;
400}
401EXPORT_SYMBOL(caam_drv_ctx_update);
402
403struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
404				       int *cpu,
405				       u32 *sh_desc)
406{
407	size_t size;
408	u32 num_words;
409	dma_addr_t hwdesc;
410	struct caam_drv_ctx *drv_ctx;
411	const cpumask_t *cpus = qman_affine_cpus();
412
413	num_words = desc_len(sh_desc);
414	if (num_words > MAX_SDLEN) {
415		dev_err(qidev, "Invalid descriptor len: %d words\n",
416			num_words);
417		return ERR_PTR(-EINVAL);
418	}
419
420	drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
421	if (!drv_ctx)
422		return ERR_PTR(-ENOMEM);
423
424	/*
425	 * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
426	 * and dma-map them.
427	 */
428	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
429					   num_words);
430	drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
431	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
432	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
433	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
434				DMA_BIDIRECTIONAL);
435	if (dma_mapping_error(qidev, hwdesc)) {
436		dev_err(qidev, "DMA map error for preheader + shdesc\n");
437		kfree(drv_ctx);
438		return ERR_PTR(-ENOMEM);
439	}
440	drv_ctx->context_a = hwdesc;
441
442	/* If given CPU does not own the portal, choose another one that does */
443	if (!cpumask_test_cpu(*cpu, cpus)) {
444		int *pcpu = &get_cpu_var(last_cpu);
445
446		*pcpu = cpumask_next(*pcpu, cpus);
447		if (*pcpu >= nr_cpu_ids)
448			*pcpu = cpumask_first(cpus);
449		*cpu = *pcpu;
450
451		put_cpu_var(last_cpu);
452	}
453	drv_ctx->cpu = *cpu;
454
455	/* Find response FQ hooked with this CPU */
456	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
457
458	/* Attach request FQ */
459	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
460					     QMAN_INITFQ_FLAG_SCHED);
461	if (IS_ERR(drv_ctx->req_fq)) {
462		dev_err(qidev, "create_caam_req_fq failed\n");
463		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
464		kfree(drv_ctx);
465		return ERR_PTR(-ENOMEM);
466	}
467
468	/* init reference counter used to track references to request FQ */
469	refcount_set(&drv_ctx->refcnt, 1);
470
471	drv_ctx->qidev = qidev;
472	return drv_ctx;
473}
474EXPORT_SYMBOL(caam_drv_ctx_init);
475
476void *qi_cache_alloc(gfp_t flags)
477{
478	return kmem_cache_alloc(qi_cache, flags);
479}
480EXPORT_SYMBOL(qi_cache_alloc);
481
482void qi_cache_free(void *obj)
483{
484	kmem_cache_free(qi_cache, obj);
485}
486EXPORT_SYMBOL(qi_cache_free);
487
488static int caam_qi_poll(struct napi_struct *napi, int budget)
489{
490	struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
491
492	int cleaned = qman_p_poll_dqrr(np->p, budget);
493
494	if (cleaned < budget) {
495		napi_complete(napi);
496		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
497	}
498
499	return cleaned;
500}
501
502void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
503{
504	if (IS_ERR_OR_NULL(drv_ctx))
505		return;
506
507	/* Remove request FQ */
508	if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
509		dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
510
511	dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
512			 sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
513			 DMA_BIDIRECTIONAL);
514	kfree(drv_ctx);
515}
516EXPORT_SYMBOL(caam_drv_ctx_rel);
517
518static void caam_qi_shutdown(void *data)
519{
520	int i;
521	struct device *qidev = data;
522	struct caam_qi_priv *priv = &qipriv;
523	const cpumask_t *cpus = qman_affine_cpus();
524
525	for_each_cpu(i, cpus) {
526		struct napi_struct *irqtask;
527
528		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
529		napi_disable(irqtask);
530		netif_napi_del(irqtask);
531
532		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
533			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
534	}
535
536	qman_delete_cgr_safe(&priv->cgr);
537	qman_release_cgrid(priv->cgr.cgrid);
538
539	kmem_cache_destroy(qi_cache);
540}
541
542static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
543{
544	caam_congested = congested;
545
546	if (congested) {
547#ifdef CONFIG_DEBUG_FS
548		times_congested++;
549#endif
550		pr_debug_ratelimited("CAAM entered congestion\n");
551
552	} else {
553		pr_debug_ratelimited("CAAM exited congestion\n");
554	}
555}
556
557static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
558{
559	/*
560	 * In case of threaded ISR, for RT kernels in_irq() does not return
561	 * appropriate value, so use in_serving_softirq to distinguish between
562	 * softirq and irq contexts.
563	 */
564	if (unlikely(in_irq() || !in_serving_softirq())) {
565		/* Disable QMan IRQ source and invoke NAPI */
566		qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
567		np->p = p;
568		napi_schedule(&np->irqtask);
569		return 1;
570	}
571	return 0;
572}
573
574static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
575						    struct qman_fq *rsp_fq,
576						    const struct qm_dqrr_entry *dqrr)
577{
578	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
579	struct caam_drv_req *drv_req;
580	const struct qm_fd *fd;
581	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
582	struct caam_drv_private *priv = dev_get_drvdata(qidev);
583	u32 status;
584
585	if (caam_qi_napi_schedule(p, caam_napi))
586		return qman_cb_dqrr_stop;
587
588	fd = &dqrr->fd;
589
590	drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
591	if (unlikely(!drv_req)) {
592		dev_err(qidev,
593			"Can't find original request for caam response\n");
594		return qman_cb_dqrr_consume;
595	}
596
597	refcount_dec(&drv_req->drv_ctx->refcnt);
598
599	status = be32_to_cpu(fd->status);
600	if (unlikely(status)) {
601		u32 ssrc = status & JRSTA_SSRC_MASK;
602		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
603
604		if (ssrc != JRSTA_SSRC_CCB_ERROR ||
605		    err_id != JRSTA_CCBERR_ERRID_ICVCHK)
606			dev_err_ratelimited(qidev,
607					    "Error: %#x in CAAM response FD\n",
608					    status);
609	}
610
611	if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
612		dev_err(qidev, "Non-compound FD from CAAM\n");
613		return qman_cb_dqrr_consume;
614	}
615
616	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
617			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
618
619	drv_req->cbk(drv_req, status);
620	return qman_cb_dqrr_consume;
621}
622
623static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
624{
625	struct qm_mcc_initfq opts;
626	struct qman_fq *fq;
627	int ret;
628
629	fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
630	if (!fq)
631		return -ENOMEM;
632
633	fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
634
635	ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
636			     QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
637	if (ret) {
638		dev_err(qidev, "Rsp FQ create failed\n");
639		kfree(fq);
640		return -ENODEV;
641	}
642
643	memset(&opts, 0, sizeof(opts));
644	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
645				   QM_INITFQ_WE_CONTEXTB |
646				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
647	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
648				       QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
649	qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
650	opts.fqd.cgid = qipriv.cgr.cgrid;
651	opts.fqd.context_a.stashing.exclusive =	QM_STASHING_EXCL_CTX |
652						QM_STASHING_EXCL_DATA;
653	qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
654
655	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
656	if (ret) {
657		dev_err(qidev, "Rsp FQ init failed\n");
658		kfree(fq);
659		return -ENODEV;
660	}
661
662	per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
663
664	dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
665	return 0;
666}
667
668static int init_cgr(struct device *qidev)
669{
670	int ret;
671	struct qm_mcc_initcgr opts;
672	const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
673			MAX_RSP_FQ_BACKLOG_PER_CPU;
674
675	ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
676	if (ret) {
677		dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
678		return ret;
679	}
680
681	qipriv.cgr.cb = cgr_cb;
682	memset(&opts, 0, sizeof(opts));
683	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
684				   QM_CGR_WE_MODE);
685	opts.cgr.cscn_en = QM_CGR_EN;
686	opts.cgr.mode = QMAN_CGR_MODE_FRAME;
687	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
688
689	ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
690	if (ret) {
691		dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
692			qipriv.cgr.cgrid);
693		return ret;
694	}
695
696	dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
697	return 0;
698}
699
700static int alloc_rsp_fqs(struct device *qidev)
701{
702	int ret, i;
703	const cpumask_t *cpus = qman_affine_cpus();
704
 705	/* Now create response FQs */
706	for_each_cpu(i, cpus) {
707		ret = alloc_rsp_fq_cpu(qidev, i);
708		if (ret) {
709			dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i);
710			return ret;
711		}
712	}
713
714	return 0;
715}
716
717static void free_rsp_fqs(void)
718{
719	int i;
720	const cpumask_t *cpus = qman_affine_cpus();
721
722	for_each_cpu(i, cpus)
723		kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
724}
725
726int caam_qi_init(struct platform_device *caam_pdev)
727{
728	int err, i;
729	struct device *ctrldev = &caam_pdev->dev, *qidev;
730	struct caam_drv_private *ctrlpriv;
731	const cpumask_t *cpus = qman_affine_cpus();
732
733	ctrlpriv = dev_get_drvdata(ctrldev);
734	qidev = ctrldev;
735
736	/* Initialize the congestion detection */
737	err = init_cgr(qidev);
738	if (err) {
739		dev_err(qidev, "CGR initialization failed: %d\n", err);
740		return err;
741	}
742
743	/* Initialise response FQs */
744	err = alloc_rsp_fqs(qidev);
745	if (err) {
746		dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
747		free_rsp_fqs();
748		return err;
749	}
750
751	/*
 752	 * Enable the NAPI contexts on each core that has an affine
 753	 * portal.
754	 */
755	for_each_cpu(i, cpus) {
756		struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
757		struct caam_napi *caam_napi = &priv->caam_napi;
758		struct napi_struct *irqtask = &caam_napi->irqtask;
759		struct net_device *net_dev = &priv->net_dev;
760
761		net_dev->dev = *qidev;
762		INIT_LIST_HEAD(&net_dev->napi_list);
763
764		netif_napi_add(net_dev, irqtask, caam_qi_poll,
765			       CAAM_NAPI_WEIGHT);
766
767		napi_enable(irqtask);
768	}
769
770	qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
771				     SLAB_CACHE_DMA, NULL);
772	if (!qi_cache) {
773		dev_err(qidev, "Can't allocate CAAM cache\n");
774		free_rsp_fqs();
775		return -ENOMEM;
776	}
777
778#ifdef CONFIG_DEBUG_FS
779	debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
780			    &times_congested, &caam_fops_u64_ro);
781#endif
782
783	err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv);
784	if (err)
785		return err;
786
787	dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
788	return 0;
789}
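
The v5.9 listing ends here; the v6.13.7 revision of the same file follows below. For orientation, here is a minimal, hypothetical sketch of how a front-end driver (in the style of caamalg_qi) drives the exported QI API shown above: one driver context per shared descriptor via caam_drv_ctx_init(), per-request state from qi_cache_alloc(), a compound frame described through fd_sgt[0]/fd_sgt[1], submission with caam_qi_enqueue(), and completion through the request callback. The struct caam_drv_req field names (fd_sgt, drv_ctx, cbk, app_ctx), the callback signature and the dma_to_qm_sg_one() helper are assumptions taken from qi.h and sg_sw_qm.h, which are not part of this listing; error handling is trimmed.

#include "qi.h"		/* caam_drv_ctx_init(), caam_qi_enqueue(), qi_cache_*() */
#include "sg_sw_qm.h"	/* dma_to_qm_sg_one() - assumed helper */

/* Completion callback; invoked from caam_rsp_fq_dqrr_cb() / caam_fq_ern_cb() */
static void my_done_cb(struct caam_drv_req *drv_req, u32 status)
{
	/* status is 0 on success from the response path; nonzero is a CAAM/QI
	 * status word (caam_fq_ern_cb() reports JRSTA_SSRC_QI for rejects)
	 */
	pr_debug("request %p done, status %#x\n", drv_req->app_ctx, status);
	qi_cache_free(drv_req);		/* if it came from qi_cache_alloc() */
}

/* Hypothetical submit path: src/dst are already DMA-mapped by the caller */
static int my_submit(struct device *qidev, u32 *sh_desc,
		     dma_addr_t src, u32 src_len,
		     dma_addr_t dst, u32 dst_len)
{
	struct caam_drv_ctx *drv_ctx;
	struct caam_drv_req *drv_req;
	int cpu = 0;	/* caam_drv_ctx_init() moves this to an affine-portal CPU */
	int ret;

	/* Normally done once per tfm/direction, not per request */
	drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
	if (IS_ERR(drv_ctx))
		return PTR_ERR(drv_ctx);

	/* Short per-request buffers come from the qi_cache kmem_cache */
	drv_req = qi_cache_alloc(GFP_ATOMIC);
	if (!drv_req) {
		caam_drv_ctx_rel(drv_ctx);
		return -ENOMEM;
	}

	/* Compound frame: fd_sgt[0] describes the output, fd_sgt[1] the input */
	dma_to_qm_sg_one(&drv_req->fd_sgt[0], dst, dst_len, 0);
	dma_to_qm_sg_one(&drv_req->fd_sgt[1], src, src_len, 0);

	drv_req->drv_ctx = drv_ctx;
	drv_req->cbk = my_done_cb;
	drv_req->app_ctx = NULL;	/* caller-private cookie (assumed field) */

	/* caam_qi_enqueue() DMA-maps fd_sgt and enqueues to the request FQ */
	ret = caam_qi_enqueue(qidev, drv_req);
	if (ret) {
		qi_cache_free(drv_req);
		caam_drv_ctx_rel(drv_ctx);
	}
	return ret;
}
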
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * CAAM/SEC 4.x QI transport/backend driver
  4 * Queue Interface backend functionality
  5 *
  6 * Copyright 2013-2016 Freescale Semiconductor, Inc.
  7 * Copyright 2016-2017, 2019-2020 NXP
  8 */
  9
 10#include <linux/cpumask.h>
 11#include <linux/device.h>
 12#include <linux/dma-mapping.h>
 13#include <linux/kernel.h>
 14#include <linux/kthread.h>
 15#include <linux/netdevice.h>
 16#include <linux/platform_device.h>
 17#include <linux/slab.h>
 18#include <linux/string.h>
 19#include <soc/fsl/qman.h>
 20
 21#include "debugfs.h"
 22#include "regs.h"
 23#include "qi.h"
 24#include "desc.h"
 25#include "intern.h"
 26#include "desc_constr.h"
 27
 28#define PREHDR_RSLS_SHIFT	31
 29#define PREHDR_ABS		BIT(25)
 30
 31/*
 32 * Use a reasonable backlog of frames (per CPU) as congestion threshold,
 33 * so that resources used by the in-flight buffers do not become a memory hog.
 34 */
 35#define MAX_RSP_FQ_BACKLOG_PER_CPU	256
 36
 37#define CAAM_QI_ENQUEUE_RETRIES	10000
 38
 39#define CAAM_NAPI_WEIGHT	63
 40
 41/*
 42 * caam_napi - struct holding CAAM NAPI-related params
 43 * @irqtask: IRQ task for QI backend
 44 * @p: QMan portal
 45 */
 46struct caam_napi {
 47	struct napi_struct irqtask;
 48	struct qman_portal *p;
 49};
 50
 51/*
 52 * caam_qi_pcpu_priv - percpu private data structure to maintain the list
 53 *                     of pending responses expected on each cpu.
 54 * @caam_napi: CAAM NAPI params
 55 * @net_dev: netdev used by NAPI
 56 * @rsp_fq: response FQ from CAAM
 57 */
 58struct caam_qi_pcpu_priv {
 59	struct caam_napi caam_napi;
 60	struct net_device *net_dev;
 61	struct qman_fq *rsp_fq;
 62} ____cacheline_aligned;
 63
 64static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
 65static DEFINE_PER_CPU(int, last_cpu);
 66
 67/*
 68 * caam_qi_priv - CAAM QI backend private params
 69 * @cgr: QMan congestion group
 70 */
 71struct caam_qi_priv {
 72	struct qman_cgr cgr;
 73};
 74
 75static struct caam_qi_priv qipriv ____cacheline_aligned;
 76
 77/*
 78 * This is written by only one core - the one that initialized the CGR - and
 79 * read by multiple cores (all the others).
 80 */
 81bool caam_congested __read_mostly;
 82EXPORT_SYMBOL(caam_congested);
 83
 84/*
 85 * This is a cache of buffers, from which the users of CAAM QI driver
 86 * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
 87 * doing malloc on the hotpath.
 88 * NOTE: A more elegant solution would be to have some headroom in the frames
 89 *       being processed. This could be added by the dpaa-ethernet driver.
 90 *       This would pose a problem for userspace application processing which
 91 *       cannot know of this limitation. So for now, this will work.
 92 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
 93 */
 94static struct kmem_cache *qi_cache;
 95
 96static void *caam_iova_to_virt(struct iommu_domain *domain,
 97			       dma_addr_t iova_addr)
 98{
 99	phys_addr_t phys_addr;
100
101	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
102
103	return phys_to_virt(phys_addr);
104}
105
106int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
107{
108	struct qm_fd fd;
109	dma_addr_t addr;
110	int ret;
111	int num_retries = 0;
112
113	qm_fd_clear_fd(&fd);
114	qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
115
116	addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
117			      DMA_BIDIRECTIONAL);
118	if (dma_mapping_error(qidev, addr)) {
119		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
120		return -EIO;
121	}
122	qm_fd_addr_set64(&fd, addr);
123
124	do {
125		ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
126		if (likely(!ret)) {
127			refcount_inc(&req->drv_ctx->refcnt);
128			return 0;
129		}
130
131		if (ret != -EBUSY)
132			break;
133		num_retries++;
134	} while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
135
136	dev_err(qidev, "qman_enqueue failed: %d\n", ret);
137
138	return ret;
139}
140EXPORT_SYMBOL(caam_qi_enqueue);
141
142static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
143			   const union qm_mr_entry *msg)
144{
145	const struct qm_fd *fd;
146	struct caam_drv_req *drv_req;
147	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev);
148	struct caam_drv_private *priv = dev_get_drvdata(qidev);
149
150	fd = &msg->ern.fd;
151
152	drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
153	if (!drv_req) {
154		dev_err(qidev,
155			"Can't find original request for CAAM response\n");
156		return;
157	}
158
159	refcount_dec(&drv_req->drv_ctx->refcnt);
160
161	if (qm_fd_get_format(fd) != qm_fd_compound) {
162		dev_err(qidev, "Non-compound FD from CAAM\n");
163		return;
164	}
165
166	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
167			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
168
169	if (fd->status)
170		drv_req->cbk(drv_req, be32_to_cpu(fd->status));
171	else
172		drv_req->cbk(drv_req, JRSTA_SSRC_QI);
173}
174
175static struct qman_fq *create_caam_req_fq(struct device *qidev,
176					  struct qman_fq *rsp_fq,
177					  dma_addr_t hwdesc,
178					  int fq_sched_flag)
179{
180	int ret;
181	struct qman_fq *req_fq;
182	struct qm_mcc_initfq opts;
183
184	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
185	if (!req_fq)
186		return ERR_PTR(-ENOMEM);
187
188	req_fq->cb.ern = caam_fq_ern_cb;
189	req_fq->cb.fqs = NULL;
190
191	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
192				QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
193	if (ret) {
194		dev_err(qidev, "Failed to create session req FQ\n");
195		goto create_req_fq_fail;
196	}
197
198	memset(&opts, 0, sizeof(opts));
199	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
200				   QM_INITFQ_WE_CONTEXTB |
201				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
202	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
203	qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
204	opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
205	qm_fqd_context_a_set64(&opts.fqd, hwdesc);
206	opts.fqd.cgid = qipriv.cgr.cgrid;
207
208	ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
209	if (ret) {
210		dev_err(qidev, "Failed to init session req FQ\n");
211		goto init_req_fq_fail;
212	}
213
214	dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
215		smp_processor_id());
216	return req_fq;
217
218init_req_fq_fail:
219	qman_destroy_fq(req_fq);
220create_req_fq_fail:
221	kfree(req_fq);
222	return ERR_PTR(ret);
223}
224
225static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
226{
227	int ret;
228
229	ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
230				    QMAN_VOLATILE_FLAG_FINISH,
231				    QM_VDQCR_PRECEDENCE_VDQCR |
232				    QM_VDQCR_NUMFRAMES_TILLEMPTY);
233	if (ret) {
234		dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
235		return ret;
236	}
237
238	do {
239		struct qman_portal *p;
240
241		p = qman_get_affine_portal(smp_processor_id());
242		qman_p_poll_dqrr(p, 16);
243	} while (fq->flags & QMAN_FQ_STATE_NE);
244
245	return 0;
246}
247
248static int kill_fq(struct device *qidev, struct qman_fq *fq)
249{
250	u32 flags;
251	int ret;
252
253	ret = qman_retire_fq(fq, &flags);
254	if (ret < 0) {
255		dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
256		return ret;
257	}
258
259	if (!ret)
260		goto empty_fq;
261
262	/* Async FQ retirement condition */
263	if (ret == 1) {
264		/* Retry till FQ gets in retired state */
265		do {
266			msleep(20);
267		} while (fq->state != qman_fq_state_retired);
268
269		WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
270		WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
271	}
272
273empty_fq:
274	if (fq->flags & QMAN_FQ_STATE_NE) {
275		ret = empty_retired_fq(qidev, fq);
276		if (ret) {
277			dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
278				fq->fqid);
279			return ret;
280		}
281	}
282
283	ret = qman_oos_fq(fq);
284	if (ret)
285		dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
286
287	qman_destroy_fq(fq);
288	kfree(fq);
289
290	return ret;
291}
292
293static int empty_caam_fq(struct qman_fq *fq, struct caam_drv_ctx *drv_ctx)
294{
295	int ret;
296	int retries = 10;
297	struct qm_mcr_queryfq_np np;
298
 299	/* Wait till the older CAAM FQ gets empty */
300	do {
301		ret = qman_query_fq_np(fq, &np);
302		if (ret)
303			return ret;
304
305		if (!qm_mcr_np_get(&np, frm_cnt))
306			break;
307
308		msleep(20);
309	} while (1);
310
311	/* Wait until pending jobs from this FQ are processed by CAAM */
312	do {
313		if (refcount_read(&drv_ctx->refcnt) == 1)
314			break;
315
316		msleep(20);
317	} while (--retries);
318
319	if (!retries)
320		dev_warn_once(drv_ctx->qidev, "%d frames from FQID %u still pending in CAAM\n",
321			      refcount_read(&drv_ctx->refcnt), fq->fqid);
322
323	return 0;
324}
325
326int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
327{
328	int ret;
329	u32 num_words;
330	struct qman_fq *new_fq, *old_fq;
331	struct device *qidev = drv_ctx->qidev;
332
333	num_words = desc_len(sh_desc);
334	if (num_words > MAX_SDLEN) {
335		dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
336		return -EINVAL;
337	}
338
339	/* Note down older req FQ */
340	old_fq = drv_ctx->req_fq;
341
342	/* Create a new req FQ in parked state */
343	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
344				    drv_ctx->context_a, 0);
345	if (IS_ERR(new_fq)) {
346		dev_err(qidev, "FQ allocation for shdesc update failed\n");
347		return PTR_ERR(new_fq);
348	}
349
350	/* Hook up new FQ to context so that new requests keep queuing */
351	drv_ctx->req_fq = new_fq;
352
353	/* Empty and remove the older FQ */
354	ret = empty_caam_fq(old_fq, drv_ctx);
355	if (ret) {
356		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
357
358		/* We can revert to older FQ */
359		drv_ctx->req_fq = old_fq;
360
361		if (kill_fq(qidev, new_fq))
362			dev_warn(qidev, "New CAAM FQ kill failed\n");
363
364		return ret;
365	}
366
367	/*
368	 * Re-initialise pre-header. Set RSLS and SDLEN.
369	 * Update the shared descriptor for driver context.
370	 */
371	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
372					   num_words);
373	drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
374	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
375	dma_sync_single_for_device(qidev, drv_ctx->context_a,
376				   sizeof(drv_ctx->sh_desc) +
377				   sizeof(drv_ctx->prehdr),
378				   DMA_BIDIRECTIONAL);
379
380	/* Put the new FQ in scheduled state */
381	ret = qman_schedule_fq(new_fq);
382	if (ret) {
383		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
384
385		/*
386		 * We can kill new FQ and revert to old FQ.
 387		 * Since the desc is already modified, it is a success case
388		 */
389
390		drv_ctx->req_fq = old_fq;
391
392		if (kill_fq(qidev, new_fq))
393			dev_warn(qidev, "New CAAM FQ kill failed\n");
394	} else if (kill_fq(qidev, old_fq)) {
395		dev_warn(qidev, "Old CAAM FQ kill failed\n");
396	}
397
398	return 0;
399}
400EXPORT_SYMBOL(caam_drv_ctx_update);
401
402struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
403				       int *cpu,
404				       u32 *sh_desc)
405{
406	size_t size;
407	u32 num_words;
408	dma_addr_t hwdesc;
409	struct caam_drv_ctx *drv_ctx;
410	const cpumask_t *cpus = qman_affine_cpus();
411
412	num_words = desc_len(sh_desc);
413	if (num_words > MAX_SDLEN) {
414		dev_err(qidev, "Invalid descriptor len: %d words\n",
415			num_words);
416		return ERR_PTR(-EINVAL);
417	}
418
419	drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
420	if (!drv_ctx)
421		return ERR_PTR(-ENOMEM);
422
423	/*
424	 * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
425	 * and dma-map them.
426	 */
427	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
428					   num_words);
429	drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
430	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
431	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
432	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
433				DMA_BIDIRECTIONAL);
434	if (dma_mapping_error(qidev, hwdesc)) {
435		dev_err(qidev, "DMA map error for preheader + shdesc\n");
436		kfree(drv_ctx);
437		return ERR_PTR(-ENOMEM);
438	}
439	drv_ctx->context_a = hwdesc;
440
441	/* If given CPU does not own the portal, choose another one that does */
442	if (!cpumask_test_cpu(*cpu, cpus)) {
443		int *pcpu = &get_cpu_var(last_cpu);
444
445		*pcpu = cpumask_next(*pcpu, cpus);
446		if (*pcpu >= nr_cpu_ids)
447			*pcpu = cpumask_first(cpus);
448		*cpu = *pcpu;
449
450		put_cpu_var(last_cpu);
451	}
452	drv_ctx->cpu = *cpu;
453
454	/* Find response FQ hooked with this CPU */
455	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
456
457	/* Attach request FQ */
458	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
459					     QMAN_INITFQ_FLAG_SCHED);
460	if (IS_ERR(drv_ctx->req_fq)) {
461		dev_err(qidev, "create_caam_req_fq failed\n");
462		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
463		kfree(drv_ctx);
464		return ERR_PTR(-ENOMEM);
465	}
466
467	/* init reference counter used to track references to request FQ */
468	refcount_set(&drv_ctx->refcnt, 1);
469
470	drv_ctx->qidev = qidev;
471	return drv_ctx;
472}
473EXPORT_SYMBOL(caam_drv_ctx_init);
474
475void *qi_cache_alloc(gfp_t flags)
476{
477	return kmem_cache_alloc(qi_cache, flags);
478}
479EXPORT_SYMBOL(qi_cache_alloc);
480
481void qi_cache_free(void *obj)
482{
483	kmem_cache_free(qi_cache, obj);
484}
485EXPORT_SYMBOL(qi_cache_free);
486
487static int caam_qi_poll(struct napi_struct *napi, int budget)
488{
489	struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
490
491	int cleaned = qman_p_poll_dqrr(np->p, budget);
492
493	if (cleaned < budget) {
494		napi_complete(napi);
495		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
496	}
497
498	return cleaned;
499}
500
501void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
502{
503	if (IS_ERR_OR_NULL(drv_ctx))
504		return;
505
506	/* Remove request FQ */
507	if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
508		dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
509
510	dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
511			 sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
512			 DMA_BIDIRECTIONAL);
513	kfree(drv_ctx);
514}
515EXPORT_SYMBOL(caam_drv_ctx_rel);
516
517static void caam_qi_shutdown(void *data)
518{
519	int i;
520	struct device *qidev = data;
521	struct caam_qi_priv *priv = &qipriv;
522	const cpumask_t *cpus = qman_affine_cpus();
523
524	for_each_cpu(i, cpus) {
525		struct napi_struct *irqtask;
526
527		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
528		napi_disable(irqtask);
529		netif_napi_del(irqtask);
530
531		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
532			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
533		free_netdev(per_cpu(pcpu_qipriv.net_dev, i));
534	}
535
536	qman_delete_cgr_safe(&priv->cgr);
537	qman_release_cgrid(priv->cgr.cgrid);
538
539	kmem_cache_destroy(qi_cache);
540}
541
542static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
543{
544	caam_congested = congested;
545
546	if (congested) {
547		caam_debugfs_qi_congested();
548
549		pr_debug_ratelimited("CAAM entered congestion\n");
550
551	} else {
552		pr_debug_ratelimited("CAAM exited congestion\n");
553	}
554}
555
556static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np,
557				 bool sched_napi)
558{
559	if (sched_napi) {
560		/* Disable QMan IRQ source and invoke NAPI */
561		qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
562		np->p = p;
563		napi_schedule(&np->irqtask);
564		return 1;
565	}
566	return 0;
567}
568
569static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
570						    struct qman_fq *rsp_fq,
571						    const struct qm_dqrr_entry *dqrr,
572						    bool sched_napi)
573{
574	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
575	struct caam_drv_req *drv_req;
576	const struct qm_fd *fd;
577	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev);
578	struct caam_drv_private *priv = dev_get_drvdata(qidev);
579	u32 status;
580
581	if (caam_qi_napi_schedule(p, caam_napi, sched_napi))
582		return qman_cb_dqrr_stop;
583
584	fd = &dqrr->fd;
585
586	drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
587	if (unlikely(!drv_req)) {
588		dev_err(qidev,
589			"Can't find original request for caam response\n");
590		return qman_cb_dqrr_consume;
591	}
592
593	refcount_dec(&drv_req->drv_ctx->refcnt);
594
595	status = be32_to_cpu(fd->status);
596	if (unlikely(status)) {
597		u32 ssrc = status & JRSTA_SSRC_MASK;
598		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
599
600		if (ssrc != JRSTA_SSRC_CCB_ERROR ||
601		    err_id != JRSTA_CCBERR_ERRID_ICVCHK)
602			dev_err_ratelimited(qidev,
603					    "Error: %#x in CAAM response FD\n",
604					    status);
605	}
606
607	if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
608		dev_err(qidev, "Non-compound FD from CAAM\n");
609		return qman_cb_dqrr_consume;
610	}
611
612	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
613			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
614
615	drv_req->cbk(drv_req, status);
616	return qman_cb_dqrr_consume;
617}
618
619static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
620{
621	struct qm_mcc_initfq opts;
622	struct qman_fq *fq;
623	int ret;
624
625	fq = kzalloc(sizeof(*fq), GFP_KERNEL);
626	if (!fq)
627		return -ENOMEM;
628
629	fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
630
631	ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
632			     QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
633	if (ret) {
634		dev_err(qidev, "Rsp FQ create failed\n");
635		kfree(fq);
636		return -ENODEV;
637	}
638
639	memset(&opts, 0, sizeof(opts));
640	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
641				   QM_INITFQ_WE_CONTEXTB |
642				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
643	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
644				       QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
645	qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
646	opts.fqd.cgid = qipriv.cgr.cgrid;
647	opts.fqd.context_a.stashing.exclusive =	QM_STASHING_EXCL_CTX |
648						QM_STASHING_EXCL_DATA;
649	qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
650
651	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
652	if (ret) {
653		dev_err(qidev, "Rsp FQ init failed\n");
654		kfree(fq);
655		return -ENODEV;
656	}
657
658	per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
659
660	dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
661	return 0;
662}
663
664static int init_cgr(struct device *qidev)
665{
666	int ret;
667	struct qm_mcc_initcgr opts;
668	const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
669			MAX_RSP_FQ_BACKLOG_PER_CPU;
670
671	ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
672	if (ret) {
673		dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
674		return ret;
675	}
676
677	qipriv.cgr.cb = cgr_cb;
678	memset(&opts, 0, sizeof(opts));
679	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
680				   QM_CGR_WE_MODE);
681	opts.cgr.cscn_en = QM_CGR_EN;
682	opts.cgr.mode = QMAN_CGR_MODE_FRAME;
683	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
684
685	ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
686	if (ret) {
687		dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
688			qipriv.cgr.cgrid);
689		return ret;
690	}
691
692	dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
693	return 0;
694}
695
696static int alloc_rsp_fqs(struct device *qidev)
697{
698	int ret, i;
699	const cpumask_t *cpus = qman_affine_cpus();
700
 701	/* Now create response FQs */
702	for_each_cpu(i, cpus) {
703		ret = alloc_rsp_fq_cpu(qidev, i);
704		if (ret) {
705			dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i);
706			return ret;
707		}
708	}
709
710	return 0;
711}
712
713static void free_rsp_fqs(void)
714{
715	int i;
716	const cpumask_t *cpus = qman_affine_cpus();
717
718	for_each_cpu(i, cpus)
719		kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
720}
721
722static void free_caam_qi_pcpu_netdev(const cpumask_t *cpus)
723{
724	struct caam_qi_pcpu_priv *priv;
725	int i;
726
727	for_each_cpu(i, cpus) {
728		priv = per_cpu_ptr(&pcpu_qipriv, i);
729		free_netdev(priv->net_dev);
730	}
731}
732
733int caam_qi_init(struct platform_device *caam_pdev)
734{
735	int err, i;
736	struct device *qidev = &caam_pdev->dev;
737	struct caam_drv_private *ctrlpriv;
738	const cpumask_t *cpus = qman_affine_cpus();
739	cpumask_var_t clean_mask;
740
741	err = -ENOMEM;
742	if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
743		goto fail_cpumask;
744
745	ctrlpriv = dev_get_drvdata(qidev);
746
747	/* Initialize the congestion detection */
748	err = init_cgr(qidev);
749	if (err) {
750		dev_err(qidev, "CGR initialization failed: %d\n", err);
751		goto fail_cgr;
752	}
753
754	/* Initialise response FQs */
755	err = alloc_rsp_fqs(qidev);
756	if (err) {
757		dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
758		goto fail_fqs;
759	}
760
761	/*
 762	 * Enable the NAPI contexts on each core that has an affine
 763	 * portal.
764	 */
765	for_each_cpu(i, cpus) {
766		struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
767		struct caam_napi *caam_napi = &priv->caam_napi;
768		struct napi_struct *irqtask = &caam_napi->irqtask;
769		struct net_device *net_dev;
770
771		net_dev = alloc_netdev_dummy(0);
772		if (!net_dev) {
773			err = -ENOMEM;
774			goto fail;
775		}
776		cpumask_set_cpu(i, clean_mask);
777		priv->net_dev = net_dev;
778		net_dev->dev = *qidev;
779
780		netif_napi_add_tx_weight(net_dev, irqtask, caam_qi_poll,
781					 CAAM_NAPI_WEIGHT);
782
783		napi_enable(irqtask);
784	}
785
786	qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE,
787				     dma_get_cache_alignment(), 0, NULL);
788	if (!qi_cache) {
789		dev_err(qidev, "Can't allocate CAAM cache\n");
790		err = -ENOMEM;
791		goto fail;
792	}
793
794	caam_debugfs_qi_init(ctrlpriv);
795
796	err = devm_add_action_or_reset(qidev, caam_qi_shutdown, qidev);
797	if (err)
798		goto fail2;
799
800	dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
801	goto free_cpumask;
802
803fail2:
804	kmem_cache_destroy(qi_cache);
805fail:
806	free_caam_qi_pcpu_netdev(clean_mask);
807fail_fqs:
808	free_rsp_fqs();
809	qman_delete_cgr_safe(&qipriv.cgr);
810	qman_release_cgrid(qipriv.cgr.cgrid);
811fail_cgr:
812free_cpumask:
813	free_cpumask_var(clean_mask);
814fail_cpumask:
815	return err;
816}
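
In both revisions the request FQ's context_a (set via qm_fqd_context_a_set64() in create_caam_req_fq()) points at a small DMA-mapped region built by caam_drv_ctx_init() and rewritten in place by caam_drv_ctx_update(): a two-word pre-header immediately followed by the shared descriptor, mapped in a single dma_map_single() of sizeof(prehdr) + sizeof(sh_desc) bytes. The sketch below only restates that layout and the word encodings computed above; the array sizes (prehdr[2], sh_desc[MAX_SDLEN]) are assumptions taken from struct caam_drv_ctx in qi.h, and caam_qi_hwdesc is a made-up name used purely for illustration.

/* Illustrative view of the region context_a points at (name hypothetical) */
struct caam_qi_hwdesc {
	u32 prehdr[2];		/* [0] = RSLS | SDLEN, [1] = PREHDR_ABS */
	u32 sh_desc[MAX_SDLEN];	/* shared descriptor, SDLEN words valid */
};

static void my_fill_hwdesc(struct caam_qi_hwdesc *hw, u32 *sh_desc)
{
	u32 num_words = desc_len(sh_desc);	/* SDLEN; callers reject > MAX_SDLEN */

	/* Word 0: RSLS flag in bit 31, shared-descriptor length in the low bits */
	hw->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | num_words);
	/* Word 1: the ABS bit, as defined by PREHDR_ABS at the top of the file */
	hw->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
	memcpy(hw->sh_desc, sh_desc, desc_bytes(sh_desc));
}

caam_drv_ctx_update() performs exactly these steps on the live region and then calls dma_sync_single_for_device() on it before scheduling the replacement request FQ, which is why a failure to schedule the new FQ is still treated as success for the descriptor update itself.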