// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2022 MediaTek Inc.
 *
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 *	   Sujuan Chen <sujuan.chen@mediatek.com>
 */

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/bitfield.h>

#include "mtk_wed.h"
#include "mtk_wed_regs.h"
#include "mtk_wed_wo.h"

static u32
mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
{
	u32 val;

	if (regmap_read(wo->mmio.regs, reg, &val))
		val = ~0;

	return val;
}

static void
mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
	regmap_write(wo->mmio.regs, reg, val);
}

static u32
mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
{
	u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);

	return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
}

static void
mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
}

static void
mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
}

static void
mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
{
	unsigned long flags;

	spin_lock_irqsave(&wo->mmio.lock, flags);
	wo->mmio.irq_mask &= ~mask;
	wo->mmio.irq_mask |= val;
	if (set)
		mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
	spin_unlock_irqrestore(&wo->mmio.lock, flags);
}

static void
mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
	tasklet_schedule(&wo->mmio.irq_tasklet);
}

static void
mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
}

static void
mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
}

static void
mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		      u32 val)
{
	wmb();
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
}

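/* Reclaim the oldest completed descriptor from @q and return its buffer.
 * Returns NULL when the ring is empty or, unless @flush is set, when the
 * next descriptor has not been marked DMA_DONE yet.
 */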
static void *
mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
		   bool flush)
{
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
	int index = (q->tail + 1) % q->n_desc;
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	void *buf;

	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
	else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
		return NULL;

	q->tail = index;
	q->queued--;

	desc = &q->desc[index];
	entry = &q->entry[index];
	buf = entry->buf;
	if (len)
		*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
				 le32_to_cpu(READ_ONCE(desc->ctrl)));
	if (buf)
		dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
				 DMA_FROM_DEVICE);
	entry->buf = NULL;

	return buf;
}

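/* Fill every free slot in @q with a freshly mapped page-frag buffer; for rx
 * rings also publish the buffer address and length in the descriptor.
 * Returns the number of buffers added.
 */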
static int
mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			bool rx)
{
	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	int n_buf = 0;

	while (q->queued < q->n_desc) {
		struct mtk_wed_wo_queue_entry *entry;
		dma_addr_t addr;
		void *buf;

		buf = page_frag_alloc(&q->cache, q->buf_size,
				      GFP_ATOMIC | GFP_DMA32);
		if (!buf)
			break;

		addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
		if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		q->head = (q->head + 1) % q->n_desc;
		entry = &q->entry[q->head];
		entry->addr = addr;
		entry->len = q->buf_size;
		q->entry[q->head].buf = buf;

		if (rx) {
			struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
			u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
				   FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
					      entry->len);

			WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
			WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
		}
		q->queued++;
		n_buf++;
	}

	return n_buf;
}

static void
mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
{
	mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
	mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
}

static void
mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	for (;;) {
		struct mtk_wed_mcu_hdr *hdr;
		struct sk_buff *skb;
		void *data;
		u32 len;

		data = mtk_wed_wo_dequeue(wo, q, &len, false);
		if (!data)
			break;

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}

		__skb_put(skb, len);
		if (mtk_wed_mcu_check_msg(wo, skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		hdr = (struct mtk_wed_mcu_hdr *)skb->data;
		if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
			mtk_wed_mcu_rx_event(wo, skb);
		else
			mtk_wed_mcu_rx_unsolicited_event(wo, skb);
	}

	if (mtk_wed_wo_queue_refill(wo, q, true)) {
		u32 index = (q->head - 1) % q->n_desc;

		mtk_wed_wo_queue_kick(wo, q, index);
	}
}

static irqreturn_t
mtk_wed_wo_irq_handler(int irq, void *data)
{
	struct mtk_wed_wo *wo = data;

	mtk_wed_wo_set_isr(wo, 0);
	tasklet_schedule(&wo->mmio.irq_tasklet);

	return IRQ_HANDLED;
}

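/* Interrupt bottom half: mask the pending CCIF sources, drain the rx message
 * ring and then acknowledge and re-enable the rx interrupt.
 */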
static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
{
	struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
	u32 intr, mask;

	/* disable interrupts */
	mtk_wed_wo_set_isr(wo, 0);

	intr = mtk_wed_wo_get_isr(wo);
	intr &= wo->mmio.irq_mask;
	mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
	mtk_wed_wo_irq_disable(wo, mask);

	if (intr & MTK_WED_WO_RXCH_INT_MASK) {
		mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
		mtk_wed_wo_rx_complete(wo);
	}
}

/* mtk wed wo hw queues */

static int
mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		       int n_desc, int buf_size, int index,
		       struct mtk_wed_wo_queue_regs *regs)
{
	q->regs = *regs;
	q->n_desc = n_desc;
	q->buf_size = buf_size;

	q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	return 0;
}

static void
mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
	dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
			  q->desc_dma);
}

static void
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	struct page *page;
	int i;

	for (i = 0; i < q->n_desc; i++) {
		struct mtk_wed_wo_queue_entry *entry = &q->entry[i];

		if (!entry->buf)
			continue;

		dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
				 DMA_TO_DEVICE);
		skb_free_frag(entry->buf);
		entry->buf = NULL;
	}

	if (!q->cache.va)
		return;

	page = virt_to_page(q->cache.va);
	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
	memset(&q->cache, 0, sizeof(q->cache));
}

static void
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	struct page *page;

	for (;;) {
		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);

		if (!buf)
			break;

		skb_free_frag(buf);
	}

	if (!q->cache.va)
		return;

	page = virt_to_page(q->cache.va);
	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
	memset(&q->cache, 0, sizeof(q->cache));
}

static void
mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
	mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
	mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
}

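/* Copy @skb into the next free pre-mapped tx buffer, publish the descriptor
 * and notify the WO firmware. The skb is always consumed; returns -ENOMEM if
 * the ring is full or the message does not fit into a tx buffer.
 */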
int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			    struct sk_buff *skb)
{
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	int ret = 0, index;
	u32 ctrl;

	q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
	index = (q->head + 1) % q->n_desc;
	if (q->tail == index) {
		ret = -ENOMEM;
		goto out;
	}

	entry = &q->entry[index];
	if (skb->len > entry->len) {
		ret = -ENOMEM;
		goto out;
	}

	desc = &q->desc[index];
	q->head = index;

	dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
				DMA_TO_DEVICE);
	memcpy(entry->buf, skb->data, skb->len);
	dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
				   DMA_TO_DEVICE);

	ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
	       MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
	WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

	mtk_wed_wo_queue_kick(wo, q, q->head);
	mtk_wed_wo_kickout(wo);
out:
	dev_kfree_skb(skb);

	return ret;
}

static int
mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
{
	return 0;
}

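/* Look up the CCIF regmap and interrupt referenced by the "mediatek,wo-ccif"
 * phandle, then allocate, fill and reset the tx/rx message rings used to
 * communicate with the WO MCU.
 */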
static int
mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
{
	struct mtk_wed_wo_queue_regs regs;
	struct device_node *np;
	int ret;

	np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
	if (!np)
		return -ENODEV;

	wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(wo->mmio.regs)) {
		ret = PTR_ERR(wo->mmio.regs);
		goto error_put;
	}

	wo->mmio.irq = irq_of_parse_and_map(np, 0);
	wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
	spin_lock_init(&wo->mmio.lock);
	tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);

	ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
			       mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
			       KBUILD_MODNAME, wo);
	if (ret)
		goto error;

	regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
	regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;

	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
				     &regs);
	if (ret)
		goto error;

	mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
	mtk_wed_wo_queue_reset(wo, &wo->q_tx);

	regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
	regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;

	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
				     &regs);
	if (ret)
		goto error;

	mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
	mtk_wed_wo_queue_reset(wo, &wo->q_rx);

	/* rx queue irqmask */
	mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);

	return 0;

error:
	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
error_put:
	of_node_put(np);
	return ret;
}

static void
mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
{
	/* disable interrupts */
	mtk_wed_wo_set_isr(wo, 0);

	tasklet_disable(&wo->mmio.irq_tasklet);

	disable_irq(wo->mmio.irq);
	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);

	mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
	mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
	mtk_wed_wo_queue_free(wo, &wo->q_tx);
	mtk_wed_wo_queue_free(wo, &wo->q_rx);
}

int mtk_wed_wo_init(struct mtk_wed_hw *hw)
{
	struct mtk_wed_wo *wo;
	int ret;

	wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
	if (!wo)
		return -ENOMEM;

	hw->wed_wo = wo;
	wo->hw = hw;

	ret = mtk_wed_wo_hardware_init(wo);
	if (ret)
		return ret;

	ret = mtk_wed_mcu_init(wo);
	if (ret)
		return ret;

	return mtk_wed_wo_exception_init(wo);
}

void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
{
	struct mtk_wed_wo *wo = hw->wed_wo;

	mtk_wed_wo_hw_deinit(wo);
}