v6.8 — drivers/net/ethernet/mediatek/mtk_wed_wo.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (C) 2022 MediaTek Inc.
  3 *
  4 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
  5 *	   Sujuan Chen <sujuan.chen@mediatek.com>
  6 */
  7
  8#include <linux/kernel.h>
  9#include <linux/dma-mapping.h>
 10#include <linux/interrupt.h>
 11#include <linux/mfd/syscon.h>
 12#include <linux/of.h>
 13#include <linux/of_irq.h>
 14#include <linux/bitfield.h>
 15
 16#include "mtk_wed.h"
 17#include "mtk_wed_regs.h"
 18#include "mtk_wed_wo.h"
 19
 20static u32
 21mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
 22{
 23	u32 val;
 24
 25	if (regmap_read(wo->mmio.regs, reg, &val))
 26		val = ~0;
 27
 28	return val;
 29}
 30
 31static void
 32mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
 33{
 34	regmap_write(wo->mmio.regs, reg, val);
 35}
 36
 37static u32
 38mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
 39{
 40	u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
 41
 42	return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
 43}
 44
 45static void
 46mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
 47{
 48	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
 49}
 50
 51static void
 52mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
 53{
 54	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
 55}
 56
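/* Update the cached interrupt mask under mmio.lock: clear the bits in @mask,
 * set the bits in @val, and when @set is true flush the result out to the
 * WO CCIF IRQ0 mask register.
 */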
 57static void
 58mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
 59{
 60	unsigned long flags;
 61
 62	spin_lock_irqsave(&wo->mmio.lock, flags);
 63	wo->mmio.irq_mask &= ~mask;
 64	wo->mmio.irq_mask |= val;
 65	if (set)
 66		mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
 67	spin_unlock_irqrestore(&wo->mmio.lock, flags);
 68}
 69
 70static void
 71mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
 72{
 73	mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
 74	tasklet_schedule(&wo->mmio.irq_tasklet);
 75}
 76
 77static void
 78mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
 79{
 80	mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
 81}
 82
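/* Ring the CCIF doorbell for the tx channel: flag it busy and write the
 * channel number to MTK_WED_WO_CCIF_TCHNUM so the WO MCU picks up the
 * queued command.
 */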
 83static void
 84mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
 85{
 86	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
 87	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
 88}
 89
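/* Publish new descriptors to the device: the wmb() orders the descriptor
 * stores before the write to the ring's cpu index register.
 */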
 90static void
 91mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
 92		      u32 val)
 93{
 94	wmb();
 95	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
 96}
 97
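/* Pull the next completed buffer off @q. When @flush is set the DMA_DONE
 * bit is forced so the ring can be drained unconditionally; otherwise stop
 * at the first descriptor the device has not completed yet. The buffer is
 * unmapped and its length, read back from the descriptor, is returned via
 * @len.
 */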
 98static void *
 99mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
100		   bool flush)
101{
102	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
103	int index = (q->tail + 1) % q->n_desc;
104	struct mtk_wed_wo_queue_entry *entry;
105	struct mtk_wed_wo_queue_desc *desc;
106	void *buf;
107
108	if (!q->queued)
109		return NULL;
110
111	if (flush)
112		q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
113	else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
114		return NULL;
115
116	q->tail = index;
117	q->queued--;
118
119	desc = &q->desc[index];
120	entry = &q->entry[index];
121	buf = entry->buf;
122	if (len)
123		*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
124				 le32_to_cpu(READ_ONCE(desc->ctrl)));
125	if (buf)
126		dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
127				 DMA_FROM_DEVICE);
128	entry->buf = NULL;
129
130	return buf;
131}
132
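/* Top up @q with DMA-mapped page-fragment buffers. For rx queues the
 * descriptors are also initialised so the device owns the new buffers.
 * Returns the number of buffers added to the ring.
 */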
133static int
134mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
135			bool rx)
136{
137	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
138	int n_buf = 0;
139
140	while (q->queued < q->n_desc) {
141		struct mtk_wed_wo_queue_entry *entry;
142		dma_addr_t addr;
143		void *buf;
144
145		buf = page_frag_alloc(&q->cache, q->buf_size,
146				      GFP_ATOMIC | GFP_DMA32);
147		if (!buf)
148			break;
149
150		addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
151		if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
152			skb_free_frag(buf);
153			break;
154		}
155
156		q->head = (q->head + 1) % q->n_desc;
157		entry = &q->entry[q->head];
158		entry->addr = addr;
159		entry->len = q->buf_size;
160		q->entry[q->head].buf = buf;
161
162		if (rx) {
163			struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
164			u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
165				   FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
166					      entry->len);
167
168			WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
169			WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
170		}
171		q->queued++;
172		n_buf++;
173	}
174
175	return n_buf;
176}
177
178static void
179mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
180{
181	mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
182	mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
183}
184
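/* Drain the rx ring: wrap each completed buffer in an skb, validate the MCU
 * header and dispatch it as a command response or an unsolicited event,
 * then refill the ring and kick the new head out to the device.
 */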
185static void
186mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
187{
188	for (;;) {
189		struct mtk_wed_mcu_hdr *hdr;
190		struct sk_buff *skb;
191		void *data;
192		u32 len;
193
194		data = mtk_wed_wo_dequeue(wo, q, &len, false);
195		if (!data)
196			break;
197
198		skb = build_skb(data, q->buf_size);
199		if (!skb) {
200			skb_free_frag(data);
201			continue;
202		}
203
204		__skb_put(skb, len);
205		if (mtk_wed_mcu_check_msg(wo, skb)) {
206			dev_kfree_skb(skb);
207			continue;
208		}
209
210		hdr = (struct mtk_wed_mcu_hdr *)skb->data;
211		if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
212			mtk_wed_mcu_rx_event(wo, skb);
213		else
214			mtk_wed_mcu_rx_unsolicited_event(wo, skb);
215	}
216
217	if (mtk_wed_wo_queue_refill(wo, q, true)) {
218		u32 index = (q->head - 1) % q->n_desc;
219
220		mtk_wed_wo_queue_kick(wo, q, index);
221	}
222}
223
224static irqreturn_t
225mtk_wed_wo_irq_handler(int irq, void *data)
226{
227	struct mtk_wed_wo *wo = data;
228
229	mtk_wed_wo_set_isr(wo, 0);
230	tasklet_schedule(&wo->mmio.irq_tasklet);
231
232	return IRQ_HANDLED;
233}
234
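/* Bottom half for the WO CCIF interrupt: with the hardware mask cleared,
 * read the pending channel bits, disable the rx/exception sources that
 * fired and service the rx queue; mtk_wed_wo_rx_complete() then acks and
 * re-enables the rx interrupt.
 */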
235static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
236{
237	struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
238	u32 intr, mask;
239
240	/* disable interrupts */
241	mtk_wed_wo_set_isr(wo, 0);
242
243	intr = mtk_wed_wo_get_isr(wo);
244	intr &= wo->mmio.irq_mask;
245	mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
246	mtk_wed_wo_irq_disable(wo, mask);
247
248	if (intr & MTK_WED_WO_RXCH_INT_MASK) {
249		mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
250		mtk_wed_wo_rx_complete(wo);
251	}
252}
253
254/* mtk wed wo hw queues */
255
256static int
257mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
258		       int n_desc, int buf_size, int index,
259		       struct mtk_wed_wo_queue_regs *regs)
260{
261	q->regs = *regs;
262	q->n_desc = n_desc;
263	q->buf_size = buf_size;
264
265	q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
266				      &q->desc_dma, GFP_KERNEL);
267	if (!q->desc)
268		return -ENOMEM;
269
270	q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
271				GFP_KERNEL);
272	if (!q->entry)
273		return -ENOMEM;
274
275	return 0;
276}
277
278static void
279mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
280{
281	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
282	dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
283			  q->desc_dma);
284}
285
286static void
287mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
288{
289	struct page *page;
290	int i;
291
292	for (i = 0; i < q->n_desc; i++) {
293		struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
294
295		if (!entry->buf)
296			continue;
297
298		dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
299				 DMA_TO_DEVICE);
300		skb_free_frag(entry->buf);
301		entry->buf = NULL;
302	}
303
304	if (!q->cache.va)
305		return;
306
307	page = virt_to_page(q->cache.va);
308	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
309	memset(&q->cache, 0, sizeof(q->cache));
310}
311
312static void
313mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
314{
315	struct page *page;
316
317	for (;;) {
318		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
319
320		if (!buf)
321			break;
322
323		skb_free_frag(buf);
324	}
325
326	if (!q->cache.va)
327		return;
328
329	page = virt_to_page(q->cache.va);
330	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
331	memset(&q->cache, 0, sizeof(q->cache));
332}
333
334static void
335mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
336{
337	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
338	mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
339	mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
340}
341
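/* Copy @skb into the next free, pre-mapped tx buffer, fill the descriptor
 * and notify the WO MCU through the CCIF doorbell. The skb is always
 * consumed; -ENOMEM is returned if the ring is full or the message does
 * not fit into a tx buffer.
 */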
342int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
343			    struct sk_buff *skb)
344{
345	struct mtk_wed_wo_queue_entry *entry;
346	struct mtk_wed_wo_queue_desc *desc;
347	int ret = 0, index;
348	u32 ctrl;
349
350	q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
351	index = (q->head + 1) % q->n_desc;
352	if (q->tail == index) {
353		ret = -ENOMEM;
354		goto out;
355	}
356
357	entry = &q->entry[index];
358	if (skb->len > entry->len) {
359		ret = -ENOMEM;
360		goto out;
361	}
362
363	desc = &q->desc[index];
364	q->head = index;
365
366	dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
367				DMA_TO_DEVICE);
368	memcpy(entry->buf, skb->data, skb->len);
369	dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
370				   DMA_TO_DEVICE);
371
372	ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
373	       MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
374	WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
375	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
376
377	mtk_wed_wo_queue_kick(wo, q, q->head);
378	mtk_wed_wo_kickout(wo);
379out:
380	dev_kfree_skb(skb);
381
382	return ret;
383}
384
385static int
386mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
387{
388	return 0;
389}
390
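/* Look up the CCIF block referenced by the "mediatek,wo-ccif" phandle,
 * request its interrupt, allocate and reset the tx/rx command rings and
 * finally unmask the WO interrupts.
 */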
391static int
392mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
393{
394	struct mtk_wed_wo_queue_regs regs;
395	struct device_node *np;
396	int ret;
397
398	np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
399	if (!np)
400		return -ENODEV;
401
402	wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
403	if (IS_ERR(wo->mmio.regs)) {
404		ret = PTR_ERR(wo->mmio.regs);
405		goto error_put;
406	}
407
408	wo->mmio.irq = irq_of_parse_and_map(np, 0);
409	wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
410	spin_lock_init(&wo->mmio.lock);
411	tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
412
413	ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
414			       mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
415			       KBUILD_MODNAME, wo);
416	if (ret)
417		goto error;
418
419	regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
420	regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
421	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
422	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
423
424	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
425				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
426				     &regs);
427	if (ret)
428		goto error;
429
430	mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
431	mtk_wed_wo_queue_reset(wo, &wo->q_tx);
432
433	regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
434	regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
435	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
436	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
437
438	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
439				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
440				     &regs);
441	if (ret)
442		goto error;
443
444	mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
445	mtk_wed_wo_queue_reset(wo, &wo->q_rx);
446
447	/* rx queue irqmask */
448	mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
449
450	return 0;
451
452error:
453	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
454error_put:
455	of_node_put(np);
456	return ret;
457}
458
459static void
460mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
461{
462	/* disable interrupts */
463	mtk_wed_wo_set_isr(wo, 0);
464
465	tasklet_disable(&wo->mmio.irq_tasklet);
466
467	disable_irq(wo->mmio.irq);
468	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
469
470	mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
471	mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
472	mtk_wed_wo_queue_free(wo, &wo->q_tx);
473	mtk_wed_wo_queue_free(wo, &wo->q_rx);
474}
475
476int mtk_wed_wo_init(struct mtk_wed_hw *hw)
477{
478	struct mtk_wed_wo *wo;
479	int ret;
480
481	wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
482	if (!wo)
483		return -ENOMEM;
484
485	hw->wed_wo = wo;
486	wo->hw = hw;
487
488	ret = mtk_wed_wo_hardware_init(wo);
489	if (ret)
490		return ret;
491
492	ret = mtk_wed_mcu_init(wo);
493	if (ret)
494		return ret;
495
496	return mtk_wed_wo_exception_init(wo);
497}
498
499void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
500{
501	struct mtk_wed_wo *wo = hw->wed_wo;
502
503	mtk_wed_wo_hw_deinit(wo);
504}
v6.2 — drivers/net/ethernet/mediatek/mtk_wed_wo.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (C) 2022 MediaTek Inc.
  3 *
  4 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
  5 *	   Sujuan Chen <sujuan.chen@mediatek.com>
  6 */
  7
  8#include <linux/kernel.h>
  9#include <linux/dma-mapping.h>
 10#include <linux/of_platform.h>
 11#include <linux/interrupt.h>
 12#include <linux/of_address.h>
 13#include <linux/mfd/syscon.h>
 14#include <linux/of_irq.h>
 15#include <linux/bitfield.h>
 16
 17#include "mtk_wed.h"
 18#include "mtk_wed_regs.h"
 19#include "mtk_wed_wo.h"
 20
 21static u32
 22mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
 23{
 24	u32 val;
 25
 26	if (regmap_read(wo->mmio.regs, reg, &val))
 27		val = ~0;
 28
 29	return val;
 30}
 31
 32static void
 33mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
 34{
 35	regmap_write(wo->mmio.regs, reg, val);
 36}
 37
 38static u32
 39mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
 40{
 41	u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
 42
 43	return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
 44}
 45
 46static void
 47mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
 48{
 49	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
 50}
 51
 52static void
 53mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
 54{
 55	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
 56}
 57
 58static void
 59mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
 60{
 61	unsigned long flags;
 62
 63	spin_lock_irqsave(&wo->mmio.lock, flags);
 64	wo->mmio.irq_mask &= ~mask;
 65	wo->mmio.irq_mask |= val;
 66	if (set)
 67		mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
 68	spin_unlock_irqrestore(&wo->mmio.lock, flags);
 69}
 70
 71static void
 72mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
 73{
 74	mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
 75	tasklet_schedule(&wo->mmio.irq_tasklet);
 76}
 77
 78static void
 79mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
 80{
 81	mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
 82}
 83
 84static void
 85mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
 86{
 87	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
 88	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
 89}
 90
 91static void
 92mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
 93		      u32 val)
 94{
 95	wmb();
 96	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
 97}
 98
 99static void *
100mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
101		   bool flush)
102{
103	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
104	int index = (q->tail + 1) % q->n_desc;
105	struct mtk_wed_wo_queue_entry *entry;
106	struct mtk_wed_wo_queue_desc *desc;
107	void *buf;
108
109	if (!q->queued)
110		return NULL;
111
112	if (flush)
113		q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
114	else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
115		return NULL;
116
117	q->tail = index;
118	q->queued--;
119
120	desc = &q->desc[index];
121	entry = &q->entry[index];
122	buf = entry->buf;
123	if (len)
124		*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
125				 le32_to_cpu(READ_ONCE(desc->ctrl)));
126	if (buf)
127		dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
128				 DMA_FROM_DEVICE);
129	entry->buf = NULL;
130
131	return buf;
132}
133
134static int
135mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
136			bool rx)
137{
138	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
139	int n_buf = 0;
140
141	spin_lock_bh(&q->lock);
142	while (q->queued < q->n_desc) {
143		struct mtk_wed_wo_queue_entry *entry;
144		dma_addr_t addr;
145		void *buf;
146
147		buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
148		if (!buf)
149			break;
150
151		addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
152		if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
153			skb_free_frag(buf);
154			break;
155		}
156
157		q->head = (q->head + 1) % q->n_desc;
158		entry = &q->entry[q->head];
159		entry->addr = addr;
160		entry->len = q->buf_size;
161		q->entry[q->head].buf = buf;
162
163		if (rx) {
164			struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
165			u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
166				   FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
167					      entry->len);
168
169			WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
170			WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
171		}
172		q->queued++;
173		n_buf++;
174	}
175	spin_unlock_bh(&q->lock);
176
177	return n_buf;
178}
179
180static void
181mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
182{
183	mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
184	mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
185}
186
187static void
188mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
189{
190	for (;;) {
191		struct mtk_wed_mcu_hdr *hdr;
192		struct sk_buff *skb;
193		void *data;
194		u32 len;
195
196		data = mtk_wed_wo_dequeue(wo, q, &len, false);
197		if (!data)
198			break;
199
200		skb = build_skb(data, q->buf_size);
201		if (!skb) {
202			skb_free_frag(data);
203			continue;
204		}
205
206		__skb_put(skb, len);
207		if (mtk_wed_mcu_check_msg(wo, skb)) {
208			dev_kfree_skb(skb);
209			continue;
210		}
211
212		hdr = (struct mtk_wed_mcu_hdr *)skb->data;
213		if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
214			mtk_wed_mcu_rx_event(wo, skb);
215		else
216			mtk_wed_mcu_rx_unsolicited_event(wo, skb);
217	}
218
219	if (mtk_wed_wo_queue_refill(wo, q, true)) {
220		u32 index = (q->head - 1) % q->n_desc;
221
222		mtk_wed_wo_queue_kick(wo, q, index);
223	}
224}
225
226static irqreturn_t
227mtk_wed_wo_irq_handler(int irq, void *data)
228{
229	struct mtk_wed_wo *wo = data;
230
231	mtk_wed_wo_set_isr(wo, 0);
232	tasklet_schedule(&wo->mmio.irq_tasklet);
233
234	return IRQ_HANDLED;
235}
236
237static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
238{
239	struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
240	u32 intr, mask;
241
242	/* disable interrupts */
243	mtk_wed_wo_set_isr(wo, 0);
244
245	intr = mtk_wed_wo_get_isr(wo);
246	intr &= wo->mmio.irq_mask;
247	mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
248	mtk_wed_wo_irq_disable(wo, mask);
249
250	if (intr & MTK_WED_WO_RXCH_INT_MASK) {
251		mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
252		mtk_wed_wo_rx_complete(wo);
253	}
254}
255
256/* mtk wed wo hw queues */
257
258static int
259mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
260		       int n_desc, int buf_size, int index,
261		       struct mtk_wed_wo_queue_regs *regs)
262{
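	/* v6.2 still serialises refill, clean-up and tx submission with a
	 * per-queue spinlock; the v6.8 copy above no longer carries it.
	 */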
263	spin_lock_init(&q->lock);
264	q->regs = *regs;
265	q->n_desc = n_desc;
266	q->buf_size = buf_size;
267
268	q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
269				      &q->desc_dma, GFP_KERNEL);
270	if (!q->desc)
271		return -ENOMEM;
272
273	q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
274				GFP_KERNEL);
275	if (!q->entry)
276		return -ENOMEM;
277
278	return 0;
279}
280
281static void
282mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
283{
284	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
285	dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
286			  q->desc_dma);
287}
288
289static void
290mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
291{
292	struct page *page;
293	int i;
294
295	spin_lock_bh(&q->lock);
296	for (i = 0; i < q->n_desc; i++) {
297		struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
298
299		dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
300				 DMA_TO_DEVICE);
301		skb_free_frag(entry->buf);
302		entry->buf = NULL;
303	}
304	spin_unlock_bh(&q->lock);
305
306	if (!q->cache.va)
307		return;
308
309	page = virt_to_page(q->cache.va);
310	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
311	memset(&q->cache, 0, sizeof(q->cache));
312}
313
314static void
315mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
316{
317	struct page *page;
318
319	spin_lock_bh(&q->lock);
320	for (;;) {
321		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
322
323		if (!buf)
324			break;
325
326		skb_free_frag(buf);
327	}
328	spin_unlock_bh(&q->lock);
329
330	if (!q->cache.va)
331		return;
332
333	page = virt_to_page(q->cache.va);
334	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
335	memset(&q->cache, 0, sizeof(q->cache));
336}
337
338static void
339mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
340{
341	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
342	mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
343	mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
344}
345
346int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
347			    struct sk_buff *skb)
348{
349	struct mtk_wed_wo_queue_entry *entry;
350	struct mtk_wed_wo_queue_desc *desc;
351	int ret = 0, index;
352	u32 ctrl;
353
354	spin_lock_bh(&q->lock);
355
356	q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
357	index = (q->head + 1) % q->n_desc;
358	if (q->tail == index) {
359		ret = -ENOMEM;
360		goto out;
361	}
362
363	entry = &q->entry[index];
364	if (skb->len > entry->len) {
365		ret = -ENOMEM;
366		goto out;
367	}
368
369	desc = &q->desc[index];
370	q->head = index;
371
372	dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
373				DMA_TO_DEVICE);
374	memcpy(entry->buf, skb->data, skb->len);
375	dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
376				   DMA_TO_DEVICE);
377
378	ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
379	       MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
380	WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
381	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
382
383	mtk_wed_wo_queue_kick(wo, q, q->head);
384	mtk_wed_wo_kickout(wo);
385out:
386	spin_unlock_bh(&q->lock);
387
388	dev_kfree_skb(skb);
389
390	return ret;
391}
392
393static int
394mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
395{
396	return 0;
397}
398
399static int
400mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
401{
402	struct mtk_wed_wo_queue_regs regs;
403	struct device_node *np;
404	int ret;
405
406	np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
407	if (!np)
408		return -ENODEV;
409
410	wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
411	if (IS_ERR(wo->mmio.regs)) {
412		ret = PTR_ERR(wo->mmio.regs);
413		goto error_put;
414	}
415
416	wo->mmio.irq = irq_of_parse_and_map(np, 0);
417	wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
418	spin_lock_init(&wo->mmio.lock);
419	tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
420
421	ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
422			       mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
423			       KBUILD_MODNAME, wo);
424	if (ret)
425		goto error;
426
427	regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
428	regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
429	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
430	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
431
432	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
433				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
434				     &regs);
435	if (ret)
436		goto error;
437
438	mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
439	mtk_wed_wo_queue_reset(wo, &wo->q_tx);
440
441	regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
442	regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
443	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
444	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
445
446	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
447				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
448				     &regs);
449	if (ret)
450		goto error;
451
452	mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
453	mtk_wed_wo_queue_reset(wo, &wo->q_rx);
454
455	/* rx queue irqmask */
456	mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
457
458	return 0;
459
460error:
461	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
462error_put:
463	of_node_put(np);
464	return ret;
465}
466
467static void
468mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
469{
470	/* disable interrupts */
471	mtk_wed_wo_set_isr(wo, 0);
472
473	tasklet_disable(&wo->mmio.irq_tasklet);
474
475	disable_irq(wo->mmio.irq);
476	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
477
478	mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
479	mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
480	mtk_wed_wo_queue_free(wo, &wo->q_tx);
481	mtk_wed_wo_queue_free(wo, &wo->q_rx);
482}
483
484int mtk_wed_wo_init(struct mtk_wed_hw *hw)
485{
486	struct mtk_wed_wo *wo;
487	int ret;
488
489	wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
490	if (!wo)
491		return -ENOMEM;
492
493	hw->wed_wo = wo;
494	wo->hw = hw;
495
496	ret = mtk_wed_wo_hardware_init(wo);
497	if (ret)
498		return ret;
499
500	ret = mtk_wed_mcu_init(wo);
501	if (ret)
502		return ret;
503
504	return mtk_wed_wo_exception_init(wo);
505}
506
507void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
508{
509	struct mtk_wed_wo *wo = hw->wed_wo;
510
511	mtk_wed_wo_hw_deinit(wo);
512}