// SPDX-License-Identifier: ISC
/* Copyright (C) 2023 MediaTek Inc. */

#include <linux/module.h>
#include <linux/firmware.h>

#include "mt792x.h"
#include "dma.h"
#include "trace.h"

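/*
 * Hard interrupt handler: mask all host interrupts up front and defer the
 * real work to the irq tasklet. The mask is written even before the
 * MT76_STATE_INITIALIZED check, so a stray interrupt on an uninitialized
 * device stays silenced.
 *
 * A minimal registration sketch (an assumption for illustration; the real
 * call site lives in the bus-specific probe code, not in this file):
 *
 *	devm_request_irq(mdev->dev, pdev->irq, mt792x_irq_handler,
 *			 IRQF_SHARED, KBUILD_MODNAME, dev);
 */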
irqreturn_t mt792x_irq_handler(int irq, void *dev_instance)
{
	struct mt792x_dev *dev = dev_instance;

	mt76_wr(dev, dev->irq_map->host_irq_enable, 0);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
		return IRQ_NONE;

	tasklet_schedule(&dev->mt76.irq_tasklet);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt792x_irq_handler);

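/*
 * Bottom half for mt792x_irq_handler(): read and acknowledge the WFDMA
 * interrupt status, keep the pending sources masked (they are re-enabled
 * from the NAPI completion paths below), and schedule the matching NAPI
 * contexts. An MCU wake event (MT_MCU_CMD_WAKE_RX_PCIE) is folded into
 * the RX data-complete path so the data ring is drained after a PCIe
 * wakeup.
 */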
void mt792x_irq_tasklet(unsigned long data)
{
	struct mt792x_dev *dev = (struct mt792x_dev *)data;
	const struct mt792x_irq_map *irq_map = dev->irq_map;
	u32 intr, mask = 0;

	mt76_wr(dev, irq_map->host_irq_enable, 0);

	intr = mt76_rr(dev, MT_WFDMA0_HOST_INT_STA);
	intr &= dev->mt76.mmio.irqmask;
	mt76_wr(dev, MT_WFDMA0_HOST_INT_STA, intr);

	trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);

	mask |= intr & (irq_map->rx.data_complete_mask |
			irq_map->rx.wm_complete_mask |
			irq_map->rx.wm2_complete_mask);
	if (intr & dev->irq_map->tx.mcu_complete_mask)
		mask |= dev->irq_map->tx.mcu_complete_mask;

	if (intr & MT_INT_MCU_CMD) {
		u32 intr_sw;

		intr_sw = mt76_rr(dev, MT_MCU_CMD);
		/* ack MCU2HOST_SW_INT_STA */
		mt76_wr(dev, MT_MCU_CMD, intr_sw);
		if (intr_sw & MT_MCU_CMD_WAKE_RX_PCIE) {
			mask |= irq_map->rx.data_complete_mask;
			intr |= irq_map->rx.data_complete_mask;
		}
	}

	mt76_set_irq_mask(&dev->mt76, irq_map->host_irq_enable, mask, 0);

	if (intr & dev->irq_map->tx.all_complete_mask)
		napi_schedule(&dev->mt76.tx_napi);

	if (intr & irq_map->rx.wm_complete_mask)
		napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);

	if (intr & irq_map->rx.wm2_complete_mask)
		napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);

	if (intr & irq_map->rx.data_complete_mask)
		napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
}
EXPORT_SYMBOL_GPL(mt792x_irq_tasklet);

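/*
 * NAPI poll-complete hook: once a RX queue has been fully drained,
 * re-enable the interrupt source that the irq tasklet masked for it.
 * The main data queue maps to the data-complete bit, MCU_WA to the WM2
 * completion bit and everything else to the WM completion bit.
 */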
void mt792x_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
	const struct mt792x_irq_map *irq_map = dev->irq_map;

	if (q == MT_RXQ_MAIN)
		mt76_connac_irq_enable(mdev, irq_map->rx.data_complete_mask);
	else if (q == MT_RXQ_MCU_WA)
		mt76_connac_irq_enable(mdev, irq_map->rx.wm2_complete_mask);
	else
		mt76_connac_irq_enable(mdev, irq_map->rx.wm_complete_mask);
}
EXPORT_SYMBOL_GPL(mt792x_rx_poll_complete);

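/*
 * Each ring's EXT_CTRL register packs the prefetch FIFO base offset into
 * bits 31:16 and the prefetch depth into bits 15:0; for example,
 * PREFETCH(0x0100, 0x10) writes 0x01000010. mt7925 uses a deeper
 * prefetch (0x10) on its first four TX rings, while the older chips use
 * a uniform depth of 0x4 across a different ring layout.
 */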
#define PREFETCH(base, depth)	((base) << 16 | (depth))
static void mt792x_dma_prefetch(struct mt792x_dev *dev)
{
	if (is_mt7925(&dev->mt76)) {
		/* rx ring */
		mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0000, 0x4));
		mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL, PREFETCH(0x0040, 0x4));
		mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x0080, 0x4));
		mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x00c0, 0x4));
		/* tx ring */
		mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x0100, 0x10));
		mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x0200, 0x10));
		mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x0300, 0x10));
		mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x0400, 0x10));
		mt76_wr(dev, MT_WFDMA0_TX_RING15_EXT_CTRL, PREFETCH(0x0500, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x0540, 0x4));
	} else {
		/* rx ring */
		mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4));
		mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x40, 0x4));
		mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x80, 0x4));
		mt76_wr(dev, MT_WFDMA0_RX_RING4_EXT_CTRL, PREFETCH(0xc0, 0x4));
		mt76_wr(dev, MT_WFDMA0_RX_RING5_EXT_CTRL, PREFETCH(0x100, 0x4));
		/* tx ring */
		mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x140, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x180, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x1c0, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x200, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x240, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x280, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x2c0, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x340, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
	}
}

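/*
 * Bring the WFDMA engine up: program the per-ring prefetch layout, reset
 * the DMA indices, disable delayed interrupts, then enable the TX/RX DMA
 * engines and unmask the completion and MCU command interrupts.
 * MT_WFDMA_NEED_REINIT is set in the dummy CR as a marker that is later
 * consulted via mt792x_dma_need_reinit() (defined elsewhere) in
 * mt792x_wpdma_reinit_cond() below.
 */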
int mt792x_dma_enable(struct mt792x_dev *dev)
{
	if (is_mt7925(&dev->mt76))
		mt76_rmw(dev, MT_UWFDMA0_GLO_CFG_EXT1, BIT(28), BIT(28));

	/* configure prefetch settings */
	mt792x_dma_prefetch(dev);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);

	/* configure delay interrupt */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
		 MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
		 MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
		 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		 MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);

	/* enable interrupts for TX/RX rings */
	mt76_connac_irq_enable(&dev->mt76,
			       dev->irq_map->tx.all_complete_mask |
			       dev->irq_map->rx.data_complete_mask |
			       dev->irq_map->rx.wm2_complete_mask |
			       dev->irq_map->rx.wm_complete_mask |
			       MT_INT_MCU_CMD);
	mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_dma_enable);

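/*
 * Full DMA reset path: stop the engine (optionally with a WFDMA logic
 * reset when force is set), reset every TX, MCU and RX hardware queue,
 * flush pending TX status, then re-run the enable sequence above.
 */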
static int
mt792x_dma_reset(struct mt792x_dev *dev, bool force)
{
	int i, err;

	err = mt792x_dma_disable(dev, force);
	if (err)
		return err;

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_reset(dev, dev->mphy.q_tx[i]);

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	return mt792x_dma_enable(dev);
}

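/*
 * WPDMA reset as seen by callers: clean out all queued TX/RX buffers
 * first, optionally reset the whole WiFi subsystem, run the DMA reset
 * above, and finally re-initialize and refill the RX rings.
 */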
int mt792x_wpdma_reset(struct mt792x_dev *dev, bool force)
{
	int i, err;

	/* clean up hw queues */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	if (force) {
		err = mt792x_wfsys_reset(dev);
		if (err)
			return err;
	}
	err = mt792x_dma_reset(dev, force);
	if (err)
		return err;

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wpdma_reset);

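/*
 * Conditionally reinitialize WPDMA after a low-power wakeup: when
 * mt792x_dma_need_reinit() (defined elsewhere) reports that the DMA
 * state was lost, mask all interrupts, run the non-forced reset path
 * above, re-enable the PCIe MAC interrupts and account the wakeup in
 * the power-management stats.
 */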
int mt792x_wpdma_reinit_cond(struct mt792x_dev *dev)
{
	struct mt76_connac_pm *pm = &dev->pm;
	int err;

	/* check if the wpdma must be reinitialized */
	if (mt792x_dma_need_reinit(dev)) {
		/* disable interrupts */
		mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);

		err = mt792x_wpdma_reset(dev, false);
		if (err) {
			dev_err(dev->mt76.dev, "wpdma reset failed\n");
			return err;
		}

		/* enable interrupts */
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		pm->stats.lp_wake++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wpdma_reinit_cond);

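/*
 * Stop the WFDMA engine: clear the TX/RX DMA enable bits, wait up to
 * ~100 ms for the busy flags to clear, bypass the DMA scheduler
 * (dmashdl) and, when force is set, pulse the WFDMA logic/dmashdl
 * reset bits.
 */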
int mt792x_dma_disable(struct mt792x_dev *dev, bool force)
{
	/* disable WFDMA0 */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (!mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
				 MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1))
		return -ETIMEDOUT;

	/* disable dmashdl */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
		   MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
	mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);

	if (force) {
		/* reset */
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_dma_disable);

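/*
 * Teardown counterpart of mt792x_dma_enable(): stop the engine, wait for
 * the busy flags to drain, pulse the reset bits and free all DMA rings
 * via the common mt76 layer.
 */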
void mt792x_dma_cleanup(struct mt792x_dev *dev)
{
	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
			    MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
			    MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1);

	/* reset */
	mt76_clear(dev, MT_WFDMA0_RST,
		   MT_WFDMA0_RST_DMASHDL_ALL_RST |
		   MT_WFDMA0_RST_LOGIC_RST);

	mt76_set(dev, MT_WFDMA0_RST,
		 MT_WFDMA0_RST_DMASHDL_ALL_RST |
		 MT_WFDMA0_RST_LOGIC_RST);

	mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt792x_dma_cleanup);

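/*
 * NAPI TX poll: take a runtime-PM reference before touching the
 * hardware. If the device is asleep, complete NAPI immediately and let
 * the wake worker re-schedule the cleanup; otherwise reap completed TX
 * descriptors and re-enable the TX completion interrupts.
 */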
int mt792x_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt792x_dev *dev;

	dev = container_of(napi, struct mt792x_dev, mt76.tx_napi);

	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		napi_complete(napi);
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return 0;
	}

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete(napi))
		mt76_connac_irq_enable(&dev->mt76,
				       dev->irq_map->tx.all_complete_mask);
	mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_poll_tx);

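/*
 * NAPI RX poll: same runtime-PM handling as the TX path above, with the
 * actual ring processing delegated to the generic mt76_dma_rx_poll().
 */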
int mt792x_poll_rx(struct napi_struct *napi, int budget)
{
	struct mt792x_dev *dev;
	int done;

	dev = container_of(napi->dev, struct mt792x_dev, mt76.napi_dev);

	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		napi_complete(napi);
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return 0;
	}
	done = mt76_dma_rx_poll(napi, budget);
	mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	return done;
}
EXPORT_SYMBOL_GPL(mt792x_poll_rx);

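/*
 * Assert and release the WiFi subsystem software reset, then wait up to
 * 500 ms for the init-done flag. The reset register lives at a
 * chip-dependent address: 0x18000140 on mt7921, 0x7c000140 otherwise.
 */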
int mt792x_wfsys_reset(struct mt792x_dev *dev)
{
	u32 addr = is_mt7921(&dev->mt76) ? 0x18000140 : 0x7c000140;

	mt76_clear(dev, addr, WFSYS_SW_RST_B);
	msleep(50);
	mt76_set(dev, addr, WFSYS_SW_RST_B);

	if (!__mt76_poll_msec(&dev->mt76, addr, WFSYS_SW_INIT_DONE,
			      WFSYS_SW_INIT_DONE, 500))
		return -ETIMEDOUT;

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wfsys_reset);