/* drivers/net/wireless/ath/ath11k/pcic.c — as shipped in Linux v6.8.
 * (Web-scrape navigation text removed; a second copy of this file from
 * Linux v6.13.7 follows after the v6.8 copy.)
 */
  1// SPDX-License-Identifier: BSD-3-Clause-Clear
  2/*
  3 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
  4 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5 */
  6
  7#include "core.h"
  8#include "pcic.h"
  9#include "debug.h"
 10
/* Human-readable interrupt names, indexed by the ATH11K_PCI_IRQ_*
 * offsets.  Passed to request_irq() so the sources show up with
 * meaningful names in /proc/interrupts.
 * NOTE(review): "reo2ost-exception" looks like a typo for
 * "reo2host-exception", but the string is userspace-visible via
 * /proc/interrupts, so it must not be changed silently.
 */
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
	"bhi",
	"mhi-er0",
	"mhi-er1",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};
 64
/* Per-chip MSI vector layout: how total_vectors are split between the
 * "MHI", "CE", "WAKE" and "DP" users.  The matching entry is selected by
 * hw_rev in ath11k_pcic_init_msi_config(); user names are looked up via
 * ath11k_pcic_get_user_msi_assignment().
 */
static const struct ath11k_msi_config ath11k_msi_config[] = {
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_QCA6390_HW20,
	},
	{
		.total_vectors = 16,
		.total_users = 3,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
			{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
		},
		.hw_rev = ATH11K_HW_QCN9074_HW10,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_WCN6855_HW20,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_WCN6855_HW21,
	},
	{
		/* WCN6750 has no MHI/WAKE users: MHI vectors are not needed
		 * on this (non-PCIe) target.
		 */
		.total_vectors = 28,
		.total_users = 2,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "CE", .num_vectors = 10, .base_vector = 0 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 10 },
		},
		.hw_rev = ATH11K_HW_WCN6750_HW10,
	},
};
119
120int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
121{
122	const struct ath11k_msi_config *msi_config;
123	int i;
124
125	for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) {
126		msi_config = &ath11k_msi_config[i];
127
128		if (msi_config->hw_rev == ab->hw_rev)
129			break;
130	}
131
132	if (i == ARRAY_SIZE(ath11k_msi_config)) {
133		ath11k_err(ab, "failed to fetch msi config, unsupported hw version: 0x%x\n",
134			   ab->hw_rev);
135		return -EINVAL;
136	}
137
138	ab->pci.msi.config = msi_config;
139	return 0;
140}
141EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
142
143static void __ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
144{
145	if (offset < ATH11K_PCI_WINDOW_START)
146		iowrite32(value, ab->mem  + offset);
147	else
148		ab->pci.ops->window_write32(ab, offset, value);
149}
150
151void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
152{
153	int ret = 0;
154	bool wakeup_required;
155
156	/* for offset beyond BAR + 4K - 32, may
157	 * need to wakeup the device to access.
158	 */
159	wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
160			  offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
161	if (wakeup_required && ab->pci.ops->wakeup)
162		ret = ab->pci.ops->wakeup(ab);
163
164	__ath11k_pcic_write32(ab, offset, value);
165
166	if (wakeup_required && !ret && ab->pci.ops->release)
167		ab->pci.ops->release(ab);
168}
169EXPORT_SYMBOL(ath11k_pcic_write32);
170
171static u32 __ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
172{
173	u32 val;
174
175	if (offset < ATH11K_PCI_WINDOW_START)
176		val = ioread32(ab->mem + offset);
177	else
178		val = ab->pci.ops->window_read32(ab, offset);
179
180	return val;
181}
182
183u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
184{
185	int ret = 0;
186	u32 val;
187	bool wakeup_required;
188
189	/* for offset beyond BAR + 4K - 32, may
190	 * need to wakeup the device to access.
191	 */
192	wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
193			  offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
194	if (wakeup_required && ab->pci.ops->wakeup)
195		ret = ab->pci.ops->wakeup(ab);
196
197	val = __ath11k_pcic_read32(ab, offset);
198
199	if (wakeup_required && !ret && ab->pci.ops->release)
200		ab->pci.ops->release(ab);
201
202	return val;
203}
204EXPORT_SYMBOL(ath11k_pcic_read32);
205
206int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end)
207{
208	int ret = 0;
209	bool wakeup_required;
210	u32 *data = buf;
211	u32 i;
212
213	/* for offset beyond BAR + 4K - 32, may
214	 * need to wakeup the device to access.
215	 */
216	wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
217			  end >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
218	if (wakeup_required && ab->pci.ops->wakeup) {
219		ret = ab->pci.ops->wakeup(ab);
220		if (ret) {
221			ath11k_warn(ab,
222				    "wakeup failed, data may be invalid: %d",
223				    ret);
224			/* Even though wakeup() failed, continue processing rather
225			 * than returning because some parts of the data may still
226			 * be valid and useful in some cases, e.g. could give us
227			 * some clues on firmware crash.
228			 * Mislead due to invalid data could be avoided because we
229			 * are aware of the wakeup failure.
230			 */
231		}
232	}
233
234	for (i = start; i < end + 1; i += 4)
235		*data++ = __ath11k_pcic_read32(ab, i);
236
237	if (wakeup_required && ab->pci.ops->release)
238		ab->pci.ops->release(ab);
239
240	return 0;
241}
242EXPORT_SYMBOL(ath11k_pcic_read);
243
244void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
245				 u32 *msi_addr_hi)
246{
247	*msi_addr_lo = ab->pci.msi.addr_lo;
248	*msi_addr_hi = ab->pci.msi.addr_hi;
249}
250EXPORT_SYMBOL(ath11k_pcic_get_msi_address);
251
252int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
253					int *num_vectors, u32 *user_base_data,
254					u32 *base_vector)
255{
256	const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
257	int idx;
258
259	for (idx = 0; idx < msi_config->total_users; idx++) {
260		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
261			*num_vectors = msi_config->users[idx].num_vectors;
262			*base_vector =  msi_config->users[idx].base_vector;
263			*user_base_data = *base_vector + ab->pci.msi.ep_base_data;
264
265			ath11k_dbg(ab, ATH11K_DBG_PCI,
266				   "msi assignment %s num_vectors %d user_base_data %u base_vector %u\n",
267				   user_name, *num_vectors, *user_base_data,
268				   *base_vector);
269
270			return 0;
271		}
272	}
273
274	ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
275
276	return -EINVAL;
277}
278EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment);
279
280void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx)
281{
282	u32 i, msi_data_idx;
283
284	for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
285		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
286			continue;
287
288		if (ce_id == i)
289			break;
290
291		msi_data_idx++;
292	}
293	*msi_idx = msi_data_idx;
294}
295EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx);
296
297static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab)
298{
299	int i, j;
300
301	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
302		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
303
304		for (j = 0; j < irq_grp->num_irq; j++)
305			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
306
307		netif_napi_del(&irq_grp->napi);
 
308	}
309}
310
311void ath11k_pcic_free_irq(struct ath11k_base *ab)
312{
313	int i, irq_idx;
314
315	for (i = 0; i < ab->hw_params.ce_count; i++) {
316		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
317			continue;
318		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
319		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
320	}
321
322	ath11k_pcic_free_ext_irq(ab);
323}
324EXPORT_SYMBOL(ath11k_pcic_free_irq);
325
326static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
327{
328	u32 irq_idx;
329
330	/* In case of one MSI vector, we handle irq enable/disable in a
331	 * uniform way since we only have one irq
332	 */
333	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
334		return;
335
336	irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
337	enable_irq(ab->irq_num[irq_idx]);
338}
339
340static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
341{
342	u32 irq_idx;
343
344	/* In case of one MSI vector, we handle irq enable/disable in a
345	 * uniform way since we only have one irq
346	 */
347	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
348		return;
349
350	irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
351	disable_irq_nosync(ab->irq_num[irq_idx]);
352}
353
354static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab)
355{
356	int i;
357
358	clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
359
360	for (i = 0; i < ab->hw_params.ce_count; i++) {
361		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
362			continue;
363		ath11k_pcic_ce_irq_disable(ab, i);
364	}
365}
366
367static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab)
368{
369	int i;
370	int irq_idx;
371
372	for (i = 0; i < ab->hw_params.ce_count; i++) {
373		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
374			continue;
375
376		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
377		synchronize_irq(ab->irq_num[irq_idx]);
378	}
379}
380
/* Bottom half for a CE interrupt: process the pipe's completed
 * descriptors, then unmask the line that the top half masked.
 */
static void ath11k_pcic_ce_tasklet(struct tasklet_struct *t)
{
	struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
	int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;

	ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	/* Pairs with disable_irq_nosync() in the interrupt handler. */
	enable_irq(ce_pipe->ab->irq_num[irq_idx]);
}
390
/* Top half for a CE interrupt: mask the line and defer the real work to
 * the per-pipe tasklet.
 */
static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ce_pipe *ce_pipe = arg;
	struct ath11k_base *ab = ce_pipe->ab;
	int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;

	/* Ignore interrupts arriving while CE irqs are administratively off. */
	if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	/* Keep the line masked until the tasklet has drained the pipe;
	 * re-enabled in ath11k_pcic_ce_tasklet().
	 */
	disable_irq_nosync(ab->irq_num[irq_idx]);

	tasklet_schedule(&ce_pipe->intr_tq);

	return IRQ_HANDLED;
}
409
410static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
411{
412	struct ath11k_base *ab = irq_grp->ab;
413	int i;
414
415	/* In case of one MSI vector, we handle irq enable/disable
416	 * in a uniform way since we only have one irq
417	 */
418	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
419		return;
420
421	for (i = 0; i < irq_grp->num_irq; i++)
422		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
423}
424
/* Disable all DP group interrupts and their NAPI contexts.  Each group's
 * irqs are masked before NAPI is torn down so no new poll can be
 * scheduled mid-teardown.
 */
static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
{
	int i;

	/* Clear the flag first so the handlers become no-ops. */
	clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		ath11k_pcic_ext_grp_disable(irq_grp);

		if (irq_grp->napi_enabled) {
			/* Let an in-flight poll complete before disabling. */
			napi_synchronize(&irq_grp->napi);
			napi_disable(&irq_grp->napi);
			irq_grp->napi_enabled = false;
		}
	}
}
443
444static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
445{
446	struct ath11k_base *ab = irq_grp->ab;
447	int i;
448
449	/* In case of one MSI vector, we handle irq enable/disable in a
450	 * uniform way since we only have one irq
451	 */
452	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
453		return;
454
455	for (i = 0; i < irq_grp->num_irq; i++)
456		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
457}
458
/* Enable NAPI and the irqs of every DP group.  NAPI is enabled before
 * the irqs so a poll scheduled by the first interrupt finds it ready;
 * the EXT_IRQ_ENABLED flag is set last, once everything is armed.
 */
void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		if (!irq_grp->napi_enabled) {
			napi_enable(&irq_grp->napi);
			irq_grp->napi_enabled = true;
		}
		ath11k_pcic_ext_grp_enable(irq_grp);
	}

	set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
}
EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable);
476
477static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab)
478{
479	int i, j, irq_idx;
480
481	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
482		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
483
484		for (j = 0; j < irq_grp->num_irq; j++) {
485			irq_idx = irq_grp->irqs[j];
486			synchronize_irq(ab->irq_num[irq_idx]);
487		}
488	}
489}
490
/* Disable DP interrupts, then wait for running handlers to return. */
void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_pcic_ext_irq_disable(ab);
	ath11k_pcic_sync_ext_irqs(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable);
497
/* NAPI poll callback for one DP interrupt group: service the SRNG rings
 * and, once the work fits within the budget, leave polling mode and
 * unmask the group's irq lines (masked in the interrupt handler).
 */
static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
						struct ath11k_ext_irq_grp,
						napi);
	struct ath11k_base *ab = irq_grp->ab;
	int work_done;
	int i;

	work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		for (i = 0; i < irq_grp->num_irq; i++)
			enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
	}

	/* The NAPI contract requires the return value to never exceed
	 * the budget, even if the service routine did more work.
	 */
	if (work_done > budget)
		work_done = budget;

	return work_done;
}
519
/* Top half for a DP group interrupt: mask the group's lines and hand
 * the work to NAPI.
 */
static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ext_irq_grp *irq_grp = arg;
	struct ath11k_base *ab = irq_grp->ab;
	int i;

	/* Ignore interrupts arriving while ext irqs are administratively off. */
	if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq %d\n", irq);

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	/* Masked until the poll runs out of work; re-enabled in
	 * ath11k_pcic_ext_grp_napi_poll().
	 */
	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}
541
/* Translate an MSI vector index into a Linux irq number via the bus ops. */
static int
ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{
	return ab->pci.ops->get_msi_irq(ab, vector);
}
547
/* Set up the DP (data path) interrupt groups: one NAPI context per
 * group and, for groups selected by any ring mask, one MSI-backed irq.
 * Returns 0 on success or a negative errno.
 */
static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
{
	int i, j, ret, num_vectors = 0;
	u32 user_base_data = 0, base_vector = 0;
	unsigned long irq_flags;

	ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
						  &user_base_data,
						  &base_vector);
	if (ret < 0)
		return ret;

	irq_flags = IRQF_SHARED;
	/* With one shared MSI vector, keep the irq pinned to one CPU. */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		irq_flags |= IRQF_NOBALANCING;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		/* NAPI needs a (dummy) netdev to attach to. */
		init_dummy_netdev(&irq_grp->napi_ndev);
		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
			       ath11k_pcic_ext_grp_napi_poll);

		/* Only groups referenced by at least one ring mask get an irq. */
		if (ab->hw_params.ring_mask->tx[i] ||
		    ab->hw_params.ring_mask->rx[i] ||
		    ab->hw_params.ring_mask->rx_err[i] ||
		    ab->hw_params.ring_mask->rx_wbm_rel[i] ||
		    ab->hw_params.ring_mask->reo_status[i] ||
		    ab->hw_params.ring_mask->rxdma2host[i] ||
		    ab->hw_params.ring_mask->host2rxdma[i] ||
		    ab->hw_params.ring_mask->rx_mon_status[i]) {
			num_irq = 1;
		}

		irq_grp->num_irq = num_irq;
		irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];
			/* Wrap around if there are more groups than DP vectors. */
			int vector = (i % num_vectors) + base_vector;
			int irq = ath11k_pcic_get_msi_irq(ab, vector);

			if (irq < 0)
				return irq;

			ab->irq_num[irq_idx] = irq;

			ath11k_dbg(ab, ATH11K_DBG_PCI,
				   "irq %d group %d\n", irq, i);

			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler,
					  irq_flags, "DP_EXT_IRQ", irq_grp);
			if (ret) {
				ath11k_err(ab, "failed request irq %d: %d\n",
					   vector, ret);
				/* NOTE(review): irqs already requested for
				 * earlier groups are not released on this
				 * path; presumably the caller unwinds via
				 * ath11k_pcic_free_irq() — confirm.
				 */
				return ret;
			}
		}
		/* Groups start masked; enabled later via ext_irq_enable(). */
		ath11k_pcic_ext_grp_disable(irq_grp);
	}

	return 0;
}
615
/* Request all CE interrupts (skipping CEs with interrupts disabled) and
 * then configure the DP group interrupts.  Each CE gets a tasklet as
 * its bottom half and starts with its irq masked.
 * Returns 0 on success or a negative errno.
 */
int ath11k_pcic_config_irq(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *ce_pipe;
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	unsigned int msi_data;
	int irq, i, ret, irq_idx;
	unsigned long irq_flags;

	ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count,
						  &msi_data_start, &msi_irq_start);
	if (ret)
		return ret;

	irq_flags = IRQF_SHARED;
	/* With one shared MSI vector, keep the irq pinned to one CPU. */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		irq_flags |= IRQF_NOBALANCING;

	/* Configure CE irqs */
	for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		/* Wrap around if there are more CEs than CE vectors. */
		msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
		irq = ath11k_pcic_get_msi_irq(ab, msi_data);
		if (irq < 0)
			return irq;

		ce_pipe = &ab->ce.ce_pipe[i];

		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;

		tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet);

		ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler,
				  irq_flags, irq_name[irq_idx], ce_pipe);
		if (ret) {
			ath11k_err(ab, "failed to request irq %d: %d\n",
				   irq_idx, ret);
			return ret;
		}

		ab->irq_num[irq_idx] = irq;
		msi_data_idx++;

		/* Start masked; enabled later via ce_irqs_enable(). */
		ath11k_pcic_ce_irq_disable(ab, i);
	}

	ret = ath11k_pcic_ext_irq_config(ab);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_config_irq);
672
673void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab)
674{
675	int i;
676
677	set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
678
679	for (i = 0; i < ab->hw_params.ce_count; i++) {
680		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
681			continue;
682		ath11k_pcic_ce_irq_enable(ab, i);
683	}
684}
685EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable);
686
687static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab)
688{
689	int i;
690
691	for (i = 0; i < ab->hw_params.ce_count; i++) {
692		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
693
694		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
695			continue;
696
697		tasklet_kill(&ce_pipe->intr_tq);
698	}
699}
700
/* Fully quiesce CE interrupt processing: mask the lines, wait for
 * running handlers, then kill the bottom-half tasklets.
 */
void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irqs_disable(ab);
	ath11k_pcic_sync_ce_irqs(ab);
	ath11k_pcic_kill_tasklets(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync);
708
/* Stop the bus layer: quiesce CE interrupts and clean up the CE pipes. */
void ath11k_pcic_stop(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irq_disable_sync(ab);
	ath11k_ce_cleanup_pipes(ab);
}
EXPORT_SYMBOL(ath11k_pcic_stop);
715
/* Start the bus layer: mark device init done (enables the wakeup logic
 * in the register accessors), enable CE interrupts and post rx buffers.
 * Always returns 0.
 */
int ath11k_pcic_start(struct ath11k_base *ab)
{
	set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);

	ath11k_pcic_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_start);
726
/* Resolve the uplink (host->target) and downlink (target->host) CE pipe
 * numbers for an HTC service id from the per-chip service map.
 * Returns 0 on success, -ENOENT (with a WARN) if either direction is
 * not mapped.  The WARN_ONs flag duplicate directional entries in the
 * table, which would indicate a malformed service map.
 */
int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
				    u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
		entry = &ab->hw_params.svc_to_ce_map[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe);
770
771int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
772				 const struct ath11k_pci_ops *pci_ops)
773{
774	if (!pci_ops)
775		return 0;
776
777	/* Return error if mandatory pci_ops callbacks are missing */
778	if (!pci_ops->get_msi_irq || !pci_ops->window_write32 ||
779	    !pci_ops->window_read32)
780		return -EINVAL;
781
782	ab->pci.ops = pci_ops;
783	return 0;
784}
785EXPORT_SYMBOL(ath11k_pcic_register_pci_ops);
786
787void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
788{
789	int i;
790
791	for (i = 0; i < ab->hw_params.ce_count; i++) {
792		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
793		    i == ATH11K_PCI_CE_WAKE_IRQ)
794			continue;
795		ath11k_pcic_ce_irq_enable(ab, i);
796	}
797}
798EXPORT_SYMBOL(ath11k_pci_enable_ce_irqs_except_wake_irq);
799
/* Quiesce every interrupt-capable CE except the wake CE: mask the line,
 * wait for a running handler, and kill the bottom-half tasklet.  The
 * wake CE is left armed so the device can still wake the host.
 */
void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
{
	int i;
	int irq_idx;
	struct ath11k_ce_pipe *ce_pipe;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		ce_pipe = &ab->ce.ce_pipe[i];
		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
		    i == ATH11K_PCI_CE_WAKE_IRQ)
			continue;

		disable_irq_nosync(ab->irq_num[irq_idx]);
		synchronize_irq(ab->irq_num[irq_idx]);
		tasklet_kill(&ce_pipe->intr_tq);
	}
}
EXPORT_SYMBOL(ath11k_pci_disable_ce_irqs_except_wake_irq);
/* ==== Below: the same file (pcic.c) as shipped in Linux v6.13.7 ==== */
  1// SPDX-License-Identifier: BSD-3-Clause-Clear
  2/*
  3 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
  4 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5 */
  6
  7#include "core.h"
  8#include "pcic.h"
  9#include "debug.h"
 10
/* Human-readable interrupt names, indexed by the ATH11K_PCI_IRQ_*
 * offsets; passed to request_irq() for /proc/interrupts.
 * NOTE(review): "reo2ost-exception" looks like a typo, but the string
 * is userspace-visible, so leave as-is.
 */
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
	"bhi",
	"mhi-er0",
	"mhi-er1",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};
 64
/* Per-chip MSI vector layout (v6.13.7 adds the QCA2066 entry relative
 * to v6.8).  Selected by hw_rev in ath11k_pcic_init_msi_config().
 */
static const struct ath11k_msi_config ath11k_msi_config[] = {
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_QCA6390_HW20,
	},
	{
		.total_vectors = 16,
		.total_users = 3,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
			{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
		},
		.hw_rev = ATH11K_HW_QCN9074_HW10,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_WCN6855_HW20,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_WCN6855_HW21,
	},
	{
		.total_vectors = 28,
		.total_users = 2,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "CE", .num_vectors = 10, .base_vector = 0 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 10 },
		},
		.hw_rev = ATH11K_HW_WCN6750_HW10,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_QCA2066_HW21,
	},
};
130
/* Select the MSI layout matching ab->hw_rev and cache it in
 * ab->pci.msi.config.  Returns 0 on success, -EINVAL if unsupported.
 */
int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
{
	const struct ath11k_msi_config *msi_config;
	int i;

	for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) {
		msi_config = &ath11k_msi_config[i];

		if (msi_config->hw_rev == ab->hw_rev)
			break;
	}

	if (i == ARRAY_SIZE(ath11k_msi_config)) {
		ath11k_err(ab, "failed to fetch msi config, unsupported hw version: 0x%x\n",
			   ab->hw_rev);
		return -EINVAL;
	}

	ab->pci.msi.config = msi_config;
	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
153
/* Raw 32-bit write: direct BAR access below the window region,
 * windowed access via bus ops above it.
 */
static void __ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
	if (offset < ATH11K_PCI_WINDOW_START)
		iowrite32(value, ab->mem  + offset);
	else
		ab->pci.ops->window_write32(ab, offset, value);
}
161
/* 32-bit register write; wakes the device first for offsets beyond
 * BAR + 4K - 32 and drops the wakeup vote afterwards only if taking
 * it succeeded.
 */
void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
	int ret = 0;
	bool wakeup_required;

	/* for offset beyond BAR + 4K - 32, may
	 * need to wakeup the device to access.
	 */
	wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
			  offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
	if (wakeup_required && ab->pci.ops->wakeup)
		ret = ab->pci.ops->wakeup(ab);

	__ath11k_pcic_write32(ab, offset, value);

	if (wakeup_required && !ret && ab->pci.ops->release)
		ab->pci.ops->release(ab);
}
EXPORT_SYMBOL(ath11k_pcic_write32);
181
/* Raw 32-bit read; mirror of __ath11k_pcic_write32(). */
static u32 __ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
{
	u32 val;

	if (offset < ATH11K_PCI_WINDOW_START)
		val = ioread32(ab->mem + offset);
	else
		val = ab->pci.ops->window_read32(ab, offset);

	return val;
}
193
/* 32-bit register read with the same wakeup/release handling as
 * ath11k_pcic_write32().
 */
u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
{
	int ret = 0;
	u32 val;
	bool wakeup_required;

	/* for offset beyond BAR + 4K - 32, may
	 * need to wakeup the device to access.
	 */
	wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
			  offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
	if (wakeup_required && ab->pci.ops->wakeup)
		ret = ab->pci.ops->wakeup(ab);

	val = __ath11k_pcic_read32(ab, offset);

	/* Drop the wakeup vote only if taking it succeeded. */
	if (wakeup_required && !ret && ab->pci.ops->release)
		ab->pci.ops->release(ab);

	return val;
}
EXPORT_SYMBOL(ath11k_pcic_read32);
216
/* Bulk register dump of the inclusive range [start, end] into buf, one
 * u32 per 4-byte step.  Always returns 0; a wakeup failure only warns.
 */
int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end)
{
	int ret = 0;
	bool wakeup_required;
	u32 *data = buf;
	u32 i;

	/* for offset beyond BAR + 4K - 32, may
	 * need to wakeup the device to access.
	 */
	wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
			  end >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
	if (wakeup_required && ab->pci.ops->wakeup) {
		ret = ab->pci.ops->wakeup(ab);
		if (ret) {
			ath11k_warn(ab,
				    "wakeup failed, data may be invalid: %d",
				    ret);
			/* Even though wakeup() failed, continue processing rather
			 * than returning because some parts of the data may still
			 * be valid and useful in some cases, e.g. could give us
			 * some clues on firmware crash.
			 * Mislead due to invalid data could be avoided because we
			 * are aware of the wakeup failure.
			 */
		}
	}

	for (i = start; i < end + 1; i += 4)
		*data++ = __ath11k_pcic_read32(ab, i);

	if (wakeup_required && ab->pci.ops->release)
		ab->pci.ops->release(ab);

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_read);
254
/* Return the cached MSI target address words for this device. */
void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
				 u32 *msi_addr_hi)
{
	*msi_addr_lo = ab->pci.msi.addr_lo;
	*msi_addr_hi = ab->pci.msi.addr_hi;
}
EXPORT_SYMBOL(ath11k_pcic_get_msi_address);
262
/* Look up the MSI vector block assigned to user_name ("CE", "DP", ...)
 * in the per-chip layout.  Fills num_vectors, base_vector and
 * user_base_data (base vector plus endpoint MSI data base); returns
 * -EINVAL for an unknown user.
 */
int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
					int *num_vectors, u32 *user_base_data,
					u32 *base_vector)
{
	const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
	int idx;

	for (idx = 0; idx < msi_config->total_users; idx++) {
		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
			*num_vectors = msi_config->users[idx].num_vectors;
			*base_vector =  msi_config->users[idx].base_vector;
			*user_base_data = *base_vector + ab->pci.msi.ep_base_data;

			ath11k_dbg(ab, ATH11K_DBG_PCI,
				   "msi assignment %s num_vectors %d user_base_data %u base_vector %u\n",
				   user_name, *num_vectors, *user_base_data,
				   *base_vector);

			return 0;
		}
	}

	ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);

	return -EINVAL;
}
EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment);
290
291void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx)
292{
293	u32 i, msi_data_idx;
294
295	for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
296		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
297			continue;
298
299		if (ce_id == i)
300			break;
301
302		msi_data_idx++;
303	}
304	*msi_idx = msi_data_idx;
305}
306EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx);
307
308static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab)
309{
310	int i, j;
311
312	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
313		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
314
315		for (j = 0; j < irq_grp->num_irq; j++)
316			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
317
318		netif_napi_del(&irq_grp->napi);
319		free_netdev(irq_grp->napi_ndev);
320	}
321}
322
323void ath11k_pcic_free_irq(struct ath11k_base *ab)
324{
325	int i, irq_idx;
326
327	for (i = 0; i < ab->hw_params.ce_count; i++) {
328		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
329			continue;
330		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
331		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
332	}
333
334	ath11k_pcic_free_ext_irq(ab);
335}
336EXPORT_SYMBOL(ath11k_pcic_free_irq);
337
338static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
339{
340	u32 irq_idx;
341
342	/* In case of one MSI vector, we handle irq enable/disable in a
343	 * uniform way since we only have one irq
344	 */
345	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
346		return;
347
348	irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
349	enable_irq(ab->irq_num[irq_idx]);
350}
351
352static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
353{
354	u32 irq_idx;
355
356	/* In case of one MSI vector, we handle irq enable/disable in a
357	 * uniform way since we only have one irq
358	 */
359	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
360		return;
361
362	irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
363	disable_irq_nosync(ab->irq_num[irq_idx]);
364}
365
366static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab)
367{
368	int i;
369
370	clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
371
372	for (i = 0; i < ab->hw_params.ce_count; i++) {
373		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
374			continue;
375		ath11k_pcic_ce_irq_disable(ab, i);
376	}
377}
378
379static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab)
380{
381	int i;
382	int irq_idx;
383
384	for (i = 0; i < ab->hw_params.ce_count; i++) {
385		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
386			continue;
387
388		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
389		synchronize_irq(ab->irq_num[irq_idx]);
390	}
391}
392
393static void ath11k_pcic_ce_tasklet(struct tasklet_struct *t)
394{
395	struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
396	int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
397
398	ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
399
400	enable_irq(ce_pipe->ab->irq_num[irq_idx]);
401}
402
/* Hard-irq handler for a CE pipe's MSI vector.
 *
 * Keeps the hard-irq path short: records the interrupt timestamp,
 * masks this vector and defers the actual ring servicing to
 * ath11k_pcic_ce_tasklet(), which re-enables the vector when done.
 */
static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ce_pipe *ce_pipe = arg;
	struct ath11k_base *ab = ce_pipe->ab;
	int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;

	/* CE irqs are logically disabled: acknowledge but do nothing */
	if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	/* mask before scheduling so the tasklet runs without re-entry;
	 * the tasklet re-enables this irq once servicing completes
	 */
	disable_irq_nosync(ab->irq_num[irq_idx]);

	tasklet_schedule(&ce_pipe->intr_tq);

	return IRQ_HANDLED;
}
421
422static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
423{
424	struct ath11k_base *ab = irq_grp->ab;
425	int i;
426
427	/* In case of one MSI vector, we handle irq enable/disable
428	 * in a uniform way since we only have one irq
429	 */
430	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
431		return;
432
433	for (i = 0; i < irq_grp->num_irq; i++)
434		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
435}
436
/* Disable all DP (ext) group interrupts and quiesce their NAPI
 * contexts.  The flag is cleared first so the hard-irq handler ignores
 * anything that fires while the per-group lines are being masked.
 */
static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
{
	int i;

	clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		ath11k_pcic_ext_grp_disable(irq_grp);

		if (irq_grp->napi_enabled) {
			/* wait for an in-flight poll before disabling */
			napi_synchronize(&irq_grp->napi);
			napi_disable(&irq_grp->napi);
			irq_grp->napi_enabled = false;
		}
	}
}
455
456static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
457{
458	struct ath11k_base *ab = irq_grp->ab;
459	int i;
460
461	/* In case of one MSI vector, we handle irq enable/disable in a
462	 * uniform way since we only have one irq
463	 */
464	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
465		return;
466
467	for (i = 0; i < irq_grp->num_irq; i++)
468		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
469}
470
471void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
472{
473	int i;
474
475	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
476		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
477
478		if (!irq_grp->napi_enabled) {
479			napi_enable(&irq_grp->napi);
480			irq_grp->napi_enabled = true;
481		}
482		ath11k_pcic_ext_grp_enable(irq_grp);
483	}
484
485	set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
486}
487EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable);
488
489static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab)
490{
491	int i, j, irq_idx;
492
493	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
494		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
495
496		for (j = 0; j < irq_grp->num_irq; j++) {
497			irq_idx = irq_grp->irqs[j];
498			synchronize_irq(ab->irq_num[irq_idx]);
499		}
500	}
501}
502
/* Disable DP (ext) interrupts, then wait for any handler already
 * running on another CPU to complete before returning.
 */
void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_pcic_ext_irq_disable(ab);
	ath11k_pcic_sync_ext_irqs(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable);
509
/* NAPI poll callback for one DP interrupt group.
 *
 * Services the group's SRNG rings via ath11k_dp_service_srng().  If the
 * budget was not exhausted, the poll is completed and the group's irqs
 * (masked by the hard-irq handler) are re-enabled.  The return value is
 * clamped to budget as the NAPI contract requires, since
 * ath11k_dp_service_srng() may report more work than budget.
 */
static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
						struct ath11k_ext_irq_grp,
						napi);
	struct ath11k_base *ab = irq_grp->ab;
	int work_done;
	int i;

	work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-arm the lines the hard-irq handler masked */
		for (i = 0; i < irq_grp->num_irq; i++)
			enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
	}

	if (work_done > budget)
		work_done = budget;

	return work_done;
}
531
/* Hard-irq handler for a DP (ext) group interrupt.
 *
 * Records the interrupt timestamp, masks all of the group's lines and
 * hands processing to NAPI; ath11k_pcic_ext_grp_napi_poll() re-enables
 * the lines once the budget allows completion.
 */
static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ext_irq_grp *irq_grp = arg;
	struct ath11k_base *ab = irq_grp->ab;
	int i;

	/* ext irqs are logically disabled: acknowledge but do nothing */
	if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq %d\n", irq);

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	/* mask the whole group before scheduling NAPI */
	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}
553
554static int
555ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
556{
557	return ab->pci.ops->get_msi_irq(ab, vector);
558}
559
/* Set up the DP (ext) group interrupts.
 *
 * For each of the ATH11K_EXT_IRQ_GRP_NUM_MAX groups a dummy netdev is
 * allocated to host the group's NAPI context, and one irq is requested
 * iff any of the group's ring masks is non-empty.  The group irqs are
 * left disabled; ath11k_pcic_ext_irq_enable() arms them later.
 *
 * Returns 0 on success or a negative errno; on failure all netdevs
 * allocated so far are released.
 */
static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
{
	int i, j, n, ret, num_vectors = 0;
	u32 user_base_data = 0, base_vector = 0;
	struct ath11k_ext_irq_grp *irq_grp;
	unsigned long irq_flags;

	ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
						  &user_base_data,
						  &base_vector);
	if (ret < 0)
		return ret;

	/* with one shared MSI vector the irq must stay on one CPU */
	irq_flags = IRQF_SHARED;
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		irq_flags |= IRQF_NOBALANCING;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		/* dummy netdev solely to host the NAPI context */
		irq_grp->napi_ndev = alloc_netdev_dummy(0);
		if (!irq_grp->napi_ndev) {
			ret = -ENOMEM;
			goto fail_allocate;
		}

		netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
			       ath11k_pcic_ext_grp_napi_poll);

		/* a group needs an irq only if it owns at least one ring */
		if (ab->hw_params.ring_mask->tx[i] ||
		    ab->hw_params.ring_mask->rx[i] ||
		    ab->hw_params.ring_mask->rx_err[i] ||
		    ab->hw_params.ring_mask->rx_wbm_rel[i] ||
		    ab->hw_params.ring_mask->reo_status[i] ||
		    ab->hw_params.ring_mask->rxdma2host[i] ||
		    ab->hw_params.ring_mask->host2rxdma[i] ||
		    ab->hw_params.ring_mask->rx_mon_status[i]) {
			num_irq = 1;
		}

		irq_grp->num_irq = num_irq;
		irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];
			/* groups share the DP vectors round-robin when
			 * there are fewer vectors than groups
			 */
			int vector = (i % num_vectors) + base_vector;
			int irq = ath11k_pcic_get_msi_irq(ab, vector);

			if (irq < 0) {
				ret = irq;
				goto fail_irq;
			}

			ab->irq_num[irq_idx] = irq;

			ath11k_dbg(ab, ATH11K_DBG_PCI,
				   "irq %d group %d\n", irq, i);

			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler,
					  irq_flags, "DP_EXT_IRQ", irq_grp);
			if (ret) {
				ath11k_err(ab, "failed request irq %d: %d\n",
					   vector, ret);
				/* free netdevs for groups 0..i inclusive;
				 * NOTE(review): irqs requested in earlier
				 * iterations are not freed here — verify
				 * against callers' error handling
				 */
				for (n = 0; n <= i; n++) {
					irq_grp = &ab->ext_irq_grp[n];
					free_netdev(irq_grp->napi_ndev);
				}
				return ret;
			}
		}
		ath11k_pcic_ext_grp_disable(irq_grp);
	}

	return 0;
fail_irq:
	/* i ->napi_ndev was properly allocated. Free it also */
	i += 1;
fail_allocate:
	for (n = 0; n < i; n++) {
		irq_grp = &ab->ext_irq_grp[n];
		free_netdev(irq_grp->napi_ndev);
	}
	return ret;
}
648
/* Configure all device interrupts: one irq + tasklet per
 * interrupt-capable CE pipe, then the DP (ext) group interrupts via
 * ath11k_pcic_ext_irq_config().
 *
 * CE irqs are requested but immediately disabled; they are armed later
 * by ath11k_pcic_ce_irqs_enable().  Returns 0 on success or a negative
 * errno.
 */
int ath11k_pcic_config_irq(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *ce_pipe;
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	unsigned int msi_data;
	int irq, i, ret, irq_idx;
	unsigned long irq_flags;

	ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count,
						  &msi_data_start, &msi_irq_start);
	if (ret)
		return ret;

	/* with one shared MSI vector the irq must stay on one CPU */
	irq_flags = IRQF_SHARED;
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		irq_flags |= IRQF_NOBALANCING;

	/* Configure CE irqs */
	for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		/* CEs share the CE vectors round-robin when there are
		 * fewer vectors than interrupt-capable pipes
		 */
		msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
		irq = ath11k_pcic_get_msi_irq(ab, msi_data);
		if (irq < 0)
			return irq;

		ce_pipe = &ab->ce.ce_pipe[i];

		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;

		tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet);

		ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler,
				  irq_flags, irq_name[irq_idx], ce_pipe);
		if (ret) {
			/* NOTE(review): irqs requested in earlier
			 * iterations are not freed on this path — verify
			 * against callers' error handling
			 */
			ath11k_err(ab, "failed to request irq %d: %d\n",
				   irq_idx, ret);
			return ret;
		}

		ab->irq_num[irq_idx] = irq;
		msi_data_idx++;

		ath11k_pcic_ce_irq_disable(ab, i);
	}

	ret = ath11k_pcic_ext_irq_config(ab);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_config_irq);
705
706void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab)
707{
708	int i;
709
710	set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
711
712	for (i = 0; i < ab->hw_params.ce_count; i++) {
713		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
714			continue;
715		ath11k_pcic_ce_irq_enable(ab, i);
716	}
717}
718EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable);
719
720static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab)
721{
722	int i;
723
724	for (i = 0; i < ab->hw_params.ce_count; i++) {
725		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
726
727		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
728			continue;
729
730		tasklet_kill(&ce_pipe->intr_tq);
731	}
732}
733
/* Fully quiesce CE interrupt processing: mark CE irqs disabled and mask
 * the lines, wait for running hard-irq handlers to finish, then kill
 * the per-pipe tasklets — in that order, so no new work can be
 * scheduled behind the teardown.
 */
void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irqs_disable(ab);
	ath11k_pcic_sync_ce_irqs(ab);
	ath11k_pcic_kill_tasklets(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync);
741
/* Stop the device's data path: quiesce CE interrupts (disable, sync,
 * kill tasklets), then clean up the CE pipes themselves.
 */
void ath11k_pcic_stop(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irq_disable_sync(ab);
	ath11k_ce_cleanup_pipes(ab);
}
EXPORT_SYMBOL(ath11k_pcic_stop);
748
/* Start the device's data path: enable CE interrupts and post rx
 * buffers.  Always returns 0.
 */
int ath11k_pcic_start(struct ath11k_base *ab)
{
	/* flag consulted by ath11k_pcic_read()'s wakeup logic */
	set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);

	ath11k_pcic_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_start);
759
/* Resolve the uplink and downlink CE pipe numbers for an HTC service.
 *
 * Walks the hw_params service-to-CE map (little-endian entries) and
 * fills *ul_pipe / *dl_pipe from the matching PIPEDIR_OUT / PIPEDIR_IN
 * (or combined PIPEDIR_INOUT) entries.  WARN_ONs flag a malformed map
 * that assigns the same direction twice.  Returns 0 on success or
 * -ENOENT if either direction is missing for service_id.
 */
int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
				    u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
		entry = &ab->hw_params.svc_to_ce_map[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe);
803
804int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
805				 const struct ath11k_pci_ops *pci_ops)
806{
807	if (!pci_ops)
808		return 0;
809
810	/* Return error if mandatory pci_ops callbacks are missing */
811	if (!pci_ops->get_msi_irq || !pci_ops->window_write32 ||
812	    !pci_ops->window_read32)
813		return -EINVAL;
814
815	ab->pci.ops = pci_ops;
816	return 0;
817}
818EXPORT_SYMBOL(ath11k_pcic_register_pci_ops);
819
820void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
821{
822	int i;
823
824	for (i = 0; i < ab->hw_params.ce_count; i++) {
825		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
826		    i == ATH11K_PCI_CE_WAKE_IRQ)
827			continue;
828		ath11k_pcic_ce_irq_enable(ab, i);
829	}
830}
831EXPORT_SYMBOL(ath11k_pci_enable_ce_irqs_except_wake_irq);
832
833void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
834{
835	int i;
836	int irq_idx;
837	struct ath11k_ce_pipe *ce_pipe;
838
839	for (i = 0; i < ab->hw_params.ce_count; i++) {
840		ce_pipe = &ab->ce.ce_pipe[i];
841		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
842
843		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
844		    i == ATH11K_PCI_CE_WAKE_IRQ)
845			continue;
846
847		disable_irq_nosync(ab->irq_num[irq_idx]);
848		synchronize_irq(ab->irq_num[irq_idx]);
849		tasklet_kill(&ce_pipe->intr_tq);
850	}
851}
852EXPORT_SYMBOL(ath11k_pci_disable_ce_irqs_except_wake_irq);