v6.8
   1// SPDX-License-Identifier: BSD-3-Clause-Clear
   2/*
   3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
   4 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/platform_device.h>
   9#include <linux/property.h>
  10#include <linux/of_device.h>
  11#include <linux/of.h>
  12#include <linux/dma-mapping.h>
  13#include <linux/of_address.h>
  14#include <linux/iommu.h>
  15#include "ahb.h"
  16#include "debug.h"
  17#include "hif.h"
  18#include "qmi.h"
  19#include <linux/remoteproc.h>
  20#include "pcic.h"
  21#include <linux/soc/qcom/smem.h>
  22#include <linux/soc/qcom/smem_state.h>
  23
  24static const struct of_device_id ath11k_ahb_of_match[] = {
  25	/* TODO: Should we change the compatible string to something similar
  26	 * to one that ath10k uses?
  27	 */
  28	{ .compatible = "qcom,ipq8074-wifi",
  29	  .data = (void *)ATH11K_HW_IPQ8074,
  30	},
  31	{ .compatible = "qcom,ipq6018-wifi",
  32	  .data = (void *)ATH11K_HW_IPQ6018_HW10,
  33	},
  34	{ .compatible = "qcom,wcn6750-wifi",
  35	  .data = (void *)ATH11K_HW_WCN6750_HW10,
  36	},
  37	{ .compatible = "qcom,ipq5018-wifi",
  38	  .data = (void *)ATH11K_HW_IPQ5018_HW10,
  39	},
  40	{ }
  41};
  42
  43MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
  44
  45#define ATH11K_IRQ_CE0_OFFSET 4
  46
  47static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
  48	"misc-pulse1",
  49	"misc-latch",
  50	"sw-exception",
  51	"watchdog",
  52	"ce0",
  53	"ce1",
  54	"ce2",
  55	"ce3",
  56	"ce4",
  57	"ce5",
  58	"ce6",
  59	"ce7",
  60	"ce8",
  61	"ce9",
  62	"ce10",
  63	"ce11",
  64	"host2wbm-desc-feed",
  65	"host2reo-re-injection",
  66	"host2reo-command",
  67	"host2rxdma-monitor-ring3",
  68	"host2rxdma-monitor-ring2",
  69	"host2rxdma-monitor-ring1",
  70	"reo2ost-exception",
  71	"wbm2host-rx-release",
  72	"reo2host-status",
  73	"reo2host-destination-ring4",
  74	"reo2host-destination-ring3",
  75	"reo2host-destination-ring2",
  76	"reo2host-destination-ring1",
  77	"rxdma2host-monitor-destination-mac3",
  78	"rxdma2host-monitor-destination-mac2",
  79	"rxdma2host-monitor-destination-mac1",
  80	"ppdu-end-interrupts-mac3",
  81	"ppdu-end-interrupts-mac2",
  82	"ppdu-end-interrupts-mac1",
  83	"rxdma2host-monitor-status-ring-mac3",
  84	"rxdma2host-monitor-status-ring-mac2",
  85	"rxdma2host-monitor-status-ring-mac1",
  86	"host2rxdma-host-buf-ring-mac3",
  87	"host2rxdma-host-buf-ring-mac2",
  88	"host2rxdma-host-buf-ring-mac1",
  89	"rxdma2host-destination-ring-mac3",
  90	"rxdma2host-destination-ring-mac2",
  91	"rxdma2host-destination-ring-mac1",
  92	"host2tcl-input-ring4",
  93	"host2tcl-input-ring3",
  94	"host2tcl-input-ring2",
  95	"host2tcl-input-ring1",
  96	"wbm2host-tx-completions-ring3",
  97	"wbm2host-tx-completions-ring2",
  98	"wbm2host-tx-completions-ring1",
  99	"tcl2host-status-ring",
 100};
 101
 102/* enum ext_irq_num - irq numbers that can be used by external modules
 103 * like datapath
 104 */
 105enum ext_irq_num {
 106	host2wbm_desc_feed = 16,
 107	host2reo_re_injection,
 108	host2reo_command,
 109	host2rxdma_monitor_ring3,
 110	host2rxdma_monitor_ring2,
 111	host2rxdma_monitor_ring1,
 112	reo2host_exception,
 113	wbm2host_rx_release,
 114	reo2host_status,
 115	reo2host_destination_ring4,
 116	reo2host_destination_ring3,
 117	reo2host_destination_ring2,
 118	reo2host_destination_ring1,
 119	rxdma2host_monitor_destination_mac3,
 120	rxdma2host_monitor_destination_mac2,
 121	rxdma2host_monitor_destination_mac1,
 122	ppdu_end_interrupts_mac3,
 123	ppdu_end_interrupts_mac2,
 124	ppdu_end_interrupts_mac1,
 125	rxdma2host_monitor_status_ring_mac3,
 126	rxdma2host_monitor_status_ring_mac2,
 127	rxdma2host_monitor_status_ring_mac1,
 128	host2rxdma_host_buf_ring_mac3,
 129	host2rxdma_host_buf_ring_mac2,
 130	host2rxdma_host_buf_ring_mac1,
 131	rxdma2host_destination_ring_mac3,
 132	rxdma2host_destination_ring_mac2,
 133	rxdma2host_destination_ring_mac1,
 134	host2tcl_input_ring4,
 135	host2tcl_input_ring3,
 136	host2tcl_input_ring2,
 137	host2tcl_input_ring1,
 138	wbm2host_tx_completions_ring3,
 139	wbm2host_tx_completions_ring2,
 140	wbm2host_tx_completions_ring1,
 141	tcl2host_status_ring,
 142};
 143
 144static int
 145ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
 146{
 147	return ab->pci.msi.irqs[vector];
 148}
 149
 150static inline u32
 151ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
 152{
 153	u32 window_start = 0;
 154
 155	/* If offset lies within DP register range, use 1st window */
 156	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
 157		window_start = ATH11K_PCI_WINDOW_START;
 158	/* If offset lies within CE register range, use 2nd window */
 159	else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
 160		 ATH11K_PCI_WINDOW_RANGE_MASK)
 161		window_start = 2 * ATH11K_PCI_WINDOW_START;
 162
 163	return window_start;
 164}
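/* Illustrative sketch (not part of the upstream file), assuming
 * HAL_SEQ_WCSS_UMAC_OFFSET and HAL_SEQ_WCSS_UMAC_CE0_SRC_REG() are aligned to
 * the window size: a DP register at HAL_SEQ_WCSS_UMAC_OFFSET + 0x10 XORs down
 * to 0x10, which is below ATH11K_PCI_WINDOW_RANGE_MASK, so the accessors below
 * resolve it to
 *
 *	ab->mem + ATH11K_PCI_WINDOW_START + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)
 *
 * while a CE register matches the second test and lands in the window at
 * 2 * ATH11K_PCI_WINDOW_START.
 */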
 165
 166static void
 167ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
 168{
 169	u32 window_start;
 170
  171	/* WCN6750 uses static window based register access */
 172	window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
 173
 174	iowrite32(value, ab->mem + window_start +
 175		  (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
 176}
 177
 178static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
 179{
 180	u32 window_start;
 181	u32 val;
 182
 183	/* WCN6750 uses static window based register access */
 184	window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
 185
 186	val = ioread32(ab->mem + window_start +
 187		       (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
 188	return val;
 189}
 190
 191static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
 192	.wakeup = NULL,
 193	.release = NULL,
 194	.get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
 195	.window_write32 = ath11k_ahb_window_write32_wcn6750,
 196	.window_read32 = ath11k_ahb_window_read32_wcn6750,
 197};
 198
 199static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
 200{
 201	return ioread32(ab->mem + offset);
 202}
 203
 204static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
 205{
 206	iowrite32(value, ab->mem + offset);
 207}
 208
 209static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
 210{
 211	int i;
 212
 213	for (i = 0; i < ab->hw_params.ce_count; i++) {
 214		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
 215
 216		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
 217			continue;
 218
 219		tasklet_kill(&ce_pipe->intr_tq);
 220	}
 221}
 222
 223static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
 224{
 225	int i;
 226
 227	for (i = 0; i < irq_grp->num_irq; i++)
 228		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
 229}
 230
 231static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
 232{
 233	int i;
 234
 235	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
 236		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
 237
 238		ath11k_ahb_ext_grp_disable(irq_grp);
 239
 240		if (irq_grp->napi_enabled) {
 241			napi_synchronize(&irq_grp->napi);
 242			napi_disable(&irq_grp->napi);
 243			irq_grp->napi_enabled = false;
 244		}
 245	}
 246}
 247
 248static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
 249{
 250	int i;
 251
 252	for (i = 0; i < irq_grp->num_irq; i++)
 253		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
 254}
 255
 256static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
 257{
 258	u32 val;
 259
 260	val = ath11k_ahb_read32(ab, offset);
 261	ath11k_ahb_write32(ab, offset, val | BIT(bit));
 262}
 263
 264static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
 265{
 266	u32 val;
 267
 268	val = ath11k_ahb_read32(ab, offset);
 269	ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
 270}
 271
 272static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
 273{
 274	const struct ce_attr *ce_attr;
 275	const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
 276	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
 277
 278	ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
 279	ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
 280	ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
 281
 282	ce_attr = &ab->hw_params.host_ce_config[ce_id];
 283	if (ce_attr->src_nentries)
 284		ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr);
 285
 286	if (ce_attr->dest_nentries) {
 287		ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
 288		ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
 289				    ie3_reg_addr);
 290	}
 291}
 292
 293static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
 294{
 295	const struct ce_attr *ce_attr;
 296	const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
 297	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
 298
 299	ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
 300	ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
 301	ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
 302
 303	ce_attr = &ab->hw_params.host_ce_config[ce_id];
 304	if (ce_attr->src_nentries)
 305		ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);
 306
 307	if (ce_attr->dest_nentries) {
 308		ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
 309		ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
 310				      ie3_reg_addr);
 311	}
 312}
 313
 314static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
 315{
 316	int i;
 317	int irq_idx;
 318
 319	for (i = 0; i < ab->hw_params.ce_count; i++) {
 320		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
 321			continue;
 322
 323		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
 324		synchronize_irq(ab->irq_num[irq_idx]);
 325	}
 326}
 327
 328static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
 329{
 330	int i, j;
 331	int irq_idx;
 332
 333	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
 334		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
 335
 336		for (j = 0; j < irq_grp->num_irq; j++) {
 337			irq_idx = irq_grp->irqs[j];
 338			synchronize_irq(ab->irq_num[irq_idx]);
 339		}
 340	}
 341}
 342
 343static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
 344{
 345	int i;
 346
 347	for (i = 0; i < ab->hw_params.ce_count; i++) {
 348		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
 349			continue;
 350		ath11k_ahb_ce_irq_enable(ab, i);
 351	}
 352}
 353
 354static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
 355{
 356	int i;
 357
 358	for (i = 0; i < ab->hw_params.ce_count; i++) {
 359		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
 360			continue;
 361		ath11k_ahb_ce_irq_disable(ab, i);
 362	}
 363}
 364
 365static int ath11k_ahb_start(struct ath11k_base *ab)
 366{
 367	ath11k_ahb_ce_irqs_enable(ab);
 368	ath11k_ce_rx_post_buf(ab);
 369
 370	return 0;
 371}
 372
 373static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
 374{
 375	int i;
 376
 377	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
 378		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
 379
 380		if (!irq_grp->napi_enabled) {
 381			napi_enable(&irq_grp->napi);
 382			irq_grp->napi_enabled = true;
 383		}
 384		ath11k_ahb_ext_grp_enable(irq_grp);
 385	}
 386}
 387
 388static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
 389{
 390	__ath11k_ahb_ext_irq_disable(ab);
 391	ath11k_ahb_sync_ext_irqs(ab);
 392}
 393
 394static void ath11k_ahb_stop(struct ath11k_base *ab)
 395{
 396	if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
 397		ath11k_ahb_ce_irqs_disable(ab);
 398	ath11k_ahb_sync_ce_irqs(ab);
 399	ath11k_ahb_kill_tasklets(ab);
 400	del_timer_sync(&ab->rx_replenish_retry);
 401	ath11k_ce_cleanup_pipes(ab);
 402}
 403
 404static int ath11k_ahb_power_up(struct ath11k_base *ab)
 405{
 406	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 407	int ret;
 408
 409	ret = rproc_boot(ab_ahb->tgt_rproc);
 410	if (ret)
 411		ath11k_err(ab, "failed to boot the remote processor Q6\n");
 412
 413	return ret;
 414}
 415
 416static void ath11k_ahb_power_down(struct ath11k_base *ab)
 417{
 418	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 419
 420	rproc_shutdown(ab_ahb->tgt_rproc);
 421}
 422
 423static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
 424{
 425	struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
 426
 427	cfg->tgt_ce_len = ab->hw_params.target_ce_count;
 428	cfg->tgt_ce = ab->hw_params.target_ce_config;
 429	cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
 430	cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
 431	ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
 432}
 433
 434static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
 435{
 436	int i, j;
 437
 438	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
 439		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
 440
 441		for (j = 0; j < irq_grp->num_irq; j++)
 442			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
 443
 444		netif_napi_del(&irq_grp->napi);
 445	}
 446}
 447
 448static void ath11k_ahb_free_irq(struct ath11k_base *ab)
 449{
 450	int irq_idx;
 451	int i;
 452
 453	if (ab->hw_params.hybrid_bus_type)
 454		return ath11k_pcic_free_irq(ab);
 455
 456	for (i = 0; i < ab->hw_params.ce_count; i++) {
 457		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
 458			continue;
 459		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
 460		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
 461	}
 462
 463	ath11k_ahb_free_ext_irq(ab);
 464}
 465
 466static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t)
 467{
 468	struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
 469
 470	ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
 471
 472	ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
 473}
 474
 475static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
 476{
 477	struct ath11k_ce_pipe *ce_pipe = arg;
 478
 479	/* last interrupt received for this CE */
 480	ce_pipe->timestamp = jiffies;
 481
 482	ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
 483
 484	tasklet_schedule(&ce_pipe->intr_tq);
 485
 486	return IRQ_HANDLED;
 487}
 488
 489static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
 490{
 491	struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
 492						struct ath11k_ext_irq_grp,
 493						napi);
 494	struct ath11k_base *ab = irq_grp->ab;
 495	int work_done;
 496
 497	work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
 498	if (work_done < budget) {
 499		napi_complete_done(napi, work_done);
 500		ath11k_ahb_ext_grp_enable(irq_grp);
 501	}
 502
 503	if (work_done > budget)
 504		work_done = budget;
 505
 506	return work_done;
 507}
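/* Note (not from the upstream file): napi_complete_done() is only called when
 * the group did less work than the budget, so a busy group keeps getting
 * polled; the clamp above also keeps the return value within the budget, as
 * the NAPI poll contract expects.
 */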
 508
 509static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
 510{
 511	struct ath11k_ext_irq_grp *irq_grp = arg;
 512
 513	/* last interrupt received for this group */
 514	irq_grp->timestamp = jiffies;
 515
 516	ath11k_ahb_ext_grp_disable(irq_grp);
 517
 518	napi_schedule(&irq_grp->napi);
 519
 520	return IRQ_HANDLED;
 521}
 522
 523static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
 524{
 525	struct ath11k_hw_params *hw = &ab->hw_params;
 526	int i, j;
 527	int irq;
 528	int ret;
 529
 530	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
 531		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
 532		u32 num_irq = 0;
 533
 534		irq_grp->ab = ab;
 535		irq_grp->grp_id = i;
 536		init_dummy_netdev(&irq_grp->napi_ndev);
 537		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
 538			       ath11k_ahb_ext_grp_napi_poll);
 539
 540		for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
 541			if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
 542				irq_grp->irqs[num_irq++] =
 543					wbm2host_tx_completions_ring1 - j;
 544			}
 545
 546			if (ab->hw_params.ring_mask->rx[i] & BIT(j)) {
 547				irq_grp->irqs[num_irq++] =
 548					reo2host_destination_ring1 - j;
 549			}
 550
 551			if (ab->hw_params.ring_mask->rx_err[i] & BIT(j))
 552				irq_grp->irqs[num_irq++] = reo2host_exception;
 553
 554			if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j))
 555				irq_grp->irqs[num_irq++] = wbm2host_rx_release;
 556
 557			if (ab->hw_params.ring_mask->reo_status[i] & BIT(j))
 558				irq_grp->irqs[num_irq++] = reo2host_status;
 559
 560			if (j < ab->hw_params.max_radios) {
 561				if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) {
 562					irq_grp->irqs[num_irq++] =
 563						rxdma2host_destination_ring_mac1 -
 564						ath11k_hw_get_mac_from_pdev_id(hw, j);
 565				}
 566
 567				if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) {
 568					irq_grp->irqs[num_irq++] =
 569						host2rxdma_host_buf_ring_mac1 -
 570						ath11k_hw_get_mac_from_pdev_id(hw, j);
 571				}
 572
 573				if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) {
 574					irq_grp->irqs[num_irq++] =
 575						ppdu_end_interrupts_mac1 -
 576						ath11k_hw_get_mac_from_pdev_id(hw, j);
 577					irq_grp->irqs[num_irq++] =
 578						rxdma2host_monitor_status_ring_mac1 -
 579						ath11k_hw_get_mac_from_pdev_id(hw, j);
 580				}
 581			}
 582		}
 583		irq_grp->num_irq = num_irq;
 584
 585		for (j = 0; j < irq_grp->num_irq; j++) {
 586			int irq_idx = irq_grp->irqs[j];
 587
 588			irq = platform_get_irq_byname(ab->pdev,
 589						      irq_name[irq_idx]);
 590			ab->irq_num[irq_idx] = irq;
 591			irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
 592			ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
 593					  IRQF_TRIGGER_RISING,
 594					  irq_name[irq_idx], irq_grp);
 595			if (ret) {
 596				ath11k_err(ab, "failed request_irq for %d\n",
 597					   irq);
 598			}
 599		}
 600	}
 601
 602	return 0;
 603}
 604
 605static int ath11k_ahb_config_irq(struct ath11k_base *ab)
 606{
 607	int irq, irq_idx, i;
 608	int ret;
 609
 610	if (ab->hw_params.hybrid_bus_type)
 611		return ath11k_pcic_config_irq(ab);
 612
 613	/* Configure CE irqs */
 614	for (i = 0; i < ab->hw_params.ce_count; i++) {
 615		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
 616
 617		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
 618			continue;
 619
 620		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
 621
 622		tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet);
 623		irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
 624		ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
 625				  IRQF_TRIGGER_RISING, irq_name[irq_idx],
 626				  ce_pipe);
 627		if (ret)
 628			return ret;
 629
 630		ab->irq_num[irq_idx] = irq;
 631	}
 632
 633	/* Configure external interrupts */
 634	ret = ath11k_ahb_config_ext_irq(ab);
 635
 636	return ret;
 637}
 638
 639static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
 640					  u8 *ul_pipe, u8 *dl_pipe)
 641{
 642	const struct service_to_pipe *entry;
 643	bool ul_set = false, dl_set = false;
 644	int i;
 645
 646	for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
 647		entry = &ab->hw_params.svc_to_ce_map[i];
 648
 649		if (__le32_to_cpu(entry->service_id) != service_id)
 650			continue;
 651
 652		switch (__le32_to_cpu(entry->pipedir)) {
 653		case PIPEDIR_NONE:
 654			break;
 655		case PIPEDIR_IN:
 656			WARN_ON(dl_set);
 657			*dl_pipe = __le32_to_cpu(entry->pipenum);
 658			dl_set = true;
 659			break;
 660		case PIPEDIR_OUT:
 661			WARN_ON(ul_set);
 662			*ul_pipe = __le32_to_cpu(entry->pipenum);
 663			ul_set = true;
 664			break;
 665		case PIPEDIR_INOUT:
 666			WARN_ON(dl_set);
 667			WARN_ON(ul_set);
 668			*dl_pipe = __le32_to_cpu(entry->pipenum);
 669			*ul_pipe = __le32_to_cpu(entry->pipenum);
 670			dl_set = true;
 671			ul_set = true;
 672			break;
 673		}
 674	}
 675
 676	if (WARN_ON(!ul_set || !dl_set))
 677		return -ENOENT;
 678
 679	return 0;
 680}
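/* Illustration with a hypothetical entry (not taken from the real
 * svc_to_ce_map): { .service_id = <svc>, .pipedir = PIPEDIR_INOUT,
 * .pipenum = 3 } would set both *ul_pipe and *dl_pipe to 3, whereas a pair of
 * PIPEDIR_OUT/PIPEDIR_IN entries splits the service across two CE pipes.
 */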
 681
 682static int ath11k_ahb_hif_suspend(struct ath11k_base *ab)
 683{
 684	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 685	u32 wake_irq;
 686	u32 value = 0;
 687	int ret;
 688
 689	if (!device_may_wakeup(ab->dev))
 690		return -EPERM;
 691
 692	wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
 693
 694	ret = enable_irq_wake(wake_irq);
 695	if (ret) {
 696		ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret);
 697		return ret;
 698	}
 699
 700	value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
 701				ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
 702	value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER,
 703				 ATH11K_AHB_SMP2P_SMEM_MSG);
 704
 705	ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
 706					  ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
 707	if (ret) {
 708		ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
 709		return ret;
 710	}
 711
 712	ath11k_dbg(ab, ATH11K_DBG_AHB, "device suspended\n");
 713
 714	return ret;
 715}
 716
 717static int ath11k_ahb_hif_resume(struct ath11k_base *ab)
 718{
 719	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 720	u32 wake_irq;
 721	u32 value = 0;
 722	int ret;
 723
 724	if (!device_may_wakeup(ab->dev))
 725		return -EPERM;
 726
 727	wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
 728
 729	ret = disable_irq_wake(wake_irq);
 730	if (ret) {
 731		ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret);
 732		return ret;
 733	}
 734
 735	reinit_completion(&ab->wow.wakeup_completed);
 736
 737	value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
 738				ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
 739	value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT,
 740				 ATH11K_AHB_SMP2P_SMEM_MSG);
 741
 742	ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
 743					  ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
 744	if (ret) {
  745		ath11k_err(ab, "failed to send smp2p power save exit cmd :%d\n", ret);
 746		return ret;
 747	}
 748
 749	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
 750	if (ret == 0) {
 751		ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
 752		return -ETIMEDOUT;
 753	}
 754
 755	ath11k_dbg(ab, ATH11K_DBG_AHB, "device resumed\n");
 756
 757	return 0;
 758}
 759
 760static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
 761	.start = ath11k_ahb_start,
 762	.stop = ath11k_ahb_stop,
 763	.read32 = ath11k_ahb_read32,
 764	.write32 = ath11k_ahb_write32,
 765	.read = NULL,
 766	.irq_enable = ath11k_ahb_ext_irq_enable,
 767	.irq_disable = ath11k_ahb_ext_irq_disable,
 768	.map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
 769	.power_down = ath11k_ahb_power_down,
 770	.power_up = ath11k_ahb_power_up,
 771};
 772
 773static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
 774	.start = ath11k_pcic_start,
 775	.stop = ath11k_pcic_stop,
 776	.read32 = ath11k_pcic_read32,
 777	.write32 = ath11k_pcic_write32,
 778	.read = NULL,
 779	.irq_enable = ath11k_pcic_ext_irq_enable,
 780	.irq_disable = ath11k_pcic_ext_irq_disable,
 781	.get_msi_address =  ath11k_pcic_get_msi_address,
 782	.get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
 783	.map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
 784	.power_down = ath11k_ahb_power_down,
 785	.power_up = ath11k_ahb_power_up,
 786	.suspend = ath11k_ahb_hif_suspend,
 787	.resume = ath11k_ahb_hif_resume,
 788	.ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq,
 789	.ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq,
 790};
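/* Note (not from the upstream file): WCN6750 is AHB-attached but is treated as
 * a hybrid bus device, so most of its hif ops above reuse the shared PCIC
 * helpers; the ab->hw_params.hybrid_bus_type checks elsewhere in this file
 * select the same split for IRQ setup and resource handling.
 */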
 791
 792static int ath11k_core_get_rproc(struct ath11k_base *ab)
 793{
 794	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 795	struct device *dev = ab->dev;
 796	struct rproc *prproc;
 797	phandle rproc_phandle;
 798
 799	if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
 800		ath11k_err(ab, "failed to get q6_rproc handle\n");
 801		return -ENOENT;
 802	}
 803
 804	prproc = rproc_get_by_phandle(rproc_phandle);
 805	if (!prproc) {
 806		ath11k_dbg(ab, ATH11K_DBG_AHB, "failed to get rproc, deferring\n");
 807		return -EPROBE_DEFER;
 808	}
 809	ab_ahb->tgt_rproc = prproc;
 810
 811	return 0;
 812}
 813
 814static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
 815{
 816	struct platform_device *pdev = ab->pdev;
 817	phys_addr_t msi_addr_pa;
 818	dma_addr_t msi_addr_iova;
 819	struct resource *res;
 820	int int_prop;
 821	int ret;
 822	int i;
 823
 824	ret = ath11k_pcic_init_msi_config(ab);
 825	if (ret) {
 826		ath11k_err(ab, "failed to init msi config: %d\n", ret);
 827		return ret;
 828	}
 829
 830	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 831	if (!res) {
 832		ath11k_err(ab, "failed to fetch msi_addr\n");
 833		return -ENOENT;
 834	}
 835
 836	msi_addr_pa = res->start;
 837	msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
 838					 DMA_FROM_DEVICE, 0);
 839	if (dma_mapping_error(ab->dev, msi_addr_iova))
 840		return -ENOMEM;
 841
 842	ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
 843	ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);
 844
 845	ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
 846	if (ret)
 847		return ret;
 848
 849	ab->pci.msi.ep_base_data = int_prop + 32;
 850
 851	for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
 852		ret = platform_get_irq(pdev, i);
 853		if (ret < 0)
 854			return ret;
 855
 856		ab->pci.msi.irqs[i] = ret;
 857	}
 858
 859	set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
 860
 861	return 0;
 862}
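/* Note (not from the upstream file, hedged reading of the code above): the MSI
 * target address is taken from the platform MEM resource and DMA-mapped so the
 * device presumably gets an IOVA it can write to raise MSIs, and the MSI base
 * data is derived from the second cell of the DT "interrupts" property; the
 * +32 looks like the GIC SPI offset, turning the DT cell into an absolute
 * interrupt number.
 */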
 863
 864static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab)
 865{
 866	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 867
 868	if (!ab->hw_params.smp2p_wow_exit)
 869		return 0;
 870
 871	ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out",
 872							    &ab_ahb->smp2p_info.smem_bit);
 873	if (IS_ERR(ab_ahb->smp2p_info.smem_state)) {
 874		ath11k_err(ab, "failed to fetch smem state: %ld\n",
 875			   PTR_ERR(ab_ahb->smp2p_info.smem_state));
 876		return PTR_ERR(ab_ahb->smp2p_info.smem_state);
 877	}
 878
 879	return 0;
 880}
 881
 882static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
 883{
 884	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 885
 886	if (!ab->hw_params.smp2p_wow_exit)
 887		return;
 888
 889	qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
 890}
 891
 892static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
 893{
 894	struct platform_device *pdev = ab->pdev;
 895	struct resource *mem_res;
 896	void __iomem *mem;
 897
 898	if (ab->hw_params.hybrid_bus_type)
 899		return ath11k_ahb_setup_msi_resources(ab);
 900
 901	mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
 902	if (IS_ERR(mem)) {
 903		dev_err(&pdev->dev, "ioremap error\n");
 904		return PTR_ERR(mem);
 905	}
 906
 907	ab->mem = mem;
 908	ab->mem_len = resource_size(mem_res);
 909
 910	return 0;
 911}
 912
 913static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
 914{
 915	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 916	struct device *dev = ab->dev;
 917	struct device_node *node;
 918	struct resource r;
 919	int ret;
 920
 921	node = of_parse_phandle(dev->of_node, "memory-region", 0);
 922	if (!node)
 923		return -ENOENT;
 924
 925	ret = of_address_to_resource(node, 0, &r);
 926	of_node_put(node);
 927	if (ret) {
 928		dev_err(dev, "failed to resolve msa fixed region\n");
 929		return ret;
 930	}
 931
 932	ab_ahb->fw.msa_paddr = r.start;
 933	ab_ahb->fw.msa_size = resource_size(&r);
 934
 935	node = of_parse_phandle(dev->of_node, "memory-region", 1);
 936	if (!node)
 937		return -ENOENT;
 938
 939	ret = of_address_to_resource(node, 0, &r);
 940	of_node_put(node);
 941	if (ret) {
 942		dev_err(dev, "failed to resolve ce fixed region\n");
 943		return ret;
 944	}
 945
 946	ab_ahb->fw.ce_paddr = r.start;
 947	ab_ahb->fw.ce_size = resource_size(&r);
 948
 949	return 0;
 950}
 951
 952static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
 953{
 954	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 955	struct device *host_dev = ab->dev;
 956	struct platform_device_info info = {0};
 957	struct iommu_domain *iommu_dom;
 958	struct platform_device *pdev;
 959	struct device_node *node;
 960	int ret;
 961
 962	/* Chipsets not requiring MSA need not initialize
 963	 * MSA resources, return success in such cases.
 964	 */
 965	if (!ab->hw_params.fixed_fw_mem)
 966		return 0;
 967
 968	ret = ath11k_ahb_setup_msa_resources(ab);
 969	if (ret) {
 970		ath11k_err(ab, "failed to setup msa resources\n");
 971		return ret;
 972	}
 973
 974	node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
 975	if (!node) {
 976		ab_ahb->fw.use_tz = true;
 977		return 0;
 978	}
 979
 980	info.fwnode = &node->fwnode;
 981	info.parent = host_dev;
 982	info.name = node->name;
 983	info.dma_mask = DMA_BIT_MASK(32);
 984
 985	pdev = platform_device_register_full(&info);
 986	if (IS_ERR(pdev)) {
 987		of_node_put(node);
 988		return PTR_ERR(pdev);
 989	}
 990
 991	ret = of_dma_configure(&pdev->dev, node, true);
 992	if (ret) {
 993		ath11k_err(ab, "dma configure fail: %d\n", ret);
 994		goto err_unregister;
 995	}
 996
 997	ab_ahb->fw.dev = &pdev->dev;
 998
 999	iommu_dom = iommu_domain_alloc(&platform_bus_type);
1000	if (!iommu_dom) {
1001		ath11k_err(ab, "failed to allocate iommu domain\n");
1002		ret = -ENOMEM;
1003		goto err_unregister;
1004	}
1005
1006	ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
1007	if (ret) {
1008		ath11k_err(ab, "could not attach device: %d\n", ret);
1009		goto err_iommu_free;
1010	}
1011
1012	ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
1013			ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
1014			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
1015	if (ret) {
1016		ath11k_err(ab, "failed to map firmware region: %d\n", ret);
1017		goto err_iommu_detach;
1018	}
1019
1020	ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
1021			ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
1022			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
1023	if (ret) {
1024		ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
1025		goto err_iommu_unmap;
1026	}
1027
1028	ab_ahb->fw.use_tz = false;
1029	ab_ahb->fw.iommu_domain = iommu_dom;
1030	of_node_put(node);
1031
1032	return 0;
1033
1034err_iommu_unmap:
1035	iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
1036
1037err_iommu_detach:
1038	iommu_detach_device(iommu_dom, ab_ahb->fw.dev);
1039
1040err_iommu_free:
1041	iommu_domain_free(iommu_dom);
1042
1043err_unregister:
1044	platform_device_unregister(pdev);
1045	of_node_put(node);
1046
1047	return ret;
1048}
1049
1050static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
1051{
1052	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
1053	struct iommu_domain *iommu;
1054	size_t unmapped_size;
1055
 1056	/* Chipsets not requiring MSA would not have initialized
1057	 * MSA resources, return success in such cases.
1058	 */
1059	if (!ab->hw_params.fixed_fw_mem)
1060		return 0;
1061
1062	if (ab_ahb->fw.use_tz)
1063		return 0;
1064
1065	iommu = ab_ahb->fw.iommu_domain;
1066
1067	unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
1068	if (unmapped_size != ab_ahb->fw.msa_size)
1069		ath11k_err(ab, "failed to unmap firmware: %zu\n",
1070			   unmapped_size);
1071
1072	unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
1073	if (unmapped_size != ab_ahb->fw.ce_size)
1074		ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
1075			   unmapped_size);
1076
1077	iommu_detach_device(iommu, ab_ahb->fw.dev);
1078	iommu_domain_free(iommu);
1079
1080	platform_device_unregister(to_platform_device(ab_ahb->fw.dev));
1081
1082	return 0;
1083}
1084
1085static int ath11k_ahb_probe(struct platform_device *pdev)
1086{
1087	struct ath11k_base *ab;
1088	const struct ath11k_hif_ops *hif_ops;
1089	const struct ath11k_pci_ops *pci_ops;
1090	enum ath11k_hw_rev hw_rev;
1091	int ret;
1092
1093	hw_rev = (uintptr_t)device_get_match_data(&pdev->dev);
1094
1095	switch (hw_rev) {
1096	case ATH11K_HW_IPQ8074:
1097	case ATH11K_HW_IPQ6018_HW10:
1098	case ATH11K_HW_IPQ5018_HW10:
1099		hif_ops = &ath11k_ahb_hif_ops_ipq8074;
1100		pci_ops = NULL;
1101		break;
1102	case ATH11K_HW_WCN6750_HW10:
1103		hif_ops = &ath11k_ahb_hif_ops_wcn6750;
1104		pci_ops = &ath11k_ahb_pci_ops_wcn6750;
1105		break;
1106	default:
1107		dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
1108		return -EOPNOTSUPP;
1109	}
1110
1111	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1112	if (ret) {
1113		dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
1114		return ret;
1115	}
1116
1117	ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
1118			       ATH11K_BUS_AHB);
1119	if (!ab) {
1120		dev_err(&pdev->dev, "failed to allocate ath11k base\n");
1121		return -ENOMEM;
1122	}
1123
1124	ab->hif.ops = hif_ops;
1125	ab->pdev = pdev;
1126	ab->hw_rev = hw_rev;
1127	ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
1128	platform_set_drvdata(pdev, ab);
1129
1130	ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
1131	if (ret) {
1132		ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
1133		goto err_core_free;
1134	}
1135
1136	ret = ath11k_core_pre_init(ab);
1137	if (ret)
1138		goto err_core_free;
1139
1140	ret = ath11k_ahb_setup_resources(ab);
1141	if (ret)
1142		goto err_core_free;
1143
1144	ab->mem_ce = ab->mem;
1145
1146	if (ab->hw_params.ce_remap) {
1147		const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
1148		/* ce register space is moved out of wcss unlike ipq8074 or ipq6018
1149		 * and the space is not contiguous, hence remapping the CE registers
1150		 * to a new space for accessing them.
1151		 */
1152		ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
1153		if (!ab->mem_ce) {
1154			dev_err(&pdev->dev, "ce ioremap error\n");
1155			ret = -ENOMEM;
1156			goto err_core_free;
1157		}
1158	}
1159
1160	ret = ath11k_ahb_fw_resources_init(ab);
1161	if (ret)
1162		goto err_core_free;
1163
1164	ret = ath11k_ahb_setup_smp2p_handle(ab);
1165	if (ret)
1166		goto err_fw_deinit;
1167
1168	ret = ath11k_hal_srng_init(ab);
1169	if (ret)
1170		goto err_release_smp2p_handle;
1171
1172	ret = ath11k_ce_alloc_pipes(ab);
1173	if (ret) {
1174		ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
1175		goto err_hal_srng_deinit;
1176	}
1177
1178	ath11k_ahb_init_qmi_ce_config(ab);
1179
1180	ret = ath11k_core_get_rproc(ab);
1181	if (ret) {
1182		ath11k_err(ab, "failed to get rproc: %d\n", ret);
1183		goto err_ce_free;
1184	}
1185
1186	ret = ath11k_core_init(ab);
1187	if (ret) {
1188		ath11k_err(ab, "failed to init core: %d\n", ret);
1189		goto err_ce_free;
1190	}
1191
1192	ret = ath11k_ahb_config_irq(ab);
1193	if (ret) {
1194		ath11k_err(ab, "failed to configure irq: %d\n", ret);
1195		goto err_ce_free;
1196	}
1197
1198	ath11k_qmi_fwreset_from_cold_boot(ab);
1199
1200	return 0;
1201
1202err_ce_free:
1203	ath11k_ce_free_pipes(ab);
1204
1205err_hal_srng_deinit:
1206	ath11k_hal_srng_deinit(ab);
1207
1208err_release_smp2p_handle:
1209	ath11k_ahb_release_smp2p_handle(ab);
1210
1211err_fw_deinit:
1212	ath11k_ahb_fw_resource_deinit(ab);
1213
1214err_core_free:
1215	ath11k_core_free(ab);
1216	platform_set_drvdata(pdev, NULL);
1217
1218	return ret;
1219}
1220
1221static void ath11k_ahb_remove_prepare(struct ath11k_base *ab)
1222{
1223	unsigned long left;
1224
1225	if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
1226		left = wait_for_completion_timeout(&ab->driver_recovery,
1227						   ATH11K_AHB_RECOVERY_TIMEOUT);
1228		if (!left)
1229			ath11k_warn(ab, "failed to receive recovery response completion\n");
1230	}
1231
1232	set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
1233	cancel_work_sync(&ab->restart_work);
1234	cancel_work_sync(&ab->qmi.event_work);
1235}
1236
1237static void ath11k_ahb_free_resources(struct ath11k_base *ab)
1238{
1239	struct platform_device *pdev = ab->pdev;
1240
1241	ath11k_ahb_free_irq(ab);
1242	ath11k_hal_srng_deinit(ab);
1243	ath11k_ahb_release_smp2p_handle(ab);
1244	ath11k_ahb_fw_resource_deinit(ab);
1245	ath11k_ce_free_pipes(ab);
1246
1247	if (ab->hw_params.ce_remap)
1248		iounmap(ab->mem_ce);
1249
1250	ath11k_core_free(ab);
1251	platform_set_drvdata(pdev, NULL);
1252}
1253
1254static void ath11k_ahb_remove(struct platform_device *pdev)
1255{
1256	struct ath11k_base *ab = platform_get_drvdata(pdev);
1257
1258	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
1259		ath11k_ahb_power_down(ab);
1260		ath11k_debugfs_soc_destroy(ab);
1261		ath11k_qmi_deinit_service(ab);
1262		goto qmi_fail;
1263	}
1264
1265	ath11k_ahb_remove_prepare(ab);
1266	ath11k_core_deinit(ab);
1267
1268qmi_fail:
1269	ath11k_ahb_free_resources(ab);
1270}
1271
1272static void ath11k_ahb_shutdown(struct platform_device *pdev)
1273{
1274	struct ath11k_base *ab = platform_get_drvdata(pdev);
1275
1276	/* platform shutdown() & remove() are mutually exclusive.
1277	 * remove() is invoked during rmmod & shutdown() during
1278	 * system reboot/shutdown.
1279	 */
1280	ath11k_ahb_remove_prepare(ab);
1281
1282	if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
1283		goto free_resources;
1284
1285	ath11k_core_deinit(ab);
1286
1287free_resources:
1288	ath11k_ahb_free_resources(ab);
1289}
1290
1291static struct platform_driver ath11k_ahb_driver = {
1292	.driver         = {
1293		.name   = "ath11k",
1294		.of_match_table = ath11k_ahb_of_match,
1295	},
1296	.probe  = ath11k_ahb_probe,
1297	.remove_new = ath11k_ahb_remove,
1298	.shutdown = ath11k_ahb_shutdown,
1299};
1300
1301module_platform_driver(ath11k_ahb_driver);
1302
1303MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
1304MODULE_LICENSE("Dual BSD/GPL");