   1// SPDX-License-Identifier: BSD-3-Clause-Clear
   2/*
   3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
   4 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/platform_device.h>
   9#include <linux/property.h>
  10#include <linux/of_device.h>
  11#include <linux/of.h>
  12#include <linux/dma-mapping.h>
  13#include <linux/of_address.h>
  14#include <linux/iommu.h>
  15#include "ahb.h"
  16#include "debug.h"
  17#include "hif.h"
  18#include "qmi.h"
  19#include <linux/remoteproc.h>
  20#include "pcic.h"
  21#include <linux/soc/qcom/smem.h>
  22#include <linux/soc/qcom/smem_state.h>
  23
  24static const struct of_device_id ath11k_ahb_of_match[] = {
  25	/* TODO: Should we change the compatible string to something similar
  26	 * to one that ath10k uses?
  27	 */
  28	{ .compatible = "qcom,ipq8074-wifi",
  29	  .data = (void *)ATH11K_HW_IPQ8074,
  30	},
  31	{ .compatible = "qcom,ipq6018-wifi",
  32	  .data = (void *)ATH11K_HW_IPQ6018_HW10,
  33	},
  34	{ .compatible = "qcom,wcn6750-wifi",
  35	  .data = (void *)ATH11K_HW_WCN6750_HW10,
  36	},
  37	{ .compatible = "qcom,ipq5018-wifi",
  38	  .data = (void *)ATH11K_HW_IPQ5018_HW10,
  39	},
  40	{ }
  41};
  42
  43MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
  44
  45#define ATH11K_IRQ_CE0_OFFSET 4
  46
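    /* These names are resolved with platform_get_irq_byname(), so they must
     * match the interrupt-names strings in the devicetree node.
     */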
  47static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
  48	"misc-pulse1",
  49	"misc-latch",
  50	"sw-exception",
  51	"watchdog",
  52	"ce0",
  53	"ce1",
  54	"ce2",
  55	"ce3",
  56	"ce4",
  57	"ce5",
  58	"ce6",
  59	"ce7",
  60	"ce8",
  61	"ce9",
  62	"ce10",
  63	"ce11",
  64	"host2wbm-desc-feed",
  65	"host2reo-re-injection",
  66	"host2reo-command",
  67	"host2rxdma-monitor-ring3",
  68	"host2rxdma-monitor-ring2",
  69	"host2rxdma-monitor-ring1",
  70	"reo2ost-exception",
  71	"wbm2host-rx-release",
  72	"reo2host-status",
  73	"reo2host-destination-ring4",
  74	"reo2host-destination-ring3",
  75	"reo2host-destination-ring2",
  76	"reo2host-destination-ring1",
  77	"rxdma2host-monitor-destination-mac3",
  78	"rxdma2host-monitor-destination-mac2",
  79	"rxdma2host-monitor-destination-mac1",
  80	"ppdu-end-interrupts-mac3",
  81	"ppdu-end-interrupts-mac2",
  82	"ppdu-end-interrupts-mac1",
  83	"rxdma2host-monitor-status-ring-mac3",
  84	"rxdma2host-monitor-status-ring-mac2",
  85	"rxdma2host-monitor-status-ring-mac1",
  86	"host2rxdma-host-buf-ring-mac3",
  87	"host2rxdma-host-buf-ring-mac2",
  88	"host2rxdma-host-buf-ring-mac1",
  89	"rxdma2host-destination-ring-mac3",
  90	"rxdma2host-destination-ring-mac2",
  91	"rxdma2host-destination-ring-mac1",
  92	"host2tcl-input-ring4",
  93	"host2tcl-input-ring3",
  94	"host2tcl-input-ring2",
  95	"host2tcl-input-ring1",
  96	"wbm2host-tx-completions-ring3",
  97	"wbm2host-tx-completions-ring2",
  98	"wbm2host-tx-completions-ring1",
  99	"tcl2host-status-ring",
 100};
 101
 102/* enum ext_irq_num - irq numbers that can be used by external modules
 103 * like datapath; the values double as indices into irq_name[] and ab->irq_num[]
 104 */
 105enum ext_irq_num {
 106	host2wbm_desc_feed = 16,
 107	host2reo_re_injection,
 108	host2reo_command,
 109	host2rxdma_monitor_ring3,
 110	host2rxdma_monitor_ring2,
 111	host2rxdma_monitor_ring1,
 112	reo2host_exception,
 113	wbm2host_rx_release,
 114	reo2host_status,
 115	reo2host_destination_ring4,
 116	reo2host_destination_ring3,
 117	reo2host_destination_ring2,
 118	reo2host_destination_ring1,
 119	rxdma2host_monitor_destination_mac3,
 120	rxdma2host_monitor_destination_mac2,
 121	rxdma2host_monitor_destination_mac1,
 122	ppdu_end_interrupts_mac3,
 123	ppdu_end_interrupts_mac2,
 124	ppdu_end_interrupts_mac1,
 125	rxdma2host_monitor_status_ring_mac3,
 126	rxdma2host_monitor_status_ring_mac2,
 127	rxdma2host_monitor_status_ring_mac1,
 128	host2rxdma_host_buf_ring_mac3,
 129	host2rxdma_host_buf_ring_mac2,
 130	host2rxdma_host_buf_ring_mac1,
 131	rxdma2host_destination_ring_mac3,
 132	rxdma2host_destination_ring_mac2,
 133	rxdma2host_destination_ring_mac1,
 134	host2tcl_input_ring4,
 135	host2tcl_input_ring3,
 136	host2tcl_input_ring2,
 137	host2tcl_input_ring1,
 138	wbm2host_tx_completions_ring3,
 139	wbm2host_tx_completions_ring2,
 140	wbm2host_tx_completions_ring1,
 141	tcl2host_status_ring,
 142};
 143
 144static int
 145ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
 146{
 147	return ab->pci.msi.irqs[vector];
 148}
 149
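    /* (offset ^ base) < ATH11K_PCI_WINDOW_RANGE_MASK holds when offset and the
     * base agree on all bits above the window mask, i.e. when offset falls in
     * the same window-sized block as that base.
     */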
 150static inline u32
 151ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
 152{
 153	u32 window_start = 0;
 154
 155	/* If offset lies within DP register range, use 1st window */
 156	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
 157		window_start = ATH11K_PCI_WINDOW_START;
 158	/* If offset lies within CE register range, use 2nd window */
 159	else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
 160		 ATH11K_PCI_WINDOW_RANGE_MASK)
 161		window_start = 2 * ATH11K_PCI_WINDOW_START;
 162
 163	return window_start;
 164}
 165
 166static void
 167ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
 168{
 169	u32 window_start;
 170
 171	/* WCN6750 uses static window based register access */
 172	window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
 173
 174	iowrite32(value, ab->mem + window_start +
 175		  (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
 176}
 177
 178static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
 179{
 180	u32 window_start;
 181	u32 val;
 182
 183	/* WCN6750 uses static window based register access */
 184	window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
 185
 186	val = ioread32(ab->mem + window_start +
 187		       (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
 188	return val;
 189}
 190
 191static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
 192	.wakeup = NULL,
 193	.release = NULL,
 194	.get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
 195	.window_write32 = ath11k_ahb_window_write32_wcn6750,
 196	.window_read32 = ath11k_ahb_window_read32_wcn6750,
 197};
 198
 199static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
 200{
 201	return ioread32(ab->mem + offset);
 202}
 203
 204static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
 205{
 206	iowrite32(value, ab->mem + offset);
 207}
 208
 209static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
 210{
 211	int i;
 212
 213	for (i = 0; i < ab->hw_params.ce_count; i++) {
 214		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
 215
 216		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
 217			continue;
 218
 219		tasklet_kill(&ce_pipe->intr_tq);
 220	}
 221}
 222
 223static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
 224{
 225	int i;
 226
 227	for (i = 0; i < irq_grp->num_irq; i++)
 228		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
 229}
 230
 231static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
 232{
 233	int i;
 234
 235	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
 236		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
 237
 238		ath11k_ahb_ext_grp_disable(irq_grp);
 239
 240		if (irq_grp->napi_enabled) {
 241			napi_synchronize(&irq_grp->napi);
 242			napi_disable(&irq_grp->napi);
 243			irq_grp->napi_enabled = false;
 244		}
 245	}
 246}
 247
 248static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
 249{
 250	int i;
 251
 252	for (i = 0; i < irq_grp->num_irq; i++)
 253		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
 254}
 255
 256static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
 257{
 258	u32 val;
 259
 260	val = ath11k_ahb_read32(ab, offset);
 261	ath11k_ahb_write32(ab, offset, val | BIT(bit));
 262}
 263
 264static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
 265{
 266	u32 val;
 267
 268	val = ath11k_ahb_read32(ab, offset);
 269	ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
 270}
 271
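    /* A CE with source-ring entries toggles its bit in the IE1 register; one
     * with destination-ring entries toggles IE2 and IE3 (the latter shifted by
     * CE_HOST_IE_3_SHIFT). The disable path below uses the same layout.
     */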
 272static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
 273{
 274	const struct ce_attr *ce_attr;
 275	const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
 276	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
 277
 278	ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
 279	ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
 280	ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
 281
 282	ce_attr = &ab->hw_params.host_ce_config[ce_id];
 283	if (ce_attr->src_nentries)
 284		ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr);
 285
 286	if (ce_attr->dest_nentries) {
 287		ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
 288		ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
 289				    ie3_reg_addr);
 290	}
 291}
 292
 293static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
 294{
 295	const struct ce_attr *ce_attr;
 296	const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
 297	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
 298
 299	ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
 300	ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
 301	ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
 302
 303	ce_attr = &ab->hw_params.host_ce_config[ce_id];
 304	if (ce_attr->src_nentries)
 305		ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);
 306
 307	if (ce_attr->dest_nentries) {
 308		ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
 309		ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
 310				      ie3_reg_addr);
 311	}
 312}
 313
 314static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
 315{
 316	int i;
 317	int irq_idx;
 318
 319	for (i = 0; i < ab->hw_params.ce_count; i++) {
 320		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
 321			continue;
 322
 323		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
 324		synchronize_irq(ab->irq_num[irq_idx]);
 325	}
 326}
 327
 328static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
 329{
 330	int i, j;
 331	int irq_idx;
 332
 333	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
 334		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
 335
 336		for (j = 0; j < irq_grp->num_irq; j++) {
 337			irq_idx = irq_grp->irqs[j];
 338			synchronize_irq(ab->irq_num[irq_idx]);
 339		}
 340	}
 341}
 342
 343static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
 344{
 345	int i;
 346
 347	for (i = 0; i < ab->hw_params.ce_count; i++) {
 348		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
 349			continue;
 350		ath11k_ahb_ce_irq_enable(ab, i);
 351	}
 352}
 353
 354static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
 355{
 356	int i;
 357
 358	for (i = 0; i < ab->hw_params.ce_count; i++) {
 359		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
 360			continue;
 361		ath11k_ahb_ce_irq_disable(ab, i);
 362	}
 363}
 364
 365static int ath11k_ahb_start(struct ath11k_base *ab)
 366{
 367	ath11k_ahb_ce_irqs_enable(ab);
 368	ath11k_ce_rx_post_buf(ab);
 369
 370	return 0;
 371}
 372
 373static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
 374{
 375	int i;
 376
 377	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
 378		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
 379
 380		if (!irq_grp->napi_enabled) {
 381			napi_enable(&irq_grp->napi);
 382			irq_grp->napi_enabled = true;
 383		}
 384		ath11k_ahb_ext_grp_enable(irq_grp);
 385	}
 386}
 387
 388static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
 389{
 390	__ath11k_ahb_ext_irq_disable(ab);
 391	ath11k_ahb_sync_ext_irqs(ab);
 392}
 393
 394static void ath11k_ahb_stop(struct ath11k_base *ab)
 395{
 396	if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
 397		ath11k_ahb_ce_irqs_disable(ab);
 398	ath11k_ahb_sync_ce_irqs(ab);
 399	ath11k_ahb_kill_tasklets(ab);
 400	del_timer_sync(&ab->rx_replenish_retry);
 401	ath11k_ce_cleanup_pipes(ab);
 402}
 403
 404static int ath11k_ahb_power_up(struct ath11k_base *ab)
 405{
 406	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 407	int ret;
 408
 409	ret = rproc_boot(ab_ahb->tgt_rproc);
 410	if (ret)
 411		ath11k_err(ab, "failed to boot the remote processor Q6\n");
 412
 413	return ret;
 414}
 415
 416static void ath11k_ahb_power_down(struct ath11k_base *ab)
 417{
 418	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 419
 420	rproc_shutdown(ab_ahb->tgt_rproc);
 421}
 422
 423static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
 424{
 425	struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
 426
 427	cfg->tgt_ce_len = ab->hw_params.target_ce_count;
 428	cfg->tgt_ce = ab->hw_params.target_ce_config;
 429	cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
 430	cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
 431	ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
 432}
 433
 434static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
 435{
 436	int i, j;
 437
 438	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
 439		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
 440
 441		for (j = 0; j < irq_grp->num_irq; j++)
 442			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
 443
 444		netif_napi_del(&irq_grp->napi);
 445	}
 446}
 447
 448static void ath11k_ahb_free_irq(struct ath11k_base *ab)
 449{
 450	int irq_idx;
 451	int i;
 452
 453	if (ab->hw_params.hybrid_bus_type)
 454		return ath11k_pcic_free_irq(ab);
 455
 456	for (i = 0; i < ab->hw_params.ce_count; i++) {
 457		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
 458			continue;
 459		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
 460		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
 461	}
 462
 463	ath11k_ahb_free_ext_irq(ab);
 464}
 465
 466static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t)
 467{
 468	struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
 469
 470	ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
 471
 472	ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
 473}
 474
 475static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
 476{
 477	struct ath11k_ce_pipe *ce_pipe = arg;
 478
 479	/* last interrupt received for this CE */
 480	ce_pipe->timestamp = jiffies;
 481
 482	ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
 483
 484	tasklet_schedule(&ce_pipe->intr_tq);
 485
 486	return IRQ_HANDLED;
 487}
 488
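    /* NAPI poll for an external interrupt group: service the DP SRNGs,
     * re-enable the group's interrupts only when less than the full budget was
     * consumed, and never report more work than the budget to the NAPI core.
     */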
 489static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
 490{
 491	struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
 492						struct ath11k_ext_irq_grp,
 493						napi);
 494	struct ath11k_base *ab = irq_grp->ab;
 495	int work_done;
 496
 497	work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
 498	if (work_done < budget) {
 499		napi_complete_done(napi, work_done);
 500		ath11k_ahb_ext_grp_enable(irq_grp);
 501	}
 502
 503	if (work_done > budget)
 504		work_done = budget;
 505
 506	return work_done;
 507}
 508
 509static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
 510{
 511	struct ath11k_ext_irq_grp *irq_grp = arg;
 512
 513	/* last interrupt received for this group */
 514	irq_grp->timestamp = jiffies;
 515
 516	ath11k_ahb_ext_grp_disable(irq_grp);
 517
 518	napi_schedule(&irq_grp->napi);
 519
 520	return IRQ_HANDLED;
 521}
 522
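    /* Build each external interrupt group from the hw_params ring masks and
     * request one handler per interrupt line; the enum ext_irq_num values are
     * used as indices into irq_name[] and ab->irq_num[].
     */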
 523static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
 524{
 525	struct ath11k_hw_params *hw = &ab->hw_params;
 526	int i, j;
 527	int irq;
 528	int ret;
 529
 530	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
 531		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
 532		u32 num_irq = 0;
 533
 534		irq_grp->ab = ab;
 535		irq_grp->grp_id = i;
 536		init_dummy_netdev(&irq_grp->napi_ndev);
 537		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
 538			       ath11k_ahb_ext_grp_napi_poll);
 539
 540		for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
 541			if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
 542				irq_grp->irqs[num_irq++] =
 543					wbm2host_tx_completions_ring1 - j;
 544			}
 545
 546			if (ab->hw_params.ring_mask->rx[i] & BIT(j)) {
 547				irq_grp->irqs[num_irq++] =
 548					reo2host_destination_ring1 - j;
 549			}
 550
 551			if (ab->hw_params.ring_mask->rx_err[i] & BIT(j))
 552				irq_grp->irqs[num_irq++] = reo2host_exception;
 553
 554			if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j))
 555				irq_grp->irqs[num_irq++] = wbm2host_rx_release;
 556
 557			if (ab->hw_params.ring_mask->reo_status[i] & BIT(j))
 558				irq_grp->irqs[num_irq++] = reo2host_status;
 559
 560			if (j < ab->hw_params.max_radios) {
 561				if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) {
 562					irq_grp->irqs[num_irq++] =
 563						rxdma2host_destination_ring_mac1 -
 564						ath11k_hw_get_mac_from_pdev_id(hw, j);
 565				}
 566
 567				if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) {
 568					irq_grp->irqs[num_irq++] =
 569						host2rxdma_host_buf_ring_mac1 -
 570						ath11k_hw_get_mac_from_pdev_id(hw, j);
 571				}
 572
 573				if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) {
 574					irq_grp->irqs[num_irq++] =
 575						ppdu_end_interrupts_mac1 -
 576						ath11k_hw_get_mac_from_pdev_id(hw, j);
 577					irq_grp->irqs[num_irq++] =
 578						rxdma2host_monitor_status_ring_mac1 -
 579						ath11k_hw_get_mac_from_pdev_id(hw, j);
 580				}
 581			}
 582		}
 583		irq_grp->num_irq = num_irq;
 584
 585		for (j = 0; j < irq_grp->num_irq; j++) {
 586			int irq_idx = irq_grp->irqs[j];
 587
 588			irq = platform_get_irq_byname(ab->pdev,
 589						      irq_name[irq_idx]);
 590			ab->irq_num[irq_idx] = irq;
 591			irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
 592			ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
 593					  IRQF_TRIGGER_RISING,
 594					  irq_name[irq_idx], irq_grp);
 595			if (ret) {
 596				ath11k_err(ab, "failed to request irq %d\n",
 597					   irq);
 598			}
 599		}
 600	}
 601
 602	return 0;
 603}
 604
 605static int ath11k_ahb_config_irq(struct ath11k_base *ab)
 606{
 607	int irq, irq_idx, i;
 608	int ret;
 609
 610	if (ab->hw_params.hybrid_bus_type)
 611		return ath11k_pcic_config_irq(ab);
 612
 613	/* Configure CE irqs */
 614	for (i = 0; i < ab->hw_params.ce_count; i++) {
 615		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
 616
 617		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
 618			continue;
 619
 620		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
 621
 622		tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet);
 623		irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
 624		ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
 625				  IRQF_TRIGGER_RISING, irq_name[irq_idx],
 626				  ce_pipe);
 627		if (ret)
 628			return ret;
 629
 630		ab->irq_num[irq_idx] = irq;
 631	}
 632
 633	/* Configure external interrupts */
 634	ret = ath11k_ahb_config_ext_irq(ab);
 635
 636	return ret;
 637}
 638
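    /* Resolve a service ID to its upload (PIPEDIR_OUT) and download
     * (PIPEDIR_IN) CE pipe numbers using the hw_params service-to-CE map.
     */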
 639static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
 640					  u8 *ul_pipe, u8 *dl_pipe)
 641{
 642	const struct service_to_pipe *entry;
 643	bool ul_set = false, dl_set = false;
 644	int i;
 645
 646	for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
 647		entry = &ab->hw_params.svc_to_ce_map[i];
 648
 649		if (__le32_to_cpu(entry->service_id) != service_id)
 650			continue;
 651
 652		switch (__le32_to_cpu(entry->pipedir)) {
 653		case PIPEDIR_NONE:
 654			break;
 655		case PIPEDIR_IN:
 656			WARN_ON(dl_set);
 657			*dl_pipe = __le32_to_cpu(entry->pipenum);
 658			dl_set = true;
 659			break;
 660		case PIPEDIR_OUT:
 661			WARN_ON(ul_set);
 662			*ul_pipe = __le32_to_cpu(entry->pipenum);
 663			ul_set = true;
 664			break;
 665		case PIPEDIR_INOUT:
 666			WARN_ON(dl_set);
 667			WARN_ON(ul_set);
 668			*dl_pipe = __le32_to_cpu(entry->pipenum);
 669			*ul_pipe = __le32_to_cpu(entry->pipenum);
 670			dl_set = true;
 671			ul_set = true;
 672			break;
 673		}
 674	}
 675
 676	if (WARN_ON(!ul_set || !dl_set))
 677		return -ENOENT;
 678
 679	return 0;
 680}
 681
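    /* Suspend/resume handshake with the firmware over SMP2P: a rolling
     * sequence number plus a POWER_SAVE_ENTER/EXIT message are packed into
     * the shared SMEM state bits.
     */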
 682static int ath11k_ahb_hif_suspend(struct ath11k_base *ab)
 683{
 684	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 685	u32 wake_irq;
 686	u32 value = 0;
 687	int ret;
 688
 689	if (!device_may_wakeup(ab->dev))
 690		return -EPERM;
 691
 692	wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
 693
 694	ret = enable_irq_wake(wake_irq);
 695	if (ret) {
 696		ath11k_err(ab, "failed to enable wakeup irq: %d\n", ret);
 697		return ret;
 698	}
 699
 700	value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
 701				ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
 702	value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER,
 703				 ATH11K_AHB_SMP2P_SMEM_MSG);
 704
 705	ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
 706					  ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
 707	if (ret) {
 708		ath11k_err(ab, "failed to send smp2p power save enter cmd: %d\n", ret);
 709		return ret;
 710	}
 711
 712	ath11k_dbg(ab, ATH11K_DBG_AHB, "device suspended\n");
 713
 714	return ret;
 715}
 716
 717static int ath11k_ahb_hif_resume(struct ath11k_base *ab)
 718{
 719	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 720	u32 wake_irq;
 721	u32 value = 0;
 722	int ret;
 723
 724	if (!device_may_wakeup(ab->dev))
 725		return -EPERM;
 726
 727	wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
 728
 729	ret = disable_irq_wake(wake_irq);
 730	if (ret) {
 731		ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret);
 732		return ret;
 733	}
 734
 735	reinit_completion(&ab->wow.wakeup_completed);
 736
 737	value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
 738				ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
 739	value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT,
 740				 ATH11K_AHB_SMP2P_SMEM_MSG);
 741
 742	ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
 743					  ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
 744	if (ret) {
 745		ath11k_err(ab, "failed to send smp2p power save exit cmd: %d\n", ret);
 746		return ret;
 747	}
 748
 749	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
 750	if (ret == 0) {
 751		ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
 752		return -ETIMEDOUT;
 753	}
 754
 755	ath11k_dbg(ab, ATH11K_DBG_AHB, "device resumed\n");
 756
 757	return 0;
 758}
 759
 760static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
 761	.start = ath11k_ahb_start,
 762	.stop = ath11k_ahb_stop,
 763	.read32 = ath11k_ahb_read32,
 764	.write32 = ath11k_ahb_write32,
 765	.read = NULL,
 766	.irq_enable = ath11k_ahb_ext_irq_enable,
 767	.irq_disable = ath11k_ahb_ext_irq_disable,
 768	.map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
 769	.power_down = ath11k_ahb_power_down,
 770	.power_up = ath11k_ahb_power_up,
 771};
 772
 773static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
 774	.start = ath11k_pcic_start,
 775	.stop = ath11k_pcic_stop,
 776	.read32 = ath11k_pcic_read32,
 777	.write32 = ath11k_pcic_write32,
 778	.read = NULL,
 779	.irq_enable = ath11k_pcic_ext_irq_enable,
 780	.irq_disable = ath11k_pcic_ext_irq_disable,
 781	.get_msi_address =  ath11k_pcic_get_msi_address,
 782	.get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
 783	.map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
 784	.power_down = ath11k_ahb_power_down,
 785	.power_up = ath11k_ahb_power_up,
 786	.suspend = ath11k_ahb_hif_suspend,
 787	.resume = ath11k_ahb_hif_resume,
 788	.ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq,
 789	.ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq,
 790};
 791
 792static int ath11k_core_get_rproc(struct ath11k_base *ab)
 793{
 794	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 795	struct device *dev = ab->dev;
 796	struct rproc *prproc;
 797	phandle rproc_phandle;
 798
 799	if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
 800		ath11k_err(ab, "failed to get q6_rproc handle\n");
 801		return -ENOENT;
 802	}
 803
 804	prproc = rproc_get_by_phandle(rproc_phandle);
 805	if (!prproc) {
 806		ath11k_dbg(ab, ATH11K_DBG_AHB, "failed to get rproc, deferring\n");
 807		return -EPROBE_DEFER;
 808	}
 809	ab_ahb->tgt_rproc = prproc;
 810
 811	return 0;
 812}
 813
 814static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
 815{
 816	struct platform_device *pdev = ab->pdev;
 817	phys_addr_t msi_addr_pa;
 818	dma_addr_t msi_addr_iova;
 819	struct resource *res;
 820	int int_prop;
 821	int ret;
 822	int i;
 823
 824	ret = ath11k_pcic_init_msi_config(ab);
 825	if (ret) {
 826		ath11k_err(ab, "failed to init msi config: %d\n", ret);
 827		return ret;
 828	}
 829
 830	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 831	if (!res) {
 832		ath11k_err(ab, "failed to fetch msi_addr\n");
 833		return -ENOENT;
 834	}
 835
 836	msi_addr_pa = res->start;
 837	msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
 838					 DMA_FROM_DEVICE, 0);
 839	if (dma_mapping_error(ab->dev, msi_addr_iova))
 840		return -ENOMEM;
 841
 842	ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
 843	ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);
 844
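    	/* Cell 1 of the "interrupts" property is the GIC SPI number; SPIs start
    	 * at hwirq 32, hence the +32 when deriving the MSI endpoint base data.
    	 */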
 845	ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
 846	if (ret)
 847		return ret;
 848
 849	ab->pci.msi.ep_base_data = int_prop + 32;
 850
 851	for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
 852		ret = platform_get_irq(pdev, i);
 853		if (ret < 0)
 854			return ret;
 855
 856		ab->pci.msi.irqs[i] = ret;
 857	}
 858
 859	set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
 860
 861	return 0;
 862}
 863
 864static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab)
 865{
 866	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 867
 868	if (!ab->hw_params.smp2p_wow_exit)
 869		return 0;
 870
 871	ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out",
 872							    &ab_ahb->smp2p_info.smem_bit);
 873	if (IS_ERR(ab_ahb->smp2p_info.smem_state)) {
 874		ath11k_err(ab, "failed to fetch smem state: %ld\n",
 875			   PTR_ERR(ab_ahb->smp2p_info.smem_state));
 876		return PTR_ERR(ab_ahb->smp2p_info.smem_state);
 877	}
 878
 879	return 0;
 880}
 881
 882static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
 883{
 884	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 885
 886	if (!ab->hw_params.smp2p_wow_exit)
 887		return;
 888
 889	qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
 890}
 891
 892static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
 893{
 894	struct platform_device *pdev = ab->pdev;
 895	struct resource *mem_res;
 896	void __iomem *mem;
 897
 898	if (ab->hw_params.hybrid_bus_type)
 899		return ath11k_ahb_setup_msi_resources(ab);
 900
 901	mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
 902	if (IS_ERR(mem)) {
 903		dev_err(&pdev->dev, "ioremap error\n");
 904		return PTR_ERR(mem);
 905	}
 906
 907	ab->mem = mem;
 908	ab->mem_len = resource_size(mem_res);
 909
 910	return 0;
 911}
 912
 913static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
 914{
 915	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 916	struct device *dev = ab->dev;
 917	struct device_node *node;
 918	struct resource r;
 919	int ret;
 920
 921	node = of_parse_phandle(dev->of_node, "memory-region", 0);
 922	if (!node)
 923		return -ENOENT;
 924
 925	ret = of_address_to_resource(node, 0, &r);
 926	of_node_put(node);
 927	if (ret) {
 928		dev_err(dev, "failed to resolve msa fixed region\n");
 929		return ret;
 930	}
 931
 932	ab_ahb->fw.msa_paddr = r.start;
 933	ab_ahb->fw.msa_size = resource_size(&r);
 934
 935	node = of_parse_phandle(dev->of_node, "memory-region", 1);
 936	if (!node)
 937		return -ENOENT;
 938
 939	ret = of_address_to_resource(node, 0, &r);
 940	of_node_put(node);
 941	if (ret) {
 942		dev_err(dev, "failed to resolve ce fixed region\n");
 943		return ret;
 944	}
 945
 946	ab_ahb->fw.ce_paddr = r.start;
 947	ab_ahb->fw.ce_size = resource_size(&r);
 948
 949	return 0;
 950}
 951
 952static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
 953{
 954	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
 955	struct device *host_dev = ab->dev;
 956	struct platform_device_info info = {0};
 957	struct iommu_domain *iommu_dom;
 958	struct platform_device *pdev;
 959	struct device_node *node;
 960	int ret;
 961
 962	/* Chipsets not requiring MSA need not initialize
 963	 * MSA resources; return success in such cases.
 964	 */
 965	if (!ab->hw_params.fixed_fw_mem)
 966		return 0;
 967
 968	ret = ath11k_ahb_setup_msa_resources(ab);
 969	if (ret) {
 970		ath11k_err(ab, "failed to setup msa resources\n");
 971		return ret;
 972	}
 973
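    	/* Without a "wifi-firmware" DT child node the firmware memory is taken
    	 * to be TrustZone-managed; otherwise a dedicated platform device and
    	 * IOMMU domain are set up for the firmware below.
    	 */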
 974	node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
 975	if (!node) {
 976		ab_ahb->fw.use_tz = true;
 977		return 0;
 978	}
 979
 980	info.fwnode = &node->fwnode;
 981	info.parent = host_dev;
 982	info.name = node->name;
 983	info.dma_mask = DMA_BIT_MASK(32);
 984
 985	pdev = platform_device_register_full(&info);
 986	if (IS_ERR(pdev)) {
 987		of_node_put(node);
 988		return PTR_ERR(pdev);
 989	}
 990
 991	ret = of_dma_configure(&pdev->dev, node, true);
 992	if (ret) {
 993		ath11k_err(ab, "dma configure fail: %d\n", ret);
 994		goto err_unregister;
 995	}
 996
 997	ab_ahb->fw.dev = &pdev->dev;
 998
 999	iommu_dom = iommu_domain_alloc(&platform_bus_type);
1000	if (!iommu_dom) {
1001		ath11k_err(ab, "failed to allocate iommu domain\n");
1002		ret = -ENOMEM;
1003		goto err_unregister;
1004	}
1005
1006	ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
1007	if (ret) {
1008		ath11k_err(ab, "could not attach device: %d\n", ret);
1009		goto err_iommu_free;
1010	}
1011
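    	/* Identity-map (iova == paddr) the fixed MSA and CE firmware regions
    	 * into the new domain.
    	 */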
1012	ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
1013			ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
1014			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
1015	if (ret) {
1016		ath11k_err(ab, "failed to map firmware region: %d\n", ret);
1017		goto err_iommu_detach;
1018	}
1019
1020	ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
1021			ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
1022			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
1023	if (ret) {
1024		ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
1025		goto err_iommu_unmap;
1026	}
1027
1028	ab_ahb->fw.use_tz = false;
1029	ab_ahb->fw.iommu_domain = iommu_dom;
1030	of_node_put(node);
1031
1032	return 0;
1033
1034err_iommu_unmap:
1035	iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
1036
1037err_iommu_detach:
1038	iommu_detach_device(iommu_dom, ab_ahb->fw.dev);
1039
1040err_iommu_free:
1041	iommu_domain_free(iommu_dom);
1042
1043err_unregister:
1044	platform_device_unregister(pdev);
1045	of_node_put(node);
1046
1047	return ret;
1048}
1049
1050static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
1051{
1052	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
1053	struct iommu_domain *iommu;
1054	size_t unmapped_size;
1055
1056	/* Chipsets not requiring MSA will not have initialized
1057	 * MSA resources; return success in such cases.
1058	 */
1059	if (!ab->hw_params.fixed_fw_mem)
1060		return 0;
1061
1062	if (ab_ahb->fw.use_tz)
1063		return 0;
1064
1065	iommu = ab_ahb->fw.iommu_domain;
1066
1067	unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
1068	if (unmapped_size != ab_ahb->fw.msa_size)
1069		ath11k_err(ab, "failed to unmap firmware: %zu\n",
1070			   unmapped_size);
1071
1072	unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
1073	if (unmapped_size != ab_ahb->fw.ce_size)
1074		ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
1075			   unmapped_size);
1076
1077	iommu_detach_device(iommu, ab_ahb->fw.dev);
1078	iommu_domain_free(iommu);
1079
1080	platform_device_unregister(to_platform_device(ab_ahb->fw.dev));
1081
1082	return 0;
1083}
1084
1085static int ath11k_ahb_probe(struct platform_device *pdev)
1086{
1087	struct ath11k_base *ab;
1088	const struct ath11k_hif_ops *hif_ops;
1089	const struct ath11k_pci_ops *pci_ops;
1090	enum ath11k_hw_rev hw_rev;
1091	int ret;
1092
1093	hw_rev = (uintptr_t)device_get_match_data(&pdev->dev);
1094
1095	switch (hw_rev) {
1096	case ATH11K_HW_IPQ8074:
1097	case ATH11K_HW_IPQ6018_HW10:
1098	case ATH11K_HW_IPQ5018_HW10:
1099		hif_ops = &ath11k_ahb_hif_ops_ipq8074;
1100		pci_ops = NULL;
1101		break;
1102	case ATH11K_HW_WCN6750_HW10:
1103		hif_ops = &ath11k_ahb_hif_ops_wcn6750;
1104		pci_ops = &ath11k_ahb_pci_ops_wcn6750;
1105		break;
1106	default:
1107		dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
1108		return -EOPNOTSUPP;
1109	}
1110
1111	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1112	if (ret) {
1113		dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
1114		return ret;
1115	}
1116
1117	ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
1118			       ATH11K_BUS_AHB);
1119	if (!ab) {
1120		dev_err(&pdev->dev, "failed to allocate ath11k base\n");
1121		return -ENOMEM;
1122	}
1123
1124	ab->hif.ops = hif_ops;
1125	ab->pdev = pdev;
1126	ab->hw_rev = hw_rev;
1127	ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
1128	platform_set_drvdata(pdev, ab);
1129
1130	ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
1131	if (ret) {
1132		ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
1133		goto err_core_free;
1134	}
1135
1136	ret = ath11k_core_pre_init(ab);
1137	if (ret)
1138		goto err_core_free;
1139
1140	ret = ath11k_ahb_setup_resources(ab);
1141	if (ret)
1142		goto err_core_free;
1143
1144	ab->mem_ce = ab->mem;
1145
1146	if (ab->hw_params.ce_remap) {
1147		const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
1148		/* Unlike IPQ8074/IPQ6018, the CE register space lives outside WCSS
1149		 * and is not contiguous with it, so the CE registers are remapped
1150		 * separately for access.
1151		 */
1152		ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
1153		if (!ab->mem_ce) {
1154			dev_err(&pdev->dev, "ce ioremap error\n");
1155			ret = -ENOMEM;
1156			goto err_core_free;
1157		}
1158	}
1159
1160	ret = ath11k_ahb_fw_resources_init(ab);
1161	if (ret)
1162		goto err_core_free;
1163
1164	ret = ath11k_ahb_setup_smp2p_handle(ab);
1165	if (ret)
1166		goto err_fw_deinit;
1167
1168	ret = ath11k_hal_srng_init(ab);
1169	if (ret)
1170		goto err_release_smp2p_handle;
1171
1172	ret = ath11k_ce_alloc_pipes(ab);
1173	if (ret) {
1174		ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
1175		goto err_hal_srng_deinit;
1176	}
1177
1178	ath11k_ahb_init_qmi_ce_config(ab);
1179
1180	ret = ath11k_core_get_rproc(ab);
1181	if (ret) {
1182		ath11k_err(ab, "failed to get rproc: %d\n", ret);
1183		goto err_ce_free;
1184	}
1185
1186	ret = ath11k_core_init(ab);
1187	if (ret) {
1188		ath11k_err(ab, "failed to init core: %d\n", ret);
1189		goto err_ce_free;
1190	}
1191
1192	ret = ath11k_ahb_config_irq(ab);
1193	if (ret) {
1194		ath11k_err(ab, "failed to configure irq: %d\n", ret);
1195		goto err_ce_free;
1196	}
1197
1198	ath11k_qmi_fwreset_from_cold_boot(ab);
1199
1200	return 0;
1201
1202err_ce_free:
1203	ath11k_ce_free_pipes(ab);
1204
1205err_hal_srng_deinit:
1206	ath11k_hal_srng_deinit(ab);
1207
1208err_release_smp2p_handle:
1209	ath11k_ahb_release_smp2p_handle(ab);
1210
1211err_fw_deinit:
1212	ath11k_ahb_fw_resource_deinit(ab);
1213
1214err_core_free:
1215	ath11k_core_free(ab);
1216	platform_set_drvdata(pdev, NULL);
1217
1218	return ret;
1219}
1220
1221static void ath11k_ahb_remove_prepare(struct ath11k_base *ab)
1222{
1223	unsigned long left;
1224
1225	if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
1226		left = wait_for_completion_timeout(&ab->driver_recovery,
1227						   ATH11K_AHB_RECOVERY_TIMEOUT);
1228		if (!left)
1229			ath11k_warn(ab, "failed to receive recovery response completion\n");
1230	}
1231
1232	set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
1233	cancel_work_sync(&ab->restart_work);
1234	cancel_work_sync(&ab->qmi.event_work);
1235}
1236
1237static void ath11k_ahb_free_resources(struct ath11k_base *ab)
1238{
1239	struct platform_device *pdev = ab->pdev;
1240
1241	ath11k_ahb_free_irq(ab);
1242	ath11k_hal_srng_deinit(ab);
1243	ath11k_ahb_release_smp2p_handle(ab);
1244	ath11k_ahb_fw_resource_deinit(ab);
1245	ath11k_ce_free_pipes(ab);
1246
1247	if (ab->hw_params.ce_remap)
1248		iounmap(ab->mem_ce);
1249
1250	ath11k_core_free(ab);
1251	platform_set_drvdata(pdev, NULL);
1252}
1253
1254static void ath11k_ahb_remove(struct platform_device *pdev)
1255{
1256	struct ath11k_base *ab = platform_get_drvdata(pdev);
1257
1258	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
1259		ath11k_ahb_power_down(ab);
1260		ath11k_debugfs_soc_destroy(ab);
1261		ath11k_qmi_deinit_service(ab);
1262		goto qmi_fail;
1263	}
1264
1265	ath11k_ahb_remove_prepare(ab);
1266	ath11k_core_deinit(ab);
1267
1268qmi_fail:
1269	ath11k_ahb_free_resources(ab);
1270}
1271
1272static void ath11k_ahb_shutdown(struct platform_device *pdev)
1273{
1274	struct ath11k_base *ab = platform_get_drvdata(pdev);
1275
1276	/* platform shutdown() & remove() are mutually exclusive.
1277	 * remove() is invoked during rmmod & shutdown() during
1278	 * system reboot/shutdown.
1279	 */
1280	ath11k_ahb_remove_prepare(ab);
1281
1282	if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
1283		goto free_resources;
1284
1285	ath11k_core_deinit(ab);
1286
1287free_resources:
1288	ath11k_ahb_free_resources(ab);
1289}
1290
1291static struct platform_driver ath11k_ahb_driver = {
1292	.driver         = {
1293		.name   = "ath11k",
1294		.of_match_table = ath11k_ahb_of_match,
1295	},
1296	.probe  = ath11k_ahb_probe,
1297	.remove_new = ath11k_ahb_remove,
1298	.shutdown = ath11k_ahb_shutdown,
1299};
1300
1301module_platform_driver(ath11k_ahb_driver);
1302
1303MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
1304MODULE_LICENSE("Dual BSD/GPL");