   1// SPDX-License-Identifier: ISC
   2/*
   3 * Copyright (c) 2005-2011 Atheros Communications Inc.
   4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
   5 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
   6 */
   7
   8#include <linux/pci.h>
   9#include <linux/module.h>
  10#include <linux/interrupt.h>
  11#include <linux/spinlock.h>
  12#include <linux/bitops.h>
  13
  14#include "core.h"
  15#include "debug.h"
  16#include "coredump.h"
  17
  18#include "targaddrs.h"
  19#include "bmi.h"
  20
  21#include "hif.h"
  22#include "htc.h"
  23
  24#include "ce.h"
  25#include "pci.h"
  26
  27enum ath10k_pci_reset_mode {
  28	ATH10K_PCI_RESET_AUTO = 0,
  29	ATH10K_PCI_RESET_WARM_ONLY = 1,
  30};
  31
  32static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
  33static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
  34
  35module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
  36MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
  37
  38module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
  39MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
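/* Usage sketch (illustrative, not part of the driver): per the parameter
 * descriptions above, loading the module with MSI interrupts forced and
 * warm-only resets would look like:
 *
 *	# modprobe ath10k_pci irq_mode=2 reset_mode=1
 *
 * Because both parameters are registered with mode 0644 they are also
 * visible under /sys/module/ath10k_pci/parameters/.
 */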
  40
   41/* how long to wait for the target to initialise, in ms */
  42#define ATH10K_PCI_TARGET_WAIT 3000
  43#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
  44
  45/* Maximum number of bytes that can be handled atomically by
  46 * diag read and write.
  47 */
  48#define ATH10K_DIAG_TRANSFER_LIMIT	0x5000
  49
  50#define QCA99X0_PCIE_BAR0_START_REG    0x81030
  51#define QCA99X0_CPU_MEM_ADDR_REG       0x4d00c
  52#define QCA99X0_CPU_MEM_DATA_REG       0x4d010
  53
  54static const struct pci_device_id ath10k_pci_id_table[] = {
  55	/* PCI-E QCA988X V2 (Ubiquiti branded) */
  56	{ PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },
  57
  58	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
  59	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
  60	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
  61	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
  62	{ PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
  63	{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
  64	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
  65	{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
  66	{0}
  67};
  68
  69static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
  70	/* QCA988X pre 2.0 chips are not supported because they need some nasty
  71	 * hacks. ath10k doesn't have them and these devices crash horribly
  72	 * because of that.
  73	 */
  74	{ QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
  75	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
  76
  77	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
  78	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
  79	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
  80	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
  81	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
  82
  83	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
  84	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
  85	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
  86	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
  87	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
  88
  89	{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
  90
  91	{ QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },
  92
  93	{ QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },
  94
  95	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
  96	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
  97
  98	{ QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
  99};
 100
 101static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
 102static int ath10k_pci_cold_reset(struct ath10k *ar);
 103static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
 104static int ath10k_pci_init_irq(struct ath10k *ar);
 105static int ath10k_pci_deinit_irq(struct ath10k *ar);
 106static int ath10k_pci_request_irq(struct ath10k *ar);
 107static void ath10k_pci_free_irq(struct ath10k *ar);
 108static int ath10k_pci_bmi_wait(struct ath10k *ar,
 109			       struct ath10k_ce_pipe *tx_pipe,
 110			       struct ath10k_ce_pipe *rx_pipe,
 111			       struct bmi_xfer *xfer);
 112static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
 113static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
 114static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
 115static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
 116static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
 117static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
 118static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
 119
 120static const struct ce_attr pci_host_ce_config_wlan[] = {
 121	/* CE0: host->target HTC control and raw streams */
 122	{
 123		.flags = CE_ATTR_FLAGS,
 124		.src_nentries = 16,
 125		.src_sz_max = 256,
 126		.dest_nentries = 0,
 127		.send_cb = ath10k_pci_htc_tx_cb,
 128	},
 129
 130	/* CE1: target->host HTT + HTC control */
 131	{
 132		.flags = CE_ATTR_FLAGS,
 133		.src_nentries = 0,
 134		.src_sz_max = 2048,
 135		.dest_nentries = 512,
 136		.recv_cb = ath10k_pci_htt_htc_rx_cb,
 137	},
 138
 139	/* CE2: target->host WMI */
 140	{
 141		.flags = CE_ATTR_FLAGS,
 142		.src_nentries = 0,
 143		.src_sz_max = 2048,
 144		.dest_nentries = 128,
 145		.recv_cb = ath10k_pci_htc_rx_cb,
 146	},
 147
 148	/* CE3: host->target WMI */
 149	{
 150		.flags = CE_ATTR_FLAGS,
 151		.src_nentries = 32,
 152		.src_sz_max = 2048,
 153		.dest_nentries = 0,
 154		.send_cb = ath10k_pci_htc_tx_cb,
 155	},
 156
 157	/* CE4: host->target HTT */
 158	{
 159		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
 160		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
 161		.src_sz_max = 256,
 162		.dest_nentries = 0,
 163		.send_cb = ath10k_pci_htt_tx_cb,
 164	},
 165
 166	/* CE5: target->host HTT (HIF->HTT) */
 167	{
 168		.flags = CE_ATTR_FLAGS,
 169		.src_nentries = 0,
 170		.src_sz_max = 512,
 171		.dest_nentries = 512,
 172		.recv_cb = ath10k_pci_htt_rx_cb,
 173	},
 174
 175	/* CE6: target autonomous hif_memcpy */
 176	{
 177		.flags = CE_ATTR_FLAGS,
 178		.src_nentries = 0,
 179		.src_sz_max = 0,
 180		.dest_nentries = 0,
 181	},
 182
 183	/* CE7: ce_diag, the Diagnostic Window */
 184	{
 185		.flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
 186		.src_nentries = 2,
 187		.src_sz_max = DIAG_TRANSFER_LIMIT,
 188		.dest_nentries = 2,
 189	},
 190
 191	/* CE8: target->host pktlog */
 192	{
 193		.flags = CE_ATTR_FLAGS,
 194		.src_nentries = 0,
 195		.src_sz_max = 2048,
 196		.dest_nentries = 128,
 197		.recv_cb = ath10k_pci_pktlog_rx_cb,
 198	},
 199
 200	/* CE9 target autonomous qcache memcpy */
 201	{
 202		.flags = CE_ATTR_FLAGS,
 203		.src_nentries = 0,
 204		.src_sz_max = 0,
 205		.dest_nentries = 0,
 206	},
 207
 208	/* CE10: target autonomous hif memcpy */
 209	{
 210		.flags = CE_ATTR_FLAGS,
 211		.src_nentries = 0,
 212		.src_sz_max = 0,
 213		.dest_nentries = 0,
 214	},
 215
 216	/* CE11: target autonomous hif memcpy */
 217	{
 218		.flags = CE_ATTR_FLAGS,
 219		.src_nentries = 0,
 220		.src_sz_max = 0,
 221		.dest_nentries = 0,
 222	},
 223};
 224
 225/* Target firmware's Copy Engine configuration. */
 226static const struct ce_pipe_config pci_target_ce_config_wlan[] = {
 227	/* CE0: host->target HTC control and raw streams */
 228	{
 229		.pipenum = __cpu_to_le32(0),
 230		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
 231		.nentries = __cpu_to_le32(32),
 232		.nbytes_max = __cpu_to_le32(256),
 233		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 234		.reserved = __cpu_to_le32(0),
 235	},
 236
 237	/* CE1: target->host HTT + HTC control */
 238	{
 239		.pipenum = __cpu_to_le32(1),
 240		.pipedir = __cpu_to_le32(PIPEDIR_IN),
 241		.nentries = __cpu_to_le32(32),
 242		.nbytes_max = __cpu_to_le32(2048),
 243		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 244		.reserved = __cpu_to_le32(0),
 245	},
 246
 247	/* CE2: target->host WMI */
 248	{
 249		.pipenum = __cpu_to_le32(2),
 250		.pipedir = __cpu_to_le32(PIPEDIR_IN),
 251		.nentries = __cpu_to_le32(64),
 252		.nbytes_max = __cpu_to_le32(2048),
 253		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 254		.reserved = __cpu_to_le32(0),
 255	},
 256
 257	/* CE3: host->target WMI */
 258	{
 259		.pipenum = __cpu_to_le32(3),
 260		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
 261		.nentries = __cpu_to_le32(32),
 262		.nbytes_max = __cpu_to_le32(2048),
 263		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 264		.reserved = __cpu_to_le32(0),
 265	},
 266
 267	/* CE4: host->target HTT */
 268	{
 269		.pipenum = __cpu_to_le32(4),
 270		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
 271		.nentries = __cpu_to_le32(256),
 272		.nbytes_max = __cpu_to_le32(256),
 273		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 274		.reserved = __cpu_to_le32(0),
 275	},
 276
 277	/* NB: 50% of src nentries, since tx has 2 frags */
 278
 279	/* CE5: target->host HTT (HIF->HTT) */
 280	{
 281		.pipenum = __cpu_to_le32(5),
 282		.pipedir = __cpu_to_le32(PIPEDIR_IN),
 283		.nentries = __cpu_to_le32(32),
 284		.nbytes_max = __cpu_to_le32(512),
 285		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 286		.reserved = __cpu_to_le32(0),
 287	},
 288
 289	/* CE6: Reserved for target autonomous hif_memcpy */
 290	{
 291		.pipenum = __cpu_to_le32(6),
 292		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
 293		.nentries = __cpu_to_le32(32),
 294		.nbytes_max = __cpu_to_le32(4096),
 295		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 296		.reserved = __cpu_to_le32(0),
 297	},
 298
 299	/* CE7 used only by Host */
 300	{
 301		.pipenum = __cpu_to_le32(7),
 302		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
 303		.nentries = __cpu_to_le32(0),
 304		.nbytes_max = __cpu_to_le32(0),
 305		.flags = __cpu_to_le32(0),
 306		.reserved = __cpu_to_le32(0),
 307	},
 308
  309	/* CE8 target->host pktlog */
 310	{
 311		.pipenum = __cpu_to_le32(8),
 312		.pipedir = __cpu_to_le32(PIPEDIR_IN),
 313		.nentries = __cpu_to_le32(64),
 314		.nbytes_max = __cpu_to_le32(2048),
 315		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
 316		.reserved = __cpu_to_le32(0),
 317	},
 318
 319	/* CE9 target autonomous qcache memcpy */
 320	{
 321		.pipenum = __cpu_to_le32(9),
 322		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
 323		.nentries = __cpu_to_le32(32),
 324		.nbytes_max = __cpu_to_le32(2048),
 325		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
 326		.reserved = __cpu_to_le32(0),
 327	},
 328
  329	/* It is not necessary to send the target wlan configuration for CE10
  330	 * and CE11 as these CEs are not actively used by the target.
  331	 */
 332};
 333
 334/*
 335 * Map from service/endpoint to Copy Engine.
 336 * This table is derived from the CE_PCI TABLE, above.
 337 * It is passed to the Target at startup for use by firmware.
 338 */
 339static const struct ce_service_to_pipe pci_target_service_to_ce_map_wlan[] = {
 340	{
 341		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
 342		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
 343		__cpu_to_le32(3),
 344	},
 345	{
 346		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
 347		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
 348		__cpu_to_le32(2),
 349	},
 350	{
 351		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
 352		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
 353		__cpu_to_le32(3),
 354	},
 355	{
 356		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
 357		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
 358		__cpu_to_le32(2),
 359	},
 360	{
 361		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
 362		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
 363		__cpu_to_le32(3),
 364	},
 365	{
 366		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
 367		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
 368		__cpu_to_le32(2),
 369	},
 370	{
 371		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
 372		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
 373		__cpu_to_le32(3),
 374	},
 375	{
 376		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
 377		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
 378		__cpu_to_le32(2),
 379	},
 380	{
 381		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
 382		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
 383		__cpu_to_le32(3),
 384	},
 385	{
 386		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
 387		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
 388		__cpu_to_le32(2),
 389	},
 390	{
 391		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
 392		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
 393		__cpu_to_le32(0),
 394	},
 395	{
 396		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
 397		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
 398		__cpu_to_le32(1),
 399	},
 400	{ /* not used */
 401		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
 402		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
 403		__cpu_to_le32(0),
 404	},
 405	{ /* not used */
 406		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
 407		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
 408		__cpu_to_le32(1),
 409	},
 410	{
 411		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
 412		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
 413		__cpu_to_le32(4),
 414	},
 415	{
 416		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
 417		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
 418		__cpu_to_le32(5),
 419	},
 420
 421	/* (Additions here) */
 422
 423	{ /* must be last */
 424		__cpu_to_le32(0),
 425		__cpu_to_le32(0),
 426		__cpu_to_le32(0),
 427	},
 428};
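/* Illustrative reading of the map above (entry layout: { service_id,
 * pipedir, pipenum }). The WMI control service appears twice:
 *
 *	{ WMI_CONTROL, PIPEDIR_OUT, 3 }  -> host->target WMI goes out on CE3
 *	{ WMI_CONTROL, PIPEDIR_IN,  2 }  -> target->host WMI comes in on CE2
 *
 * ath10k_pci_hif_map_service_to_pipe() walks these entries to resolve
 * *ul_pipe = 3 and *dl_pipe = 2 for that service.
 */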
 429
 430static bool ath10k_pci_is_awake(struct ath10k *ar)
 431{
 432	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 433	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 434			   RTC_STATE_ADDRESS);
 435
 436	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
 437}
 438
 439static void __ath10k_pci_wake(struct ath10k *ar)
 440{
 441	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 442
 443	lockdep_assert_held(&ar_pci->ps_lock);
 444
 445	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
 446		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 447
 448	iowrite32(PCIE_SOC_WAKE_V_MASK,
 449		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 450		  PCIE_SOC_WAKE_ADDRESS);
 451}
 452
 453static void __ath10k_pci_sleep(struct ath10k *ar)
 454{
 455	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 456
 457	lockdep_assert_held(&ar_pci->ps_lock);
 458
 459	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
 460		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 461
 462	iowrite32(PCIE_SOC_WAKE_RESET,
 463		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 464		  PCIE_SOC_WAKE_ADDRESS);
 465	ar_pci->ps_awake = false;
 466}
 467
 468static int ath10k_pci_wake_wait(struct ath10k *ar)
 469{
 470	int tot_delay = 0;
 471	int curr_delay = 5;
 472
 473	while (tot_delay < PCIE_WAKE_TIMEOUT) {
 474		if (ath10k_pci_is_awake(ar)) {
 475			if (tot_delay > PCIE_WAKE_LATE_US)
  476				ath10k_warn(ar, "device wakeup took %d ms, which is unusually long; otherwise it works normally\n",
 477					    tot_delay / 1000);
 478			return 0;
 479		}
 480
 481		udelay(curr_delay);
 482		tot_delay += curr_delay;
 483
 484		if (curr_delay < 50)
 485			curr_delay += 5;
 486	}
 487
 488	return -ETIMEDOUT;
 489}
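/* Illustrative timing of the poll above: the delay starts at 5 us and
 * grows by 5 us per iteration, capping at 50 us, so the first ten polls
 * span 5 + 10 + ... + 50 = 275 us before settling into 50 us steps until
 * PCIE_WAKE_TIMEOUT is reached.
 */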
 490
 491static int ath10k_pci_force_wake(struct ath10k *ar)
 492{
 493	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 494	unsigned long flags;
 495	int ret = 0;
 496
 497	if (ar_pci->pci_ps)
 498		return ret;
 499
 500	spin_lock_irqsave(&ar_pci->ps_lock, flags);
 501
 502	if (!ar_pci->ps_awake) {
 503		iowrite32(PCIE_SOC_WAKE_V_MASK,
 504			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 505			  PCIE_SOC_WAKE_ADDRESS);
 506
 507		ret = ath10k_pci_wake_wait(ar);
 508		if (ret == 0)
 509			ar_pci->ps_awake = true;
 510	}
 511
 512	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 513
 514	return ret;
 515}
 516
 517static void ath10k_pci_force_sleep(struct ath10k *ar)
 518{
 519	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 520	unsigned long flags;
 521
 522	spin_lock_irqsave(&ar_pci->ps_lock, flags);
 523
 524	iowrite32(PCIE_SOC_WAKE_RESET,
 525		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 526		  PCIE_SOC_WAKE_ADDRESS);
 527	ar_pci->ps_awake = false;
 528
 529	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 530}
 531
 532static int ath10k_pci_wake(struct ath10k *ar)
 533{
 534	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 535	unsigned long flags;
 536	int ret = 0;
 537
 538	if (ar_pci->pci_ps == 0)
 539		return ret;
 540
 541	spin_lock_irqsave(&ar_pci->ps_lock, flags);
 542
 543	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
 544		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 545
 546	/* This function can be called very frequently. To avoid excessive
  547	 * CPU stalls for MMIO reads, use a cached variable to hold the device state.
 548	 */
 549	if (!ar_pci->ps_awake) {
 550		__ath10k_pci_wake(ar);
 551
 552		ret = ath10k_pci_wake_wait(ar);
 553		if (ret == 0)
 554			ar_pci->ps_awake = true;
 555	}
 556
 557	if (ret == 0) {
 558		ar_pci->ps_wake_refcount++;
 559		WARN_ON(ar_pci->ps_wake_refcount == 0);
 560	}
 561
 562	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 563
 564	return ret;
 565}
 566
 567static void ath10k_pci_sleep(struct ath10k *ar)
 568{
 569	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 570	unsigned long flags;
 571
 572	if (ar_pci->pci_ps == 0)
 573		return;
 574
 575	spin_lock_irqsave(&ar_pci->ps_lock, flags);
 576
 577	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
 578		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 579
 580	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
 581		goto skip;
 582
 583	ar_pci->ps_wake_refcount--;
 584
 585	mod_timer(&ar_pci->ps_timer, jiffies +
 586		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
 587
 588skip:
 589	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 590}
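/* Minimal sketch of the wake/sleep refcounting used by the register
 * accessors below (illustrative only):
 *
 *	ret = ath10k_pci_wake(ar);	// refcount++, MMIO wake if asleep
 *	if (ret)
 *		return ret;
 *	val = ioread32(ar_pci->mem + offset);	// device is awake here
 *	ath10k_pci_sleep(ar);	// refcount--, arm grace-period timer
 *
 * The device is only put back to sleep by ath10k_pci_ps_timer() once the
 * refcount has dropped to zero and ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC has
 * elapsed, which batches frequent wake/sleep cycles.
 */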
 591
 592static void ath10k_pci_ps_timer(struct timer_list *t)
 593{
 594	struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
 595	struct ath10k *ar = ar_pci->ar;
 596	unsigned long flags;
 597
 598	spin_lock_irqsave(&ar_pci->ps_lock, flags);
 599
 600	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
 601		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 602
 603	if (ar_pci->ps_wake_refcount > 0)
 604		goto skip;
 605
 606	__ath10k_pci_sleep(ar);
 607
 608skip:
 609	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 610}
 611
 612static void ath10k_pci_sleep_sync(struct ath10k *ar)
 613{
 614	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 615	unsigned long flags;
 616
 617	if (ar_pci->pci_ps == 0) {
 618		ath10k_pci_force_sleep(ar);
 619		return;
 620	}
 621
 622	del_timer_sync(&ar_pci->ps_timer);
 623
 624	spin_lock_irqsave(&ar_pci->ps_lock, flags);
 625	WARN_ON(ar_pci->ps_wake_refcount > 0);
 626	__ath10k_pci_sleep(ar);
 627	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 628}
 629
 630static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
 631{
 632	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 633	int ret;
 634
 635	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
 636		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
 637			    offset, offset + sizeof(value), ar_pci->mem_len);
 638		return;
 639	}
 640
 641	ret = ath10k_pci_wake(ar);
 642	if (ret) {
 643		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
 644			    value, offset, ret);
 645		return;
 646	}
 647
 648	iowrite32(value, ar_pci->mem + offset);
 649	ath10k_pci_sleep(ar);
 650}
 651
 652static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
 653{
 654	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 655	u32 val;
 656	int ret;
 657
 658	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
 659		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
 660			    offset, offset + sizeof(val), ar_pci->mem_len);
 661		return 0;
 662	}
 663
 664	ret = ath10k_pci_wake(ar);
 665	if (ret) {
 666		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
 667			    offset, ret);
 668		return 0xffffffff;
 669	}
 670
 671	val = ioread32(ar_pci->mem + offset);
 672	ath10k_pci_sleep(ar);
 673
 674	return val;
 675}
 676
 677inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
 678{
 679	struct ath10k_ce *ce = ath10k_ce_priv(ar);
 680
 681	ce->bus_ops->write32(ar, offset, value);
 682}
 683
 684inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
 685{
 686	struct ath10k_ce *ce = ath10k_ce_priv(ar);
 687
 688	return ce->bus_ops->read32(ar, offset);
 689}
 690
 691u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
 692{
 693	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
 694}
 695
 696void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
 697{
 698	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
 699}
 700
 701u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
 702{
 703	return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
 704}
 705
 706void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
 707{
 708	ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
 709}
 710
 711bool ath10k_pci_irq_pending(struct ath10k *ar)
 712{
 713	u32 cause;
 714
 715	/* Check if the shared legacy irq is for us */
 716	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 717				  PCIE_INTR_CAUSE_ADDRESS);
 718	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
 719		return true;
 720
 721	return false;
 722}
 723
 724void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
 725{
 726	/* IMPORTANT: INTR_CLR register has to be set after
  727	 * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
  728	 * properly cleared.
 729	 */
 730	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
 731			   0);
 732	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
 733			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
 734
 735	/* IMPORTANT: this extra read transaction is required to
 736	 * flush the posted write buffer.
 737	 */
 738	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 739				PCIE_INTR_ENABLE_ADDRESS);
 740}
 741
 742void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
 743{
 744	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
 745			   PCIE_INTR_ENABLE_ADDRESS,
 746			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
 747
 748	/* IMPORTANT: this extra read transaction is required to
 749	 * flush the posted write buffer.
 750	 */
 751	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 752				PCIE_INTR_ENABLE_ADDRESS);
 753}
 754
 755static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
 756{
 757	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 758
 759	if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
 760		return "msi";
 761
 762	return "legacy";
 763}
 764
 765static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
 766{
 767	struct ath10k *ar = pipe->hif_ce_state;
 768	struct ath10k_ce *ce = ath10k_ce_priv(ar);
 769	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
 770	struct sk_buff *skb;
 771	dma_addr_t paddr;
 772	int ret;
 773
 774	skb = dev_alloc_skb(pipe->buf_sz);
 775	if (!skb)
 776		return -ENOMEM;
 777
 778	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
 779
 780	paddr = dma_map_single(ar->dev, skb->data,
 781			       skb->len + skb_tailroom(skb),
 782			       DMA_FROM_DEVICE);
 783	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
 784		ath10k_warn(ar, "failed to dma map pci rx buf\n");
 785		dev_kfree_skb_any(skb);
 786		return -EIO;
 787	}
 788
 789	ATH10K_SKB_RXCB(skb)->paddr = paddr;
 790
 791	spin_lock_bh(&ce->ce_lock);
 792	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
 793	spin_unlock_bh(&ce->ce_lock);
 794	if (ret) {
 795		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
 796				 DMA_FROM_DEVICE);
 797		dev_kfree_skb_any(skb);
 798		return ret;
 799	}
 800
 801	return 0;
 802}
 803
 804static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
 805{
 806	struct ath10k *ar = pipe->hif_ce_state;
 807	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 808	struct ath10k_ce *ce = ath10k_ce_priv(ar);
 809	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
 810	int ret, num;
 811
 812	if (pipe->buf_sz == 0)
 813		return;
 814
 815	if (!ce_pipe->dest_ring)
 816		return;
 817
 818	spin_lock_bh(&ce->ce_lock);
 819	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
 820	spin_unlock_bh(&ce->ce_lock);
 821
 822	while (num >= 0) {
 823		ret = __ath10k_pci_rx_post_buf(pipe);
 824		if (ret) {
 825			if (ret == -ENOSPC)
 826				break;
 827			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
 828			mod_timer(&ar_pci->rx_post_retry, jiffies +
 829				  ATH10K_PCI_RX_POST_RETRY_MS);
 830			break;
 831		}
 832		num--;
 833	}
 834}
 835
 836void ath10k_pci_rx_post(struct ath10k *ar)
 837{
 838	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 839	int i;
 840
 841	for (i = 0; i < CE_COUNT; i++)
 842		ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
 843}
 844
 845void ath10k_pci_rx_replenish_retry(struct timer_list *t)
 846{
 847	struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
 848	struct ath10k *ar = ar_pci->ar;
 849
 850	ath10k_pci_rx_post(ar);
 851}
 852
 853static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
 854{
 855	u32 val = 0, region = addr & 0xfffff;
 856
 857	val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
 858				 & 0x7ff) << 21;
 859	val |= 0x100000 | region;
 860	return val;
 861}
 862
  863/* Refactored from ath10k_pci_qca988x_targ_cpu_to_ce_addr.
  864 * Supports access to target space below 1M for qca6174 and qca9377.
  865 * If the target space is below 1M, bit[20] of the converted CE addr is 0.
  866 * Otherwise bit[20] of the converted CE addr is 1.
  867 */
 868static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
 869{
 870	u32 val = 0, region = addr & 0xfffff;
 871
 872	val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
 873				 & 0x7ff) << 21;
 874	val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
 875	return val;
 876}
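/* Worked example for the conversions above (illustrative values only),
 * with region = addr & 0xfffff:
 *
 *	qca988x, addr = 0x00401234: val = (CORE_CTRL[10:0] << 21)
 *					  | 0x100000 | 0x01234
 *	qca6174, addr = 0x00080000: below 1M, so bit[20] stays clear and
 *				    val = (CORE_CTRL[10:0] << 21) | 0x80000
 */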
 877
 878static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
 879{
 880	u32 val = 0, region = addr & 0xfffff;
 881
 882	val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
 883	val |= 0x100000 | region;
 884	return val;
 885}
 886
 887static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
 888{
 889	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 890
 891	if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
  892		return -ENOTSUPP;
  893
  894	return ar_pci->targ_cpu_to_ce_addr(ar, addr);
  895}
 896
 897/*
 898 * Diagnostic read/write access is provided for startup/config/debug usage.
 899 * Caller must guarantee proper alignment, when applicable, and single user
 900 * at any moment.
 901 */
 902static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
 903				    int nbytes)
 904{
 905	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 906	int ret = 0;
 907	u32 *buf;
 908	unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
 909	struct ath10k_ce_pipe *ce_diag;
 910	/* Host buffer address in CE space */
 911	u32 ce_data;
 912	dma_addr_t ce_data_base = 0;
 913	void *data_buf;
 914	int i;
 915
  916	mutex_lock(&ar_pci->ce_diag_mutex);
  917	ce_diag = ar_pci->ce_diag;
 918
 919	/*
 920	 * Allocate a temporary bounce buffer to hold caller's data
 921	 * to be DMA'ed from Target. This guarantees
 922	 *   1) 4-byte alignment
 923	 *   2) Buffer in DMA-able space
 924	 */
 925	alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
 926
  927	data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
  928				      GFP_ATOMIC);
  929	if (!data_buf) {
 930		ret = -ENOMEM;
 931		goto done;
 932	}
 933
 934	/* The address supplied by the caller is in the
 935	 * Target CPU virtual address space.
 936	 *
 937	 * In order to use this address with the diagnostic CE,
 938	 * convert it from Target CPU virtual address space
 939	 * to CE address space
 940	 */
 941	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
 942
 943	remaining_bytes = nbytes;
 944	ce_data = ce_data_base;
 945	while (remaining_bytes) {
 946		nbytes = min_t(unsigned int, remaining_bytes,
 947			       DIAG_TRANSFER_LIMIT);
 948
 949		ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
 950		if (ret != 0)
 951			goto done;
 952
 953		/* Request CE to send from Target(!) address to Host buffer */
  954		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0);
  955		if (ret)
 956			goto done;
 957
 958		i = 0;
 959		while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
 960			udelay(DIAG_ACCESS_CE_WAIT_US);
 961			i += DIAG_ACCESS_CE_WAIT_US;
 962
 963			if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
 964				ret = -EBUSY;
 965				goto done;
 966			}
 967		}
 968
 969		i = 0;
 970		while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
 971						     &completed_nbytes) != 0) {
 972			udelay(DIAG_ACCESS_CE_WAIT_US);
  973			i += DIAG_ACCESS_CE_WAIT_US;
  974
 975			if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
 976				ret = -EBUSY;
 977				goto done;
 978			}
 979		}
 980
 981		if (nbytes != completed_nbytes) {
 982			ret = -EIO;
 983			goto done;
 984		}
 985
 986		if (*buf != ce_data) {
 987			ret = -EIO;
 988			goto done;
 989		}
 990
  991		remaining_bytes -= nbytes;
  992		memcpy(data, data_buf, nbytes);
 993
 994		address += nbytes;
 995		data += nbytes;
 996	}
 997
 998done:
 999
1000	if (data_buf)
1001		dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
1002				  ce_data_base);
1003
1004	mutex_unlock(&ar_pci->ce_diag_mutex);
1005
1006	return ret;
1007}
1008
1009static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
1010{
1011	__le32 val = 0;
1012	int ret;
1013
1014	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
1015	*value = __le32_to_cpu(val);
1016
1017	return ret;
1018}
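/* Usage sketch (illustrative): reading a single target word through the
 * diagnostic window, as __ath10k_pci_diag_read_hi() below does for
 * host-interest items:
 *
 *	u32 addr;
 *	int ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
 *	if (ret)
 *		return ret;
 *
 * The __le32_to_cpu() in ath10k_pci_diag_read32() converts the value from
 * target (little-endian) byte order.
 */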
1019
1020static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
1021				     u32 src, u32 len)
1022{
1023	u32 host_addr, addr;
1024	int ret;
1025
1026	host_addr = host_interest_item_address(src);
1027
1028	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
1029	if (ret != 0) {
1030		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
1031			    src, ret);
1032		return ret;
1033	}
1034
1035	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
1036	if (ret != 0) {
1037		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
1038			    addr, len, ret);
1039		return ret;
1040	}
1041
1042	return 0;
1043}
1044
1045#define ath10k_pci_diag_read_hi(ar, dest, src, len)		\
1046	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
1047
1048int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
1049			      const void *data, int nbytes)
1050{
1051	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1052	int ret = 0;
1053	u32 *buf;
1054	unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
1055	struct ath10k_ce_pipe *ce_diag;
 1056	void *data_buf;
 1057	dma_addr_t ce_data_base = 0;
1058	int i;
1059
 1060	mutex_lock(&ar_pci->ce_diag_mutex);
 1061	ce_diag = ar_pci->ce_diag;
1062
1063	/*
1064	 * Allocate a temporary bounce buffer to hold caller's data
1065	 * to be DMA'ed to Target. This guarantees
1066	 *   1) 4-byte alignment
1067	 *   2) Buffer in DMA-able space
1068	 */
1069	alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
1070
 1071	data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
 1072				      GFP_ATOMIC);
 1073	if (!data_buf) {
1074		ret = -ENOMEM;
1075		goto done;
1076	}
 1077
 1078	/*
1079	 * The address supplied by the caller is in the
1080	 * Target CPU virtual address space.
1081	 *
1082	 * In order to use this address with the diagnostic CE,
1083	 * convert it from
1084	 *    Target CPU virtual address space
1085	 * to
1086	 *    CE address space
1087	 */
1088	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
1089
 1090	remaining_bytes = nbytes;
 1091	while (remaining_bytes) {
1092		/* FIXME: check cast */
1093		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
1094
1095		/* Copy caller's data to allocated DMA buf */
1096		memcpy(data_buf, data, nbytes);
1097
1098		/* Set up to receive directly into Target(!) address */
1099		ret = ath10k_ce_rx_post_buf(ce_diag, &address, address);
1100		if (ret != 0)
1101			goto done;
1102
1103		/*
1104		 * Request CE to send caller-supplied data that
1105		 * was copied to bounce buffer to Target(!) address.
1106		 */
 1107		ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0);
 1108		if (ret != 0)
1109			goto done;
1110
1111		i = 0;
1112		while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
1113			udelay(DIAG_ACCESS_CE_WAIT_US);
1114			i += DIAG_ACCESS_CE_WAIT_US;
1115
1116			if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1117				ret = -EBUSY;
1118				goto done;
1119			}
1120		}
1121
1122		i = 0;
1123		while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
1124						     &completed_nbytes) != 0) {
1125			udelay(DIAG_ACCESS_CE_WAIT_US);
 1126		i += DIAG_ACCESS_CE_WAIT_US;
 1127
1128			if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1129				ret = -EBUSY;
1130				goto done;
1131			}
1132		}
1133
1134		if (nbytes != completed_nbytes) {
1135			ret = -EIO;
1136			goto done;
1137		}
1138
1139		if (*buf != address) {
1140			ret = -EIO;
1141			goto done;
1142		}
1143
1144		remaining_bytes -= nbytes;
1145		address += nbytes;
1146		data += nbytes;
1147	}
1148
1149done:
1150	if (data_buf) {
1151		dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
1152				  ce_data_base);
1153	}
1154
1155	if (ret != 0)
1156		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
1157			    address, ret);
1158
1159	mutex_unlock(&ar_pci->ce_diag_mutex);
1160
1161	return ret;
1162}
1163
1164static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
1165{
1166	__le32 val = __cpu_to_le32(value);
1167
1168	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
1169}
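/* Symmetric usage sketch for writes (illustrative value):
 *
 *	ret = ath10k_pci_diag_write32(ar, address, 0xdeadbeef);
 *
 * The value is converted to target (little-endian) byte order by
 * __cpu_to_le32() before being bounced through the diag CE.
 */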
1170
1171/* Called by lower (CE) layer when a send to Target completes. */
1172static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
1173{
1174	struct ath10k *ar = ce_state->ar;
1175	struct sk_buff_head list;
1176	struct sk_buff *skb;
1177
1178	__skb_queue_head_init(&list);
1179	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1180		/* no need to call tx completion for NULL pointers */
1181		if (skb == NULL)
1182			continue;
1183
1184		__skb_queue_tail(&list, skb);
1185	}
1186
1187	while ((skb = __skb_dequeue(&list)))
1188		ath10k_htc_tx_completion_handler(ar, skb);
1189}
1190
1191static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
1192				     void (*callback)(struct ath10k *ar,
1193						      struct sk_buff *skb))
1194{
1195	struct ath10k *ar = ce_state->ar;
1196	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1197	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
1198	struct sk_buff *skb;
1199	struct sk_buff_head list;
1200	void *transfer_context;
1201	unsigned int nbytes, max_nbytes;
1202
1203	__skb_queue_head_init(&list);
1204	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
1205					     &nbytes) == 0) {
1206		skb = transfer_context;
1207		max_nbytes = skb->len + skb_tailroom(skb);
1208		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1209				 max_nbytes, DMA_FROM_DEVICE);
1210
1211		if (unlikely(max_nbytes < nbytes)) {
1212			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1213				    nbytes, max_nbytes);
1214			dev_kfree_skb_any(skb);
1215			continue;
1216		}
1217
1218		skb_put(skb, nbytes);
1219		__skb_queue_tail(&list, skb);
1220	}
1221
1222	while ((skb = __skb_dequeue(&list))) {
1223		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1224			   ce_state->id, skb->len);
1225		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1226				skb->data, skb->len);
1227
1228		callback(ar, skb);
1229	}
1230
1231	ath10k_pci_rx_post_pipe(pipe_info);
1232}
1233
1234static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
1235					 void (*callback)(struct ath10k *ar,
1236							  struct sk_buff *skb))
1237{
1238	struct ath10k *ar = ce_state->ar;
1239	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1240	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
1241	struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
1242	struct sk_buff *skb;
1243	struct sk_buff_head list;
1244	void *transfer_context;
1245	unsigned int nbytes, max_nbytes, nentries;
1246	int orig_len;
1247
1248	/* No need to acquire ce_lock for CE5, since this is the only place CE5
1249	 * is processed other than init and deinit. Before releasing CE5
1250	 * buffers, interrupts are disabled. Thus CE5 access is serialized.
1251	 */
1252	__skb_queue_head_init(&list);
1253	while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
1254						    &nbytes) == 0) {
1255		skb = transfer_context;
1256		max_nbytes = skb->len + skb_tailroom(skb);
1257
1258		if (unlikely(max_nbytes < nbytes)) {
1259			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1260				    nbytes, max_nbytes);
1261			continue;
1262		}
1263
1264		dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1265					max_nbytes, DMA_FROM_DEVICE);
1266		skb_put(skb, nbytes);
1267		__skb_queue_tail(&list, skb);
1268	}
1269
1270	nentries = skb_queue_len(&list);
1271	while ((skb = __skb_dequeue(&list))) {
1272		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1273			   ce_state->id, skb->len);
1274		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1275				skb->data, skb->len);
1276
1277		orig_len = skb->len;
1278		callback(ar, skb);
1279		skb_push(skb, orig_len - skb->len);
1280		skb_reset_tail_pointer(skb);
1281		skb_trim(skb, 0);
1282
 1283		/* let the device gain the buffer again */
1284		dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1285					   skb->len + skb_tailroom(skb),
1286					   DMA_FROM_DEVICE);
1287	}
1288	ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
1289}
1290
1291/* Called by lower (CE) layer when data is received from the Target. */
1292static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1293{
1294	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1295}
1296
1297static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1298{
 1299	/* CE4 polling needs to be done whenever the CE pipe that transports
 1300	 * HTT Rx (target->host) is processed.
1301	 */
1302	ath10k_ce_per_engine_service(ce_state->ar, 4);
1303
1304	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1305}
1306
1307/* Called by lower (CE) layer when data is received from the Target.
1308 * Only 10.4 firmware uses separate CE to transfer pktlog data.
1309 */
1310static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
1311{
1312	ath10k_pci_process_rx_cb(ce_state,
1313				 ath10k_htt_rx_pktlog_completion_handler);
1314}
1315
1316/* Called by lower (CE) layer when a send to HTT Target completes. */
1317static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
1318{
1319	struct ath10k *ar = ce_state->ar;
1320	struct sk_buff *skb;
1321
1322	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1323		/* no need to call tx completion for NULL pointers */
1324		if (!skb)
1325			continue;
1326
1327		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
1328				 skb->len, DMA_TO_DEVICE);
1329		ath10k_htt_hif_tx_complete(ar, skb);
1330	}
1331}
1332
1333static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
1334{
1335	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
1336	ath10k_htt_t2h_msg_handler(ar, skb);
1337}
1338
1339/* Called by lower (CE) layer when HTT data is received from the Target. */
1340static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
1341{
 1342	/* CE4 polling needs to be done whenever the CE pipe that transports
 1343	 * HTT Rx (target->host) is processed.
1344	 */
1345	ath10k_ce_per_engine_service(ce_state->ar, 4);
1346
1347	ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
1348}
1349
1350int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1351			 struct ath10k_hif_sg_item *items, int n_items)
1352{
1353	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1354	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1355	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
1356	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
1357	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
1358	unsigned int nentries_mask;
1359	unsigned int sw_index;
1360	unsigned int write_index;
1361	int err, i = 0;
1362
1363	spin_lock_bh(&ce->ce_lock);
1364
1365	nentries_mask = src_ring->nentries_mask;
1366	sw_index = src_ring->sw_index;
1367	write_index = src_ring->write_index;
1368
1369	if (unlikely(CE_RING_DELTA(nentries_mask,
1370				   write_index, sw_index - 1) < n_items)) {
1371		err = -ENOBUFS;
1372		goto err;
1373	}
1374
1375	for (i = 0; i < n_items - 1; i++) {
1376		ath10k_dbg(ar, ATH10K_DBG_PCI,
1377			   "pci tx item %d paddr %pad len %d n_items %d\n",
1378			   i, &items[i].paddr, items[i].len, n_items);
1379		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1380				items[i].vaddr, items[i].len);
1381
1382		err = ath10k_ce_send_nolock(ce_pipe,
1383					    items[i].transfer_context,
1384					    items[i].paddr,
1385					    items[i].len,
1386					    items[i].transfer_id,
1387					    CE_SEND_FLAG_GATHER);
1388		if (err)
1389			goto err;
1390	}
1391
 1392	/* `i` is equal to `n_items - 1` after the for() loop */
1393
1394	ath10k_dbg(ar, ATH10K_DBG_PCI,
1395		   "pci tx item %d paddr %pad len %d n_items %d\n",
1396		   i, &items[i].paddr, items[i].len, n_items);
1397	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1398			items[i].vaddr, items[i].len);
1399
1400	err = ath10k_ce_send_nolock(ce_pipe,
1401				    items[i].transfer_context,
1402				    items[i].paddr,
1403				    items[i].len,
1404				    items[i].transfer_id,
1405				    0);
1406	if (err)
1407		goto err;
1408
1409	spin_unlock_bh(&ce->ce_lock);
1410	return 0;
1411
1412err:
1413	for (; i > 0; i--)
1414		__ath10k_ce_send_revert(ce_pipe);
1415
1416	spin_unlock_bh(&ce->ce_lock);
1417	return err;
1418}
1419
1420int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1421			     size_t buf_len)
1422{
1423	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
1424}
1425
1426u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
1427{
1428	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1429
1430	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
1431
1432	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
1433}
1434
1435static void ath10k_pci_dump_registers(struct ath10k *ar,
1436				      struct ath10k_fw_crash_data *crash_data)
1437{
1438	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
1439	int i, ret;
1440
1441	lockdep_assert_held(&ar->dump_mutex);
1442
1443	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
1444				      hi_failure_state,
1445				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
1446	if (ret) {
1447		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
1448		return;
1449	}
1450
1451	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
1452
1453	ath10k_err(ar, "firmware register dump:\n");
1454	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
1455		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
1456			   i,
1457			   __le32_to_cpu(reg_dump_values[i]),
1458			   __le32_to_cpu(reg_dump_values[i + 1]),
1459			   __le32_to_cpu(reg_dump_values[i + 2]),
1460			   __le32_to_cpu(reg_dump_values[i + 3]));
1461
1462	if (!crash_data)
1463		return;
1464
1465	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
1466		crash_data->registers[i] = reg_dump_values[i];
1467}
1468
1469static int ath10k_pci_dump_memory_section(struct ath10k *ar,
1470					  const struct ath10k_mem_region *mem_region,
1471					  u8 *buf, size_t buf_len)
1472{
1473	const struct ath10k_mem_section *cur_section, *next_section;
1474	unsigned int count, section_size, skip_size;
1475	int ret, i, j;
1476
1477	if (!mem_region || !buf)
1478		return 0;
1479
1480	cur_section = &mem_region->section_table.sections[0];
1481
1482	if (mem_region->start > cur_section->start) {
1483		ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
1484			    mem_region->start, cur_section->start);
1485		return 0;
1486	}
1487
1488	skip_size = cur_section->start - mem_region->start;
1489
1490	/* fill the gap between the first register section and register
1491	 * start address
1492	 */
1493	for (i = 0; i < skip_size; i++) {
1494		*buf = ATH10K_MAGIC_NOT_COPIED;
1495		buf++;
1496	}
1497
1498	count = 0;
1499
1500	for (i = 0; cur_section != NULL; i++) {
1501		section_size = cur_section->end - cur_section->start;
1502
1503		if (section_size <= 0) {
1504			ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
1505				    cur_section->start,
1506				    cur_section->end);
1507			break;
1508		}
1509
1510		if ((i + 1) == mem_region->section_table.size) {
1511			/* last section */
1512			next_section = NULL;
1513			skip_size = 0;
1514		} else {
1515			next_section = cur_section + 1;
1516
1517			if (cur_section->end > next_section->start) {
1518				ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
1519					    next_section->start,
1520					    cur_section->end);
1521				break;
1522			}
1523
1524			skip_size = next_section->start - cur_section->end;
1525		}
1526
1527		if (buf_len < (skip_size + section_size)) {
1528			ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
1529			break;
1530		}
1531
1532		buf_len -= skip_size + section_size;
1533
1534		/* read section to dest memory */
1535		ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
1536					       buf, section_size);
1537		if (ret) {
1538			ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
1539				    cur_section->start, ret);
1540			break;
1541		}
1542
1543		buf += section_size;
1544		count += section_size;
1545
1546		/* fill in the gap between this section and the next */
1547		for (j = 0; j < skip_size; j++) {
1548			*buf = ATH10K_MAGIC_NOT_COPIED;
1549			buf++;
1550		}
1551
1552		count += skip_size;
1553
1554		if (!next_section)
1555			/* this was the last section */
1556			break;
1557
1558		cur_section = next_section;
1559	}
1560
1561	return count;
1562}
1563
1564static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
1565{
1566	u32 val;
1567
1568	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1569			   FW_RAM_CONFIG_ADDRESS, config);
1570
1571	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1572				FW_RAM_CONFIG_ADDRESS);
1573	if (val != config) {
1574		ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
1575			    val, config);
1576		return -EIO;
1577	}
1578
1579	return 0;
1580}
1581
1582/* Always returns the length */
1583static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
1584				       const struct ath10k_mem_region *region,
1585				       u8 *buf)
1586{
1587	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1588	u32 base_addr, i;
1589
1590	base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG);
1591	base_addr += region->start;
1592
1593	for (i = 0; i < region->len; i += 4) {
1594		iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
1595		*(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG);
1596	}
1597
1598	return region->len;
1599}
1600
1601/* if an error happened returns < 0, otherwise the length */
1602static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
1603				      const struct ath10k_mem_region *region,
1604				      u8 *buf)
1605{
1606	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1607	u32 i;
1608	int ret;
1609
1610	mutex_lock(&ar->conf_mutex);
1611	if (ar->state != ATH10K_STATE_ON) {
 1612		ath10k_warn(ar, "Skipping pci_dump_memory_reg; invalid state\n");
1613		ret = -EIO;
1614		goto done;
1615	}
1616
1617	for (i = 0; i < region->len; i += 4)
1618		*(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
1619
1620	ret = region->len;
1621done:
1622	mutex_unlock(&ar->conf_mutex);
1623	return ret;
1624}
1625
1626/* if an error happened returns < 0, otherwise the length */
1627static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
1628					  const struct ath10k_mem_region *current_region,
1629					  u8 *buf)
1630{
1631	int ret;
1632
1633	if (current_region->section_table.size > 0)
1634		/* Copy each section individually. */
1635		return ath10k_pci_dump_memory_section(ar,
1636						      current_region,
1637						      buf,
1638						      current_region->len);
1639
1640	/* No individual memory sections defined so we can
1641	 * copy the entire memory region.
1642	 */
1643	ret = ath10k_pci_diag_read_mem(ar,
1644				       current_region->start,
1645				       buf,
1646				       current_region->len);
1647	if (ret) {
1648		ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
1649			    current_region->name, ret);
1650		return ret;
1651	}
1652
1653	return current_region->len;
1654}
1655
1656static void ath10k_pci_dump_memory(struct ath10k *ar,
1657				   struct ath10k_fw_crash_data *crash_data)
1658{
1659	const struct ath10k_hw_mem_layout *mem_layout;
1660	const struct ath10k_mem_region *current_region;
1661	struct ath10k_dump_ram_data_hdr *hdr;
1662	u32 count, shift;
1663	size_t buf_len;
1664	int ret, i;
1665	u8 *buf;
1666
1667	lockdep_assert_held(&ar->dump_mutex);
1668
1669	if (!crash_data)
1670		return;
1671
1672	mem_layout = ath10k_coredump_get_mem_layout(ar);
1673	if (!mem_layout)
1674		return;
1675
1676	current_region = &mem_layout->region_table.regions[0];
1677
1678	buf = crash_data->ramdump_buf;
1679	buf_len = crash_data->ramdump_buf_len;
1680
1681	memset(buf, 0, buf_len);
1682
1683	for (i = 0; i < mem_layout->region_table.size; i++) {
1684		count = 0;
1685
1686		if (current_region->len > buf_len) {
 1687			ath10k_warn(ar, "memory region %s size %d is larger than the remaining ramdump buffer size %zu\n",
1688				    current_region->name,
1689				    current_region->len,
1690				    buf_len);
1691			break;
1692		}
1693
1694		/* To get IRAM dump, the host driver needs to switch target
1695		 * ram config from DRAM to IRAM.
1696		 */
1697		if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
1698		    current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
1699			shift = current_region->start >> 20;
1700
1701			ret = ath10k_pci_set_ram_config(ar, shift);
1702			if (ret) {
1703				ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
1704					    current_region->name, ret);
1705				break;
1706			}
1707		}
1708
1709		/* Reserve space for the header. */
1710		hdr = (void *)buf;
1711		buf += sizeof(*hdr);
1712		buf_len -= sizeof(*hdr);
1713
1714		switch (current_region->type) {
1715		case ATH10K_MEM_REGION_TYPE_IOSRAM:
1716			count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
1717			break;
1718		case ATH10K_MEM_REGION_TYPE_IOREG:
1719			ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
1720			if (ret < 0)
1721				break;
1722
1723			count = ret;
1724			break;
1725		default:
1726			ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
1727			if (ret < 0)
1728				break;
1729
1730			count = ret;
1731			break;
1732		}
1733
1734		hdr->region_type = cpu_to_le32(current_region->type);
1735		hdr->start = cpu_to_le32(current_region->start);
1736		hdr->length = cpu_to_le32(count);
1737
1738		if (count == 0)
1739			/* Note: the header remains, just with zero length. */
1740			break;
1741
1742		buf += count;
1743		buf_len -= count;
1744
1745		current_region++;
1746	}
1747}
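/* Illustrative arithmetic for the IRAM bank switch above: a region
 * starting at 0x00980000 (hypothetical base) yields
 * shift = 0x00980000 >> 20 = 0x9, which ath10k_pci_set_ram_config()
 * writes to FW_RAM_CONFIG_ADDRESS and reads back to verify.
 */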
1748
1749static void ath10k_pci_fw_dump_work(struct work_struct *work)
1750{
1751	struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
1752						 dump_work);
1753	struct ath10k_fw_crash_data *crash_data;
1754	struct ath10k *ar = ar_pci->ar;
1755	char guid[UUID_STRING_LEN + 1];
1756
1757	mutex_lock(&ar->dump_mutex);
1758
 1759	spin_lock_bh(&ar->data_lock);
 1760	ar->stats.fw_crash_counter++;
1761	spin_unlock_bh(&ar->data_lock);
1762
1763	crash_data = ath10k_coredump_new(ar);
1764
1765	if (crash_data)
1766		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
1767	else
1768		scnprintf(guid, sizeof(guid), "n/a");
1769
1770	ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
1771	ath10k_print_driver_info(ar);
1772	ath10k_pci_dump_registers(ar, crash_data);
1773	ath10k_ce_dump_registers(ar, crash_data);
1774	ath10k_pci_dump_memory(ar, crash_data);
1775
1776	mutex_unlock(&ar->dump_mutex);
1777
1778	ath10k_core_start_recovery(ar);
1779}
1780
1781static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1782{
1783	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1784
1785	queue_work(ar->workqueue, &ar_pci->dump_work);
1786}
1787
1788void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1789					int force)
1790{
1791	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1792
1793	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
1794
1795	if (!force) {
1796		int resources;
1797		/*
1798		 * Decide whether to actually poll for completions, or just
1799		 * wait for a later chance.
1800		 * If there seem to be plenty of resources left, then just wait
1801		 * since checking involves reading a CE register, which is a
1802		 * relatively expensive operation.
1803		 */
1804		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
1805
1806		/*
1807		 * If at least 50% of the total resources are still available,
1808		 * don't bother checking again yet.
1809		 */
1810		if (resources > (ar_pci->attr[pipe].src_nentries >> 1))
1811			return;
1812	}
1813	ath10k_ce_per_engine_service(ar, pipe);
1814}
1815
1816static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
1817{
1818	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1819
1820	del_timer_sync(&ar_pci->rx_post_retry);
1821}
1822
1823int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
1824				       u8 *ul_pipe, u8 *dl_pipe)
1825{
1826	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1827	const struct ce_service_to_pipe *entry;
1828	bool ul_set = false, dl_set = false;
1829	int i;
1830
1831	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
1832
1833	for (i = 0; i < ARRAY_SIZE(pci_target_service_to_ce_map_wlan); i++) {
1834		entry = &ar_pci->serv_to_pipe[i];
1835
1836		if (__le32_to_cpu(entry->service_id) != service_id)
1837			continue;
1838
1839		switch (__le32_to_cpu(entry->pipedir)) {
1840		case PIPEDIR_NONE:
1841			break;
1842		case PIPEDIR_IN:
1843			WARN_ON(dl_set);
1844			*dl_pipe = __le32_to_cpu(entry->pipenum);
1845			dl_set = true;
1846			break;
1847		case PIPEDIR_OUT:
1848			WARN_ON(ul_set);
1849			*ul_pipe = __le32_to_cpu(entry->pipenum);
1850			ul_set = true;
1851			break;
1852		case PIPEDIR_INOUT:
1853			WARN_ON(dl_set);
1854			WARN_ON(ul_set);
1855			*dl_pipe = __le32_to_cpu(entry->pipenum);
1856			*ul_pipe = __le32_to_cpu(entry->pipenum);
1857			dl_set = true;
1858			ul_set = true;
1859			break;
1860		}
1861	}
1862
1863	if (!ul_set || !dl_set)
1864		return -ENOENT;
1865
1866	return 0;
1867}
1868
1869void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1870				     u8 *ul_pipe, u8 *dl_pipe)
1871{
1872	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
1873
1874	(void)ath10k_pci_hif_map_service_to_pipe(ar,
1875						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1876						 ul_pipe, dl_pipe);
1877}
1878
1879void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1880{
1881	u32 val;
1882
1883	switch (ar->hw_rev) {
1884	case ATH10K_HW_QCA988X:
1885	case ATH10K_HW_QCA9887:
1886	case ATH10K_HW_QCA6174:
1887	case ATH10K_HW_QCA9377:
1888		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1889					CORE_CTRL_ADDRESS);
1890		val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1891		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1892				   CORE_CTRL_ADDRESS, val);
1893		break;
1894	case ATH10K_HW_QCA99X0:
1895	case ATH10K_HW_QCA9984:
1896	case ATH10K_HW_QCA9888:
1897	case ATH10K_HW_QCA4019:
1898		/* TODO: Find appropriate register configuration for QCA99X0
1899		 *  to mask irq/MSI.
1900		 */
1901		break;
1902	case ATH10K_HW_WCN3990:
1903		break;
1904	}
1905}
1906
1907static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1908{
1909	u32 val;
1910
1911	switch (ar->hw_rev) {
1912	case ATH10K_HW_QCA988X:
1913	case ATH10K_HW_QCA9887:
1914	case ATH10K_HW_QCA6174:
1915	case ATH10K_HW_QCA9377:
1916		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1917					CORE_CTRL_ADDRESS);
1918		val |= CORE_CTRL_PCIE_REG_31_MASK;
1919		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1920				   CORE_CTRL_ADDRESS, val);
1921		break;
1922	case ATH10K_HW_QCA99X0:
1923	case ATH10K_HW_QCA9984:
1924	case ATH10K_HW_QCA9888:
1925	case ATH10K_HW_QCA4019:
1926		/* TODO: Find appropriate register configuration for QCA99X0
1927		 *  to unmask irq/MSI.
1928		 */
1929		break;
1930	case ATH10K_HW_WCN3990:
1931		break;
1932	}
1933}
1934
1935static void ath10k_pci_irq_disable(struct ath10k *ar)
1936{
1937	ath10k_ce_disable_interrupts(ar);
1938	ath10k_pci_disable_and_clear_legacy_irq(ar);
1939	ath10k_pci_irq_msi_fw_mask(ar);
1940}
1941
1942static void ath10k_pci_irq_sync(struct ath10k *ar)
1943{
1944	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1945
1946	synchronize_irq(ar_pci->pdev->irq);
1947}
1948
1949static void ath10k_pci_irq_enable(struct ath10k *ar)
1950{
1951	ath10k_ce_enable_interrupts(ar);
1952	ath10k_pci_enable_legacy_irq(ar);
1953	ath10k_pci_irq_msi_fw_unmask(ar);
1954}
1955
1956static int ath10k_pci_hif_start(struct ath10k *ar)
1957{
1958	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1959
1960	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
1961
1962	ath10k_core_napi_enable(ar);
1963
1964	ath10k_pci_irq_enable(ar);
1965	ath10k_pci_rx_post(ar);
1966
1967	pcie_capability_clear_and_set_word(ar_pci->pdev, PCI_EXP_LNKCTL,
1968					   PCI_EXP_LNKCTL_ASPMC,
1969					   ar_pci->link_ctl & PCI_EXP_LNKCTL_ASPMC);
1970
1971	return 0;
1972}
1973
1974static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1975{
1976	struct ath10k *ar;
1977	struct ath10k_ce_pipe *ce_pipe;
1978	struct ath10k_ce_ring *ce_ring;
1979	struct sk_buff *skb;
1980	int i;
1981
1982	ar = pci_pipe->hif_ce_state;
1983	ce_pipe = pci_pipe->ce_hdl;
1984	ce_ring = ce_pipe->dest_ring;
1985
1986	if (!ce_ring)
1987		return;
1988
1989	if (!pci_pipe->buf_sz)
1990		return;
1991
1992	for (i = 0; i < ce_ring->nentries; i++) {
1993		skb = ce_ring->per_transfer_context[i];
1994		if (!skb)
1995			continue;
1996
1997		ce_ring->per_transfer_context[i] = NULL;
1998
1999		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
2000				 skb->len + skb_tailroom(skb),
2001				 DMA_FROM_DEVICE);
2002		dev_kfree_skb_any(skb);
2003	}
2004}
2005
2006static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
2007{
2008	struct ath10k *ar;
2009	struct ath10k_ce_pipe *ce_pipe;
2010	struct ath10k_ce_ring *ce_ring;
2011	struct sk_buff *skb;
2012	int i;
2013
2014	ar = pci_pipe->hif_ce_state;
2015	ce_pipe = pci_pipe->ce_hdl;
2016	ce_ring = ce_pipe->src_ring;
2017
2018	if (!ce_ring)
2019		return;
2020
2021	if (!pci_pipe->buf_sz)
2022		return;
2023
2024	for (i = 0; i < ce_ring->nentries; i++) {
2025		skb = ce_ring->per_transfer_context[i];
2026		if (!skb)
2027			continue;
2028
2029		ce_ring->per_transfer_context[i] = NULL;
2030
2031		ath10k_htc_tx_completion_handler(ar, skb);
2032	}
2033}
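/* Note the asymmetry between the two cleanup helpers above: rx buffers are
 * still DMA-mapped for the device and must be unmapped and freed here,
 * while tx skbs are handed back through ath10k_htc_tx_completion_handler()
 * so the upper layer releases them via its normal completion path.
 */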
2034
2035/*
2036 * Cleanup residual buffers for device shutdown:
2037 *    buffers that were enqueued for receive
2038 *    buffers that were to be sent
2039 * Note: Buffers that had completed but which were
2040 * not yet processed are on a completion queue. They
2041 * are handled when the completion thread shuts down.
2042 */
2043static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
2044{
2045	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2046	int pipe_num;
2047
2048	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
2049		struct ath10k_pci_pipe *pipe_info;
2050
2051		pipe_info = &ar_pci->pipe_info[pipe_num];
2052		ath10k_pci_rx_pipe_cleanup(pipe_info);
2053		ath10k_pci_tx_pipe_cleanup(pipe_info);
2054	}
2055}
2056
2057void ath10k_pci_ce_deinit(struct ath10k *ar)
2058{
2059	int i;
2060
2061	for (i = 0; i < CE_COUNT; i++)
2062		ath10k_ce_deinit_pipe(ar, i);
2063}
2064
2065void ath10k_pci_flush(struct ath10k *ar)
2066{
2067	ath10k_pci_rx_retry_sync(ar);
2068	ath10k_pci_buffer_cleanup(ar);
2069}
2070
2071static void ath10k_pci_hif_stop(struct ath10k *ar)
2072{
2073	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2074	unsigned long flags;
2075
2076	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
2077
2078	ath10k_pci_irq_disable(ar);
2079	ath10k_pci_irq_sync(ar);
2080
2081	ath10k_core_napi_sync_disable(ar);
2082
2083	cancel_work_sync(&ar_pci->dump_work);
2084
2085	/* Most likely the device has the HTT Rx ring configured. The only way
2086	 * to prevent the device from accessing (and possibly corrupting) host
2087	 * memory is to reset the chip now.
2088	 *
2089	 * There's also no known way of masking MSI interrupts on the device.
2090	 * For ranged MSI the CE-related interrupts can be masked. However,
2091	 * regardless of how many MSI interrupts are assigned, the first one
2092	 * is always used for firmware indications (crashes) and cannot be
2093	 * masked. To prevent the device from asserting the interrupt, reset it
2094	 * before proceeding with cleanup.
2095	 */
2096	ath10k_pci_safe_chip_reset(ar);
2097
2098	ath10k_pci_flush(ar);
2099
2100	spin_lock_irqsave(&ar_pci->ps_lock, flags);
2101	WARN_ON(ar_pci->ps_wake_refcount > 0);
2102	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
2103}
2104
2105int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
2106				    void *req, u32 req_len,
2107				    void *resp, u32 *resp_len)
2108{
2109	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2110	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
2111	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
2112	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
2113	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
2114	dma_addr_t req_paddr = 0;
2115	dma_addr_t resp_paddr = 0;
2116	struct bmi_xfer xfer = {};
2117	void *treq, *tresp = NULL;
2118	int ret = 0;
2119
2120	might_sleep();
2121
2122	if (resp && !resp_len)
2123		return -EINVAL;
2124
2125	if (resp && resp_len && *resp_len == 0)
2126		return -EINVAL;
2127
2128	treq = kmemdup(req, req_len, GFP_KERNEL);
2129	if (!treq)
2130		return -ENOMEM;
2131
2132	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
2133	ret = dma_mapping_error(ar->dev, req_paddr);
2134	if (ret) {
2135		ret = -EIO;
2136		goto err_dma;
2137	}
2138
2139	if (resp && resp_len) {
2140		tresp = kzalloc(*resp_len, GFP_KERNEL);
2141		if (!tresp) {
2142			ret = -ENOMEM;
2143			goto err_req;
2144		}
2145
2146		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
2147					    DMA_FROM_DEVICE);
2148		ret = dma_mapping_error(ar->dev, resp_paddr);
2149		if (ret) {
2150			ret = -EIO;
2151			goto err_req;
2152		}
2153
2154		xfer.wait_for_resp = true;
2155		xfer.resp_len = 0;
2156
2157		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
2158	}
2159
2160	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
2161	if (ret)
2162		goto err_resp;
2163
2164	ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
2165	if (ret) {
2166		dma_addr_t unused_buffer;
2167		unsigned int unused_nbytes;
2168		unsigned int unused_id;
2169
2170		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
2171					   &unused_nbytes, &unused_id);
2172	} else {
2173		/* the wait returned zero, i.e. we did not time out */
2174		ret = 0;
2175	}
2176
2177err_resp:
2178	if (resp) {
2179		dma_addr_t unused_buffer;
2180
2181		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
2182		dma_unmap_single(ar->dev, resp_paddr,
2183				 *resp_len, DMA_FROM_DEVICE);
2184	}
2185err_req:
2186	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
2187
2188	if (ret == 0 && resp_len) {
2189		*resp_len = min(*resp_len, xfer.resp_len);
2190		memcpy(resp, tresp, *resp_len);
2191	}
2192err_dma:
2193	kfree(treq);
2194	kfree(tresp);
2195
2196	return ret;
2197}
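/* Hedged usage sketch (the struct names are assumptions of this note, based
 * on bmi.h, not dictated by this function): the caller passes an optional
 * response buffer whose length travels by reference and is shrunk to the
 * actual response size on success.
 *
 *	struct bmi_cmd cmd = {};
 *	struct bmi_resp resp = {};
 *	u32 resp_len = sizeof(resp);
 *	int ret;
 *
 *	ret = ath10k_pci_hif_exchange_bmi_msg(ar, &cmd, sizeof(cmd),
 *					      &resp, &resp_len);
 */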
2198
2199static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
2200{
2201	struct bmi_xfer *xfer;
2202
2203	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
2204		return;
2205
2206	xfer->tx_done = true;
2207}
2208
2209static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
2210{
2211	struct ath10k *ar = ce_state->ar;
2212	struct bmi_xfer *xfer;
2213	unsigned int nbytes;
2214
2215	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
2216					  &nbytes))
2217		return;
2218
2219	if (WARN_ON_ONCE(!xfer))
2220		return;
2221
2222	if (!xfer->wait_for_resp) {
2223		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
2224		return;
2225	}
2226
2227	xfer->resp_len = nbytes;
2228	xfer->rx_done = true;
2229}
2230
2231static int ath10k_pci_bmi_wait(struct ath10k *ar,
2232			       struct ath10k_ce_pipe *tx_pipe,
2233			       struct ath10k_ce_pipe *rx_pipe,
2234			       struct bmi_xfer *xfer)
2235{
2236	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
2237	unsigned long started = jiffies;
2238	unsigned long dur;
2239	int ret;
2240
2241	while (time_before_eq(jiffies, timeout)) {
2242		ath10k_pci_bmi_send_done(tx_pipe);
2243		ath10k_pci_bmi_recv_data(rx_pipe);
2244
2245		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
2246			ret = 0;
2247			goto out;
2248		}
2249
2250		schedule();
2251	}
2252
2253	ret = -ETIMEDOUT;
2254
2255out:
2256	dur = jiffies - started;
2257	if (dur > HZ)
2258		ath10k_dbg(ar, ATH10K_DBG_BMI,
2259			   "bmi cmd took %lu jiffies hz %d ret %d\n",
2260			   dur, HZ, ret);
2261	return ret;
2262}
2263
2264/*
2265 * Send an interrupt to the device to wake up the Target CPU
2266 * so it has an opportunity to notice any changed state.
2267 */
2268static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
2269{
2270	u32 addr, val;
2271
2272	addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
2273	val = ath10k_pci_read32(ar, addr);
2274	val |= CORE_CTRL_CPU_INTR_MASK;
2275	ath10k_pci_write32(ar, addr, val);
2276
2277	return 0;
2278}
2279
2280static int ath10k_pci_get_num_banks(struct ath10k *ar)
2281{
2282	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2283
2284	switch (ar_pci->pdev->device) {
2285	case QCA988X_2_0_DEVICE_ID_UBNT:
2286	case QCA988X_2_0_DEVICE_ID:
2287	case QCA99X0_2_0_DEVICE_ID:
2288	case QCA9888_2_0_DEVICE_ID:
2289	case QCA9984_1_0_DEVICE_ID:
2290	case QCA9887_1_0_DEVICE_ID:
2291		return 1;
2292	case QCA6164_2_1_DEVICE_ID:
2293	case QCA6174_2_1_DEVICE_ID:
2294		switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) {
2295		case QCA6174_HW_1_0_CHIP_ID_REV:
2296		case QCA6174_HW_1_1_CHIP_ID_REV:
2297		case QCA6174_HW_2_1_CHIP_ID_REV:
2298		case QCA6174_HW_2_2_CHIP_ID_REV:
2299			return 3;
2300		case QCA6174_HW_1_3_CHIP_ID_REV:
2301			return 2;
2302		case QCA6174_HW_3_0_CHIP_ID_REV:
2303		case QCA6174_HW_3_1_CHIP_ID_REV:
2304		case QCA6174_HW_3_2_CHIP_ID_REV:
2305			return 9;
2306		}
2307		break;
2308	case QCA9377_1_0_DEVICE_ID:
2309		return 9;
2310	}
2311
2312	ath10k_warn(ar, "unknown number of banks, assuming 1\n");
2313	return 1;
2314}
2315
2316static int ath10k_bus_get_num_banks(struct ath10k *ar)
2317{
2318	struct ath10k_ce *ce = ath10k_ce_priv(ar);
2319
2320	return ce->bus_ops->get_num_banks(ar);
2321}
2322
2323int ath10k_pci_init_config(struct ath10k *ar)
2324{
2325	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2326	u32 interconnect_targ_addr;
2327	u32 pcie_state_targ_addr = 0;
2328	u32 pipe_cfg_targ_addr = 0;
2329	u32 svc_to_pipe_map = 0;
2330	u32 pcie_config_flags = 0;
2331	u32 ealloc_value;
2332	u32 ealloc_targ_addr;
2333	u32 flag2_value;
2334	u32 flag2_targ_addr;
2335	int ret = 0;
2336
2337	/* Download to Target the CE Config and the service-to-CE map */
2338	interconnect_targ_addr =
2339		host_interest_item_address(HI_ITEM(hi_interconnect_state));
2340
2341	/* Supply Target-side CE configuration */
2342	ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
2343				     &pcie_state_targ_addr);
2344	if (ret != 0) {
2345		ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
2346		return ret;
2347	}
2348
2349	if (pcie_state_targ_addr == 0) {
2350		ret = -EIO;
2351		ath10k_err(ar, "Invalid pcie state addr\n");
2352		return ret;
2353	}
2354
2355	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2356					  offsetof(struct pcie_state,
2357						   pipe_cfg_addr)),
2358				     &pipe_cfg_targ_addr);
2359	if (ret != 0) {
2360		ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
2361		return ret;
2362	}
2363
2364	if (pipe_cfg_targ_addr == 0) {
2365		ret = -EIO;
2366		ath10k_err(ar, "Invalid pipe cfg addr\n");
2367		return ret;
2368	}
2369
2370	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
2371					ar_pci->pipe_config,
2372					sizeof(struct ce_pipe_config) *
2373					NUM_TARGET_CE_CONFIG_WLAN);
2374
2375	if (ret != 0) {
2376		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
2377		return ret;
2378	}
2379
2380	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2381					  offsetof(struct pcie_state,
2382						   svc_to_pipe_map)),
2383				     &svc_to_pipe_map);
2384	if (ret != 0) {
2385		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
2386		return ret;
2387	}
2388
2389	if (svc_to_pipe_map == 0) {
2390		ret = -EIO;
2391		ath10k_err(ar, "Invalid svc_to_pipe map\n");
2392		return ret;
2393	}
2394
2395	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
2396					ar_pci->serv_to_pipe,
2397					sizeof(pci_target_service_to_ce_map_wlan));
2398	if (ret != 0) {
2399		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
2400		return ret;
2401	}
2402
2403	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2404					  offsetof(struct pcie_state,
2405						   config_flags)),
2406				     &pcie_config_flags);
2407	if (ret != 0) {
2408		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
2409		return ret;
2410	}
2411
2412	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
2413
2414	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
2415					   offsetof(struct pcie_state,
2416						    config_flags)),
2417				      pcie_config_flags);
2418	if (ret != 0) {
2419		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
2420		return ret;
2421	}
2422
2423	/* configure early allocation */
2424	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
2425
2426	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
2427	if (ret != 0) {
2428		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
2429		return ret;
2430	}
2431
2432	/* first bank is switched to IRAM */
2433	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
2434			 HI_EARLY_ALLOC_MAGIC_MASK);
2435	ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
2436			  HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
2437			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
2438
2439	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
2440	if (ret != 0) {
2441		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
2442		return ret;
2443	}
2444
2445	/* Tell Target to proceed with initialization */
2446	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
2447
2448	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
2449	if (ret != 0) {
2450		ath10k_err(ar, "Failed to get option val: %d\n", ret);
2451		return ret;
2452	}
2453
2454	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
2455
2456	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
2457	if (ret != 0) {
2458		ath10k_err(ar, "Failed to set option val: %d\n", ret);
2459		return ret;
2460	}
2461
2462	return 0;
2463}
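/* The function above repeats one pattern: resolve a target-side address via
 * a host-interest item, then read-modify-write it through the diagnostic
 * window. A condensed sketch of a single such step (illustrative only):
 *
 *	u32 addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
 *	u32 val;
 *	int ret;
 *
 *	ret = ath10k_pci_diag_read32(ar, addr, &val);
 *	if (ret)
 *		return ret;
 *	return ath10k_pci_diag_write32(ar, addr,
 *				       val | HI_OPTION_EARLY_CFG_DONE);
 */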
2464
2465static void ath10k_pci_override_ce_config(struct ath10k *ar)
2466{
2467	struct ce_attr *attr;
2468	struct ce_pipe_config *config;
2469	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2470
2471	/* For QCA6174 we're overriding the Copy Engine 5 configuration,
2472	 * since it is currently used for another feature.
2473	 */
2474
2475	/* Override Host's Copy Engine 5 configuration */
2476	attr = &ar_pci->attr[5];
2477	attr->src_sz_max = 0;
2478	attr->dest_nentries = 0;
2479
2480	/* Override Target firmware's Copy Engine configuration */
2481	config = &ar_pci->pipe_config[5];
2482	config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2483	config->nbytes_max = __cpu_to_le32(2048);
2484
2485	/* Map from service/endpoint to Copy Engine */
2486	ar_pci->serv_to_pipe[15].pipenum = __cpu_to_le32(1);
2487}
2488
2489int ath10k_pci_alloc_pipes(struct ath10k *ar)
2490{
2491	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2492	struct ath10k_pci_pipe *pipe;
2493	struct ath10k_ce *ce = ath10k_ce_priv(ar);
2494	int i, ret;
2495
2496	for (i = 0; i < CE_COUNT; i++) {
2497		pipe = &ar_pci->pipe_info[i];
2498		pipe->ce_hdl = &ce->ce_states[i];
2499		pipe->pipe_num = i;
2500		pipe->hif_ce_state = ar;
2501
2502		ret = ath10k_ce_alloc_pipe(ar, i, &ar_pci->attr[i]);
2503		if (ret) {
2504			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
2505				   i, ret);
2506			return ret;
2507		}
2508
2509		/* Last CE is Diagnostic Window */
2510		if (i == CE_DIAG_PIPE) {
2511			ar_pci->ce_diag = pipe->ce_hdl;
2512			continue;
2513		}
2514
2515		pipe->buf_sz = (size_t)(ar_pci->attr[i].src_sz_max);
2516	}
2517
2518	return 0;
2519}
2520
2521void ath10k_pci_free_pipes(struct ath10k *ar)
2522{
2523	int i;
2524
2525	for (i = 0; i < CE_COUNT; i++)
2526		ath10k_ce_free_pipe(ar, i);
2527}
2528
2529int ath10k_pci_init_pipes(struct ath10k *ar)
2530{
2531	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2532	int i, ret;
2533
2534	for (i = 0; i < CE_COUNT; i++) {
2535		ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]);
2536		if (ret) {
2537			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
2538				   i, ret);
2539			return ret;
2540		}
2541	}
2542
2543	return 0;
2544}
2545
2546static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
2547{
2548	return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
2549	       FW_IND_EVENT_PENDING;
2550}
2551
2552static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
2553{
2554	u32 val;
2555
2556	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2557	val &= ~FW_IND_EVENT_PENDING;
2558	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
2559}
2560
2561static bool ath10k_pci_has_device_gone(struct ath10k *ar)
2562{
2563	u32 val;
2564
2565	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2566	return (val == 0xffffffff);
2567}
2568
2569/* this function effectively clears the target memory controller assert line */
2570static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
2571{
2572	u32 val;
2573
2574	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2575	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2576			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
2577	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2578
2579	msleep(10);
2580
2581	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2582	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2583			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
2584	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2585
2586	msleep(10);
2587}
2588
2589static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
2590{
2591	u32 val;
2592
2593	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
2594
2595	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2596	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2597			       val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2598}
2599
2600static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
2601{
2602	u32 val;
2603
2604	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2605
2606	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2607			       val | SOC_RESET_CONTROL_CE_RST_MASK);
2608	msleep(10);
2609	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2610			       val & ~SOC_RESET_CONTROL_CE_RST_MASK);
2611}
2612
2613static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
2614{
2615	u32 val;
2616
2617	val = ath10k_pci_soc_read32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS);
2618	ath10k_pci_soc_write32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS,
2619			       val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2620}
2621
2622static int ath10k_pci_warm_reset(struct ath10k *ar)
2623{
2624	int ret;
2625
2626	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
2627
2628	spin_lock_bh(&ar->data_lock);
2629	ar->stats.fw_warm_reset_counter++;
2630	spin_unlock_bh(&ar->data_lock);
2631
2632	ath10k_pci_irq_disable(ar);
2633
2634	/* Make sure the target CPU is not doing anything dangerous: if it were
2635	 * to access the copy engine while the host performs a copy engine
2636	 * reset, the device could confuse the pci-e controller to the point of
2637	 * bringing the host system to a complete stop (i.e. hang).
2638	 */
2639	ath10k_pci_warm_reset_si0(ar);
2640	ath10k_pci_warm_reset_cpu(ar);
2641	ath10k_pci_init_pipes(ar);
2642	ath10k_pci_wait_for_target_init(ar);
2643
2644	ath10k_pci_warm_reset_clear_lf(ar);
2645	ath10k_pci_warm_reset_ce(ar);
2646	ath10k_pci_warm_reset_cpu(ar);
2647	ath10k_pci_init_pipes(ar);
2648
2649	ret = ath10k_pci_wait_for_target_init(ar);
2650	if (ret) {
2651		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2652		return ret;
2653	}
2654
2655	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
2656
2657	return 0;
2658}
2659
2660static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
2661{
2662	ath10k_pci_irq_disable(ar);
2663	return ath10k_pci_qca99x0_chip_reset(ar);
2664}
2665
2666static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
2667{
2668	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2669
2670	if (!ar_pci->pci_soft_reset)
2671		return -ENOTSUPP;
2672
2673	return ar_pci->pci_soft_reset(ar);
2674}
2675
2676static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
2677{
2678	int i, ret;
2679	u32 val;
2680
2681	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
2682
2683	/* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
2684	 * It is thus preferred to use warm reset which is safer but may not be
2685	 * able to recover the device from all possible fail scenarios.
2686	 *
2687	 * Warm reset doesn't always work on first try so attempt it a few
2688	 * times before giving up.
2689	 */
2690	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2691		ret = ath10k_pci_warm_reset(ar);
2692		if (ret) {
2693			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
2694				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2695				    ret);
2696			continue;
2697		}
2698
2699		/* FIXME: Sometimes copy engine doesn't recover after warm
2700		 * reset. In most cases this needs cold reset. In some of these
2701		 * cases the device is in such a state that a cold reset may
2702		 * lock up the host.
2703		 *
2704		 * Reading any host interest register via copy engine is
2705		 * sufficient to verify if device is capable of booting
2706		 * firmware blob.
2707		 */
2708		ret = ath10k_pci_init_pipes(ar);
2709		if (ret) {
2710			ath10k_warn(ar, "failed to init copy engine: %d\n",
2711				    ret);
2712			continue;
2713		}
2714
2715		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2716					     &val);
2717		if (ret) {
2718			ath10k_warn(ar, "failed to poke copy engine: %d\n",
2719				    ret);
2720			continue;
2721		}
2722
2723		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2724		return 0;
2725	}
2726
2727	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2728		ath10k_warn(ar, "refusing cold reset as requested\n");
2729		return -EPERM;
2730	}
2731
2732	ret = ath10k_pci_cold_reset(ar);
2733	if (ret) {
2734		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2735		return ret;
2736	}
2737
2738	ret = ath10k_pci_wait_for_target_init(ar);
2739	if (ret) {
2740		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2741			    ret);
2742		return ret;
2743	}
2744
2745	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
2746
2747	return 0;
2748}
2749
2750static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2751{
2752	int ret;
2753
2754	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2755
2756	/* FIXME: QCA6174 requires cold + warm reset to work. */
2757
2758	ret = ath10k_pci_cold_reset(ar);
2759	if (ret) {
2760		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2761		return ret;
2762	}
2763
2764	ret = ath10k_pci_wait_for_target_init(ar);
2765	if (ret) {
2766		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2767			    ret);
2768		return ret;
2769	}
2770
2771	ret = ath10k_pci_warm_reset(ar);
2772	if (ret) {
2773		ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2774		return ret;
2775	}
2776
2777	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
2778
2779	return 0;
2780}
2781
2782static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
2783{
2784	int ret;
2785
2786	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
2787
2788	ret = ath10k_pci_cold_reset(ar);
2789	if (ret) {
2790		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2791		return ret;
2792	}
2793
2794	ret = ath10k_pci_wait_for_target_init(ar);
2795	if (ret) {
2796		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2797			    ret);
2798		return ret;
2799	}
2800
2801	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
2802
2803	return 0;
2804}
2805
2806static int ath10k_pci_chip_reset(struct ath10k *ar)
2807{
2808	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2809
2810	if (WARN_ON(!ar_pci->pci_hard_reset))
2811		return -ENOTSUPP;
2812
2813	return ar_pci->pci_hard_reset(ar);
2814}
2815
2816static int ath10k_pci_hif_power_up(struct ath10k *ar,
2817				   enum ath10k_firmware_mode fw_mode)
2818{
2819	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2820	int ret;
2821
2822	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2823
2824	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2825				  &ar_pci->link_ctl);
2826	pcie_capability_clear_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2827				   PCI_EXP_LNKCTL_ASPMC);
2828
2829	/*
2830	 * Bring the target up cleanly.
2831	 *
2832	 * The target may be in an undefined state with an AUX-powered Target
2833	 * and a Host in WoW mode. If the Host crashes, loses power, or is
2834	 * restarted (without unloading the driver) then the Target is left
2835	 * (aux) powered and running. On a subsequent driver load, the Target
2836	 * is in an unexpected state. We try to catch that here in order to
2837	 * reset the Target and retry the probe.
2838	 */
2839	ret = ath10k_pci_chip_reset(ar);
2840	if (ret) {
2841		if (ath10k_pci_has_fw_crashed(ar)) {
2842			ath10k_warn(ar, "firmware crashed during chip reset\n");
2843			ath10k_pci_fw_crashed_clear(ar);
2844			ath10k_pci_fw_crashed_dump(ar);
2845		}
2846
2847		ath10k_err(ar, "failed to reset chip: %d\n", ret);
2848		goto err_sleep;
2849	}
2850
2851	ret = ath10k_pci_init_pipes(ar);
2852	if (ret) {
2853		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
2854		goto err_sleep;
2855	}
2856
2857	ret = ath10k_pci_init_config(ar);
2858	if (ret) {
2859		ath10k_err(ar, "failed to setup init config: %d\n", ret);
2860		goto err_ce;
2861	}
2862
2863	ret = ath10k_pci_wake_target_cpu(ar);
2864	if (ret) {
2865		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
2866		goto err_ce;
2867	}
2868
2869	return 0;
2870
2871err_ce:
2872	ath10k_pci_ce_deinit(ar);
2873
2874err_sleep:
2875	return ret;
2876}
2877
2878void ath10k_pci_hif_power_down(struct ath10k *ar)
2879{
2880	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
2881
2882	/* Currently hif_power_up performs effectively a reset and hif_stop
2883	 * resets the chip as well so there's no point in resetting here.
2884	 */
2885}
2886
2887static int ath10k_pci_hif_suspend(struct ath10k *ar)
2888{
2889	/* Nothing to do; the important stuff is in the driver suspend. */
2890	return 0;
2891}
2892
2893static int ath10k_pci_suspend(struct ath10k *ar)
2894{
2895	/* The grace timer can still be counting down and ar->ps_awake may
2896	 * still be true. It is known that the device may be asleep after
2897	 * resuming regardless of the SoC powersave state before suspending.
2898	 * Hence make sure the device is asleep before proceeding.
2899	 */
2900	ath10k_pci_sleep_sync(ar);
2901
2902	return 0;
2903}
2904
2905static int ath10k_pci_hif_resume(struct ath10k *ar)
2906{
2907	/* Nothing to do; the important stuff is in the driver resume. */
2908	return 0;
2909}
2910
2911static int ath10k_pci_resume(struct ath10k *ar)
2912{
2913	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2914	struct pci_dev *pdev = ar_pci->pdev;
2915	u32 val;
2916	int ret = 0;
2917
2918	ret = ath10k_pci_force_wake(ar);
2919	if (ret) {
2920		ath10k_err(ar, "failed to wake up target: %d\n", ret);
2921		return ret;
2922	}
2923
2924	/* Suspend/Resume resets the PCI configuration space, so we have to
2925	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
2926	 * from interfering with C3 CPU state. pci_restore_state won't help
2927	 * here since it only restores the first 64 bytes of the PCI config header.
2928	 */
2929	pci_read_config_dword(pdev, 0x40, &val);
2930	if ((val & 0x0000ff00) != 0)
2931		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2932
2933	return ret;
2934}
2935
2936static bool ath10k_pci_validate_cal(void *data, size_t size)
2937{
2938	__le16 *cal_words = data;
2939	u16 checksum = 0;
2940	size_t i;
2941
2942	if (size % 2 != 0)
2943		return false;
2944
2945	for (i = 0; i < size / 2; i++)
2946		checksum ^= le16_to_cpu(cal_words[i]);
2947
2948	return checksum == 0xffff;
2949}
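/* Checksum sketch: the calibration blob is accepted when the XOR of all of
 * its 16-bit little-endian words equals 0xffff. For example (illustrative
 * values), a two-word blob { 0x1234, 0xedcb } validates because
 * 0x1234 ^ 0xedcb == 0xffff.
 */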
2950
2951static void ath10k_pci_enable_eeprom(struct ath10k *ar)
2952{
2953	/* Enable SI clock */
2954	ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
2955
2956	/* Configure GPIOs for I2C operation */
2957	ath10k_pci_write32(ar,
2958			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2959			   4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
2960			   SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
2961			      GPIO_PIN0_CONFIG) |
2962			   SM(1, GPIO_PIN0_PAD_PULL));
2963
2964	ath10k_pci_write32(ar,
2965			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2966			   4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
2967			   SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
2968			   SM(1, GPIO_PIN0_PAD_PULL));
2969
2970	ath10k_pci_write32(ar,
2971			   GPIO_BASE_ADDRESS +
2972			   QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
2973			   1u << QCA9887_1_0_SI_CLK_GPIO_PIN);
2974
2975	/* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */
2976	ath10k_pci_write32(ar,
2977			   SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
2978			   SM(1, SI_CONFIG_ERR_INT) |
2979			   SM(1, SI_CONFIG_BIDIR_OD_DATA) |
2980			   SM(1, SI_CONFIG_I2C) |
2981			   SM(1, SI_CONFIG_POS_SAMPLE) |
2982			   SM(1, SI_CONFIG_INACTIVE_DATA) |
2983			   SM(1, SI_CONFIG_INACTIVE_CLK) |
2984			   SM(8, SI_CONFIG_DIVIDER));
2985}
2986
2987static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
2988{
2989	u32 reg;
2990	int wait_limit;
2991
2992	/* set the device select byte and the address for the read operation */
2993	reg = QCA9887_EEPROM_SELECT_READ |
2994	      SM(addr, QCA9887_EEPROM_ADDR_LO) |
2995	      SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
2996	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
2997
2998	/* write transmit data, transfer length, and START bit */
2999	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
3000			   SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
3001			   SM(4, SI_CS_TX_CNT));
3002
3003	/* wait max 1 sec */
3004	wait_limit = 100000;
3005
3006	/* wait for SI_CS_DONE_INT */
3007	do {
3008		reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
3009		if (MS(reg, SI_CS_DONE_INT))
3010			break;
3011
3012		wait_limit--;
3013		udelay(10);
3014	} while (wait_limit > 0);
3015
3016	if (!MS(reg, SI_CS_DONE_INT)) {
3017		ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
3018			   addr);
3019		return -ETIMEDOUT;
3020	}
3021
3022	/* clear SI_CS_DONE_INT */
3023	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
3024
3025	if (MS(reg, SI_CS_DONE_ERR)) {
3026		ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
3027		return -EIO;
3028	}
3029
3030	/* extract receive data */
3031	reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
3032	*out = reg;
3033
3034	return 0;
3035}
3036
3037static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
3038					   size_t *data_len)
3039{
3040	u8 *caldata = NULL;
3041	size_t calsize, i;
3042	int ret;
3043
3044	if (!QCA_REV_9887(ar))
3045		return -EOPNOTSUPP;
3046
3047	calsize = ar->hw_params.cal_data_len;
3048	caldata = kmalloc(calsize, GFP_KERNEL);
3049	if (!caldata)
3050		return -ENOMEM;
3051
3052	ath10k_pci_enable_eeprom(ar);
3053
3054	for (i = 0; i < calsize; i++) {
3055		ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
3056		if (ret)
3057			goto err_free;
3058	}
3059
3060	if (!ath10k_pci_validate_cal(caldata, calsize))
3061		goto err_free;
3062
3063	*data = caldata;
3064	*data_len = calsize;
3065
3066	return 0;
3067
3068err_free:
3069	kfree(caldata);
3070
3071	return -EINVAL;
3072}
3073
3074static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
3075	.tx_sg			= ath10k_pci_hif_tx_sg,
3076	.diag_read		= ath10k_pci_hif_diag_read,
3077	.diag_write		= ath10k_pci_diag_write_mem,
3078	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
3079	.start			= ath10k_pci_hif_start,
3080	.stop			= ath10k_pci_hif_stop,
3081	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
3082	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
3083	.send_complete_check	= ath10k_pci_hif_send_complete_check,
3084	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
3085	.power_up		= ath10k_pci_hif_power_up,
3086	.power_down		= ath10k_pci_hif_power_down,
3087	.read32			= ath10k_pci_read32,
3088	.write32		= ath10k_pci_write32,
3089	.suspend		= ath10k_pci_hif_suspend,
3090	.resume			= ath10k_pci_hif_resume,
3091	.fetch_cal_eeprom	= ath10k_pci_hif_fetch_cal_eeprom,
3092};
3093
3094/*
3095 * Top-level interrupt handler for all PCI interrupts from a Target.
3096 * When a block of MSI interrupts is allocated, this top-level handler
3097 * is not used; instead, we directly call the correct sub-handler.
3098 */
3099static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
3100{
3101	struct ath10k *ar = arg;
3102	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3103	int ret;
3104
3105	if (ath10k_pci_has_device_gone(ar))
3106		return IRQ_NONE;
3107
3108	ret = ath10k_pci_force_wake(ar);
3109	if (ret) {
3110		ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
3111		return IRQ_NONE;
3112	}
3113
3114	if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
3115	    !ath10k_pci_irq_pending(ar))
3116		return IRQ_NONE;
3117
3118	ath10k_pci_disable_and_clear_legacy_irq(ar);
3119	ath10k_pci_irq_msi_fw_mask(ar);
3120	napi_schedule(&ar->napi);
3121
3122	return IRQ_HANDLED;
3123}
3124
3125static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
3126{
3127	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
3128	int done = 0;
3129
3130	if (ath10k_pci_has_fw_crashed(ar)) {
3131		ath10k_pci_fw_crashed_clear(ar);
3132		ath10k_pci_fw_crashed_dump(ar);
3133		napi_complete(ctx);
3134		return done;
3135	}
3136
3137	ath10k_ce_per_engine_service_any(ar);
3138
3139	done = ath10k_htt_txrx_compl_task(ar, budget);
3140
3141	if (done < budget) {
3142		napi_complete_done(ctx, done);
3143		/* In case of MSI, it is possible that interrupts are received
3144		 * while NAPI poll is inprogress. So pending interrupts that are
3145		 * received after processing all copy engine pipes by NAPI poll
3146		 * will not be handled again. This is causing failure to
3147		 * complete boot sequence in x86 platform. So before enabling
3148		 * interrupts safer to check for pending interrupts for
3149		 * immediate servicing.
3150		 */
3151		if (ath10k_ce_interrupt_summary(ar)) {
3152			napi_schedule(ctx);
3153			goto out;
3154		}
3155		ath10k_pci_enable_legacy_irq(ar);
3156		ath10k_pci_irq_msi_fw_unmask(ar);
3157	}
3158
3159out:
3160	return done;
3161}
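/* Note on the poll above: the driver re-enables its interrupts only after
 * napi_complete_done(), so the ath10k_ce_interrupt_summary() re-check
 * closes the window in which an MSI arriving mid-poll would otherwise go
 * unserviced until some later event.
 */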
3162
3163static int ath10k_pci_request_irq_msi(struct ath10k *ar)
3164{
3165	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3166	int ret;
3167
3168	ret = request_irq(ar_pci->pdev->irq,
3169			  ath10k_pci_interrupt_handler,
3170			  IRQF_SHARED, "ath10k_pci", ar);
3171	if (ret) {
3172		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
3173			    ar_pci->pdev->irq, ret);
3174		return ret;
3175	}
3176
3177	return 0;
3178}
3179
3180static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
3181{
3182	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3183	int ret;
3184
3185	ret = request_irq(ar_pci->pdev->irq,
3186			  ath10k_pci_interrupt_handler,
3187			  IRQF_SHARED, "ath10k_pci", ar);
3188	if (ret) {
3189		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
3190			    ar_pci->pdev->irq, ret);
3191		return ret;
3192	}
3193
3194	return 0;
3195}
3196
3197static int ath10k_pci_request_irq(struct ath10k *ar)
3198{
3199	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3200
3201	switch (ar_pci->oper_irq_mode) {
3202	case ATH10K_PCI_IRQ_LEGACY:
3203		return ath10k_pci_request_irq_legacy(ar);
3204	case ATH10K_PCI_IRQ_MSI:
3205		return ath10k_pci_request_irq_msi(ar);
3206	default:
3207		return -EINVAL;
3208	}
3209}
3210
3211static void ath10k_pci_free_irq(struct ath10k *ar)
3212{
3213	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3214
3215	free_irq(ar_pci->pdev->irq, ar);
3216}
3217
3218void ath10k_pci_init_napi(struct ath10k *ar)
3219{
3220	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll);
3221}
3222
3223static int ath10k_pci_init_irq(struct ath10k *ar)
3224{
3225	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3226	int ret;
3227
3228	ath10k_pci_init_napi(ar);
3229
3230	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
3231		ath10k_info(ar, "limiting irq mode to: %d\n",
3232			    ath10k_pci_irq_mode);
3233
3234	/* Try MSI */
3235	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
3236		ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
3237		ret = pci_enable_msi(ar_pci->pdev);
3238		if (ret == 0)
3239			return 0;
3240
3241		/* MSI failed, try legacy irq next */
3242	}
3243
3244	/* Try legacy irq
3245	 *
3246	 * A potential race occurs here: The CORE_BASE write
3247	 * depends on target correctly decoding AXI address but
3248	 * host won't know when target writes BAR to CORE_CTRL.
3249	 * This write might get lost if target has NOT written BAR.
3250	 * For now, fix the race by repeating the write in the
3251	 * synchronization check below.
3252	 */
3253	ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
3254
3255	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
3256			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3257
3258	return 0;
3259}
3260
3261static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
3262{
3263	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
3264			   0);
3265}
3266
3267static int ath10k_pci_deinit_irq(struct ath10k *ar)
3268{
3269	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3270
3271	switch (ar_pci->oper_irq_mode) {
3272	case ATH10K_PCI_IRQ_LEGACY:
3273		ath10k_pci_deinit_irq_legacy(ar);
3274		break;
3275	default:
3276		pci_disable_msi(ar_pci->pdev);
3277		break;
3278	}
3279
3280	return 0;
3281}
3282
3283int ath10k_pci_wait_for_target_init(struct ath10k *ar)
3284{
3285	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3286	unsigned long timeout;
3287	u32 val;
3288
3289	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
3290
3291	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
3292
3293	do {
3294		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
3295
3296		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
3297			   val);
3298
3299		/* target should never return this */
3300		if (val == 0xffffffff)
3301			continue;
3302
3303		/* the device has crashed so don't bother trying anymore */
3304		if (val & FW_IND_EVENT_PENDING)
3305			break;
3306
3307		if (val & FW_IND_INITIALIZED)
3308			break;
3309
3310		if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
3311			/* Fix potential race by repeating CORE_BASE writes */
3312			ath10k_pci_enable_legacy_irq(ar);
3313
3314		mdelay(10);
3315	} while (time_before(jiffies, timeout));
3316
3317	ath10k_pci_disable_and_clear_legacy_irq(ar);
3318	ath10k_pci_irq_msi_fw_mask(ar);
3319
3320	if (val == 0xffffffff) {
3321		ath10k_err(ar, "failed to read device register, device is gone\n");
3322		return -EIO;
3323	}
3324
3325	if (val & FW_IND_EVENT_PENDING) {
3326		ath10k_warn(ar, "device has crashed during init\n");
3327		return -ECOMM;
3328	}
3329
3330	if (!(val & FW_IND_INITIALIZED)) {
3331		ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
3332			   val);
3333		return -ETIMEDOUT;
3334	}
3335
3336	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
3337	return 0;
3338}
3339
3340static int ath10k_pci_cold_reset(struct ath10k *ar)
3341{
3342	u32 val;
3343
3344	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
3345
3346	spin_lock_bh(&ar->data_lock);
3347
3348	ar->stats.fw_cold_reset_counter++;
3349
3350	spin_unlock_bh(&ar->data_lock);
3351
3352	/* Put Target, including PCIe, into RESET. */
3353	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
3354	val |= 1;
3355	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3356
3357	/* After writing into SOC_GLOBAL_RESET to put the device into reset
3358	 * and then pulling it out again, pcie may not be stable for a while;
3359	 * an immediate pcie register access can cause a bus error. Add a
3360	 * delay before any pcie access request to avoid this issue.
3361	 */
3362	msleep(20);
3363
3364	/* Pull Target, including PCIe, out of RESET. */
3365	val &= ~1;
3366	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3367
3368	msleep(20);
3369
3370	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
3371
3372	return 0;
3373}
3374
3375static int ath10k_pci_claim(struct ath10k *ar)
3376{
3377	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3378	struct pci_dev *pdev = ar_pci->pdev;
3379	int ret;
3380
3381	pci_set_drvdata(pdev, ar);
3382
3383	ret = pci_enable_device(pdev);
3384	if (ret) {
3385		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
3386		return ret;
3387	}
3388
3389	ret = pci_request_region(pdev, BAR_NUM, "ath");
3390	if (ret) {
3391		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
3392			   ret);
3393		goto err_device;
3394	}
3395
3396	/* Target expects 32 bit DMA. Enforce it. */
3397	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3398	if (ret) {
3399		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
3400		goto err_region;
3401	}
3402
3403	pci_set_master(pdev);
3404
3405	/* Arrange for access to Target SoC registers. */
3406	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
3407	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
3408	if (!ar_pci->mem) {
3409		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
3410		ret = -EIO;
3411		goto err_region;
3412	}
3413
3414	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
3415	return 0;
3416
3417err_region:
3418	pci_release_region(pdev, BAR_NUM);
3419
3420err_device:
3421	pci_disable_device(pdev);
3422
3423	return ret;
3424}
3425
3426static void ath10k_pci_release(struct ath10k *ar)
3427{
3428	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3429	struct pci_dev *pdev = ar_pci->pdev;
3430
3431	pci_iounmap(pdev, ar_pci->mem);
3432	pci_release_region(pdev, BAR_NUM);
3433	pci_disable_device(pdev);
3434}
3435
3436static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
3437{
3438	const struct ath10k_pci_supp_chip *supp_chip;
3439	int i;
3440	u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
3441
3442	for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
3443		supp_chip = &ath10k_pci_supp_chips[i];
3444
3445		if (supp_chip->dev_id == dev_id &&
3446		    supp_chip->rev_id == rev_id)
3447			return true;
3448	}
3449
3450	return false;
3451}
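/* Usage sketch: this helper extracts the revision with MS(chip_id,
 * SOC_CHIP_ID_REV) and matches the (dev_id, rev_id) pair against the
 * ath10k_pci_supp_chips allow-list declared near the top of this file,
 * e.g. from probe (condensed, illustrative only):
 *
 *	if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id))
 *		return -ENODEV;
 */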
3452
3453int ath10k_pci_setup_resource(struct ath10k *ar)
3454{
3455	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3456	struct ath10k_ce *ce = ath10k_ce_priv(ar);
3457	int ret;
3458
3459	spin_lock_init(&ce->ce_lock);
3460	spin_lock_init(&ar_pci->ps_lock);
3461	mutex_init(&ar_pci->ce_diag_mutex);
3462
3463	INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);
3464
3465	timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
3466
3467	ar_pci->attr = kmemdup(pci_host_ce_config_wlan,
3468			       sizeof(pci_host_ce_config_wlan),
3469			       GFP_KERNEL);
3470	if (!ar_pci->attr)
3471		return -ENOMEM;
3472
3473	ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,
3474				      sizeof(pci_target_ce_config_wlan),
3475				      GFP_KERNEL);
3476	if (!ar_pci->pipe_config) {
3477		ret = -ENOMEM;
3478		goto err_free_attr;
3479	}
3480
3481	ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan,
3482				       sizeof(pci_target_service_to_ce_map_wlan),
3483				       GFP_KERNEL);
3484	if (!ar_pci->serv_to_pipe) {
3485		ret = -ENOMEM;
3486		goto err_free_pipe_config;
3487	}
3488
3489	if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
3490		ath10k_pci_override_ce_config(ar);
3491
3492	ret = ath10k_pci_alloc_pipes(ar);
3493	if (ret) {
3494		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
3495			   ret);
3496		goto err_free_serv_to_pipe;
3497	}
3498
3499	return 0;
3500
3501err_free_serv_to_pipe:
3502	kfree(ar_pci->serv_to_pipe);
3503err_free_pipe_config:
3504	kfree(ar_pci->pipe_config);
3505err_free_attr:
3506	kfree(ar_pci->attr);
3507	return ret;
3508}
3509
3510void ath10k_pci_release_resource(struct ath10k *ar)
3511{
3512	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3513
3514	ath10k_pci_rx_retry_sync(ar);
3515	netif_napi_del(&ar->napi);
3516	ath10k_pci_ce_deinit(ar);
3517	ath10k_pci_free_pipes(ar);
3518	kfree(ar_pci->attr);
3519	kfree(ar_pci->pipe_config);
3520	kfree(ar_pci->serv_to_pipe);
3521}
3522
3523static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
3524	.read32		= ath10k_bus_pci_read32,
3525	.write32	= ath10k_bus_pci_write32,
3526	.get_num_banks	= ath10k_pci_get_num_banks,
3527};
3528
3529static int ath10k_pci_probe(struct pci_dev *pdev,
3530			    const struct pci_device_id *pci_dev)
3531{
3532	int ret = 0;
3533	struct ath10k *ar;
3534	struct ath10k_pci *ar_pci;
3535	enum ath10k_hw_rev hw_rev;
3536	struct ath10k_bus_params bus_params = {};
3537	bool pci_ps, is_qca988x = false;
3538	int (*pci_soft_reset)(struct ath10k *ar);
3539	int (*pci_hard_reset)(struct ath10k *ar);
3540	u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
3541
3542	switch (pci_dev->device) {
3543	case QCA988X_2_0_DEVICE_ID_UBNT:
3544	case QCA988X_2_0_DEVICE_ID:
3545		hw_rev = ATH10K_HW_QCA988X;
3546		pci_ps = false;
3547		is_qca988x = true;
3548		pci_soft_reset = ath10k_pci_warm_reset;
3549		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3550		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
3551		break;
3552	case QCA9887_1_0_DEVICE_ID:
3553		hw_rev = ATH10K_HW_QCA9887;
3554		pci_ps = false;
3555		pci_soft_reset = ath10k_pci_warm_reset;
3556		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3557		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
3558		break;
3559	case QCA6164_2_1_DEVICE_ID:
3560	case QCA6174_2_1_DEVICE_ID:
3561		hw_rev = ATH10K_HW_QCA6174;
3562		pci_ps = true;
3563		pci_soft_reset = ath10k_pci_warm_reset;
3564		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3565		targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
3566		break;
3567	case QCA99X0_2_0_DEVICE_ID:
3568		hw_rev = ATH10K_HW_QCA99X0;
3569		pci_ps = false;
3570		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3571		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3572		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3573		break;
3574	case QCA9984_1_0_DEVICE_ID:
3575		hw_rev = ATH10K_HW_QCA9984;
3576		pci_ps = false;
3577		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3578		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3579		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3580		break;
3581	case QCA9888_2_0_DEVICE_ID:
3582		hw_rev = ATH10K_HW_QCA9888;
3583		pci_ps = false;
3584		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3585		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3586		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3587		break;
3588	case QCA9377_1_0_DEVICE_ID:
3589		hw_rev = ATH10K_HW_QCA9377;
3590		pci_ps = true;
3591		pci_soft_reset = ath10k_pci_warm_reset;
3592		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3593		targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
3594		break;
3595	default:
3596		WARN_ON(1);
3597		return -ENOTSUPP;
3598	}
3599
3600	ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
3601				hw_rev, &ath10k_pci_hif_ops);
3602	if (!ar) {
3603		dev_err(&pdev->dev, "failed to allocate core\n");
3604		return -ENOMEM;
3605	}
3606
3607	ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
3608		   pdev->vendor, pdev->device,
3609		   pdev->subsystem_vendor, pdev->subsystem_device);
3610
3611	ar_pci = ath10k_pci_priv(ar);
3612	ar_pci->pdev = pdev;
3613	ar_pci->dev = &pdev->dev;
3614	ar_pci->ar = ar;
3615	ar->dev_id = pci_dev->device;
3616	ar_pci->pci_ps = pci_ps;
3617	ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
3618	ar_pci->pci_soft_reset = pci_soft_reset;
3619	ar_pci->pci_hard_reset = pci_hard_reset;
3620	ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
3621	ar->ce_priv = &ar_pci->ce;
3622
3623	ar->id.vendor = pdev->vendor;
3624	ar->id.device = pdev->device;
3625	ar->id.subsystem_vendor = pdev->subsystem_vendor;
3626	ar->id.subsystem_device = pdev->subsystem_device;
3627
3628	timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
3629
3630	ret = ath10k_pci_setup_resource(ar);
3631	if (ret) {
3632		ath10k_err(ar, "failed to setup resource: %d\n", ret);
3633		goto err_core_destroy;
3634	}
3635
3636	ret = ath10k_pci_claim(ar);
3637	if (ret) {
3638		ath10k_err(ar, "failed to claim device: %d\n", ret);
3639		goto err_free_pipes;
3640	}
3641
3642	ret = ath10k_pci_force_wake(ar);
3643	if (ret) {
3644		ath10k_warn(ar, "failed to wake up device : %d\n", ret);
3645		goto err_sleep;
3646	}
3647
3648	ath10k_pci_ce_deinit(ar);
3649	ath10k_pci_irq_disable(ar);
3650
3651	ret = ath10k_pci_init_irq(ar);
3652	if (ret) {
3653		ath10k_err(ar, "failed to init irqs: %d\n", ret);
3654		goto err_sleep;
3655	}
3656
3657	ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
3658		    ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
3659		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);
3660
3661	ret = ath10k_pci_request_irq(ar);
3662	if (ret) {
3663		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
3664		goto err_deinit_irq;
3665	}
3666
3667	bus_params.dev_type = ATH10K_DEV_TYPE_LL;
3668	bus_params.link_can_suspend = true;
3669	/* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that
3670	 * fall off the bus during chip_reset. These chips have the same pci
3671	 * device id as the QCA9880 BR4A or 2R4E, hence this extra check.
3672	 */
3673	if (is_qca988x) {
3674		bus_params.chip_id =
3675			ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3676		if (bus_params.chip_id != 0xffffffff) {
3677			if (!ath10k_pci_chip_is_supported(pdev->device,
3678							  bus_params.chip_id)) {
3679				ret = -ENODEV;
3680				goto err_unsupported;
3681			}
3682		}
3683	}
3684
3685	ret = ath10k_pci_chip_reset(ar);
3686	if (ret) {
3687		ath10k_err(ar, "failed to reset chip: %d\n", ret);
3688		goto err_free_irq;
3689	}
3690
3691	bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3692	if (bus_params.chip_id == 0xffffffff) {
3693		ret = -ENODEV;
3694		goto err_unsupported;
3695	}
3696
3697	if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
3698		ret = -ENODEV;
3699		goto err_unsupported;
3700	}
3701
3702	ret = ath10k_core_register(ar, &bus_params);
3703	if (ret) {
3704		ath10k_err(ar, "failed to register driver core: %d\n", ret);
3705		goto err_free_irq;
3706	}
3707
3708	return 0;
3709
3710err_unsupported:
3711	ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
3712		   pdev->device, bus_params.chip_id);
3713
3714err_free_irq:
3715	ath10k_pci_free_irq(ar);
3716
3717err_deinit_irq:
3718	ath10k_pci_release_resource(ar);
3719
3720err_sleep:
3721	ath10k_pci_sleep_sync(ar);
3722	ath10k_pci_release(ar);
3723
3724err_free_pipes:
3725	ath10k_pci_free_pipes(ar);
3726
3727err_core_destroy:
3728	ath10k_core_destroy(ar);
3729
3730	return ret;
3731}
3732
3733static void ath10k_pci_remove(struct pci_dev *pdev)
3734{
3735	struct ath10k *ar = pci_get_drvdata(pdev);
3736
3737	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
3738
3739	if (!ar)
3740		return;
3741
3742	ath10k_core_unregister(ar);
3743	ath10k_pci_free_irq(ar);
3744	ath10k_pci_deinit_irq(ar);
3745	ath10k_pci_release_resource(ar);
3746	ath10k_pci_sleep_sync(ar);
3747	ath10k_pci_release(ar);
3748	ath10k_core_destroy(ar);
3749}
3750
3751MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
3752
3753static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
3754{
3755	struct ath10k *ar = dev_get_drvdata(dev);
3756	int ret;
3757
3758	ret = ath10k_pci_suspend(ar);
3759	if (ret)
3760		ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
3761
3762	return ret;
3763}
3764
3765static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
3766{
3767	struct ath10k *ar = dev_get_drvdata(dev);
3768	int ret;
3769
3770	ret = ath10k_pci_resume(ar);
3771	if (ret)
3772		ath10k_warn(ar, "failed to resume hif: %d\n", ret);
3773
3774	return ret;
3775}
3776
3777static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
3778			 ath10k_pci_pm_suspend,
3779			 ath10k_pci_pm_resume);
3780
3781static struct pci_driver ath10k_pci_driver = {
3782	.name = "ath10k_pci",
3783	.id_table = ath10k_pci_id_table,
3784	.probe = ath10k_pci_probe,
3785	.remove = ath10k_pci_remove,
3786#ifdef CONFIG_PM
3787	.driver.pm = &ath10k_pci_pm_ops,
3788#endif
3789};
3790
3791static int __init ath10k_pci_init(void)
3792{
3793	int ret1, ret2;
3794
3795	ret1 = pci_register_driver(&ath10k_pci_driver);
3796	if (ret1)
3797		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
3798		       ret1);
3799
3800	ret2 = ath10k_ahb_init();
3801	if (ret2)
3802		printk(KERN_ERR "ahb init failed: %d\n", ret2);
3803
3804	if (ret1 && ret2)
3805		return ret1;
3806
3807	/* registered to at least one bus */
3808	return 0;
3809}
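/* Outcome recap for the init above: the module loads if either bus
 * registers successfully.
 *
 *	ret1 != 0 && ret2 != 0  ->  return ret1  (both buses failed)
 *	anything else           ->  return 0     (at least one bus usable)
 */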
3810module_init(ath10k_pci_init);
3811
3812static void __exit ath10k_pci_exit(void)
3813{
3814	pci_unregister_driver(&ath10k_pci_driver);
3815	ath10k_ahb_exit();
3816}
3817
3818module_exit(ath10k_pci_exit);
3819
3820MODULE_AUTHOR("Qualcomm Atheros");
3821MODULE_DESCRIPTION("Driver support for Qualcomm Atheros PCIe/AHB 802.11ac WLAN devices");
3822MODULE_LICENSE("Dual BSD/GPL");
3823
3824/* QCA988x 2.0 firmware files */
3825MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
3826MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
3827MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3828MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3829MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
3830MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3831
3832/* QCA9887 1.0 firmware files */
3833MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3834MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
3835MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3836
3837/* QCA6174 2.1 firmware files */
3838MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
3839MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
3840MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
3841MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3842
3843/* QCA6174 3.1 firmware files */
3844MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3845MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3846MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
3847MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
3848MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3849
3850/* QCA9377 1.0 firmware files */
3851MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE);
3852MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3853MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);
v4.10.11
 
   1/*
   2 * Copyright (c) 2005-2011 Atheros Communications Inc.
   3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
   4 *
   5 * Permission to use, copy, modify, and/or distribute this software for any
   6 * purpose with or without fee is hereby granted, provided that the above
   7 * copyright notice and this permission notice appear in all copies.
   8 *
   9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16 */
  17
  18#include <linux/pci.h>
  19#include <linux/module.h>
  20#include <linux/interrupt.h>
  21#include <linux/spinlock.h>
  22#include <linux/bitops.h>
  23
  24#include "core.h"
  25#include "debug.h"
  26
  27#include "targaddrs.h"
  28#include "bmi.h"
  29
  30#include "hif.h"
  31#include "htc.h"
  32
  33#include "ce.h"
  34#include "pci.h"
  35
  36enum ath10k_pci_reset_mode {
  37	ATH10K_PCI_RESET_AUTO = 0,
  38	ATH10K_PCI_RESET_WARM_ONLY = 1,
  39};
  40
  41static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
  42static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
  43
  44module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
  45MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
  46
  47module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
  48MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
  49
  50/* how long to wait for the target to initialise, in ms */
  51#define ATH10K_PCI_TARGET_WAIT 3000
  52#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
  53
  54static const struct pci_device_id ath10k_pci_id_table[] = {
  55	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
  56	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
  57	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
  58	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
  59	{ PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
  60	{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
  61	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
  62	{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
  63	{0}
  64};
  65
  66static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
  67	/* QCA988X pre 2.0 chips are not supported because they need some nasty
  68	 * hacks. ath10k doesn't have them and these devices crash horribly
  69	 * because of that.
  70	 */
  71	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
  72
  73	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
  74	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
  75	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
  76	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
  77	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
  78
  79	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
  80	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
  81	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
  82	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
  83	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
  84
  85	{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
  86
  87	{ QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },
  88
  89	{ QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },
  90
  91	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
  92	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
  93
  94	{ QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
  95};
  96
  97static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
  98static int ath10k_pci_cold_reset(struct ath10k *ar);
  99static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
 100static int ath10k_pci_init_irq(struct ath10k *ar);
 101static int ath10k_pci_deinit_irq(struct ath10k *ar);
 102static int ath10k_pci_request_irq(struct ath10k *ar);
 103static void ath10k_pci_free_irq(struct ath10k *ar);
 104static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
 105			       struct ath10k_ce_pipe *rx_pipe,
 106			       struct bmi_xfer *xfer);
 107static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
 108static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
 109static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
 110static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
 111static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
 112static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
 113static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
 114
 115static struct ce_attr host_ce_config_wlan[] = {
 116	/* CE0: host->target HTC control and raw streams */
 117	{
 118		.flags = CE_ATTR_FLAGS,
 119		.src_nentries = 16,
 120		.src_sz_max = 256,
 121		.dest_nentries = 0,
 122		.send_cb = ath10k_pci_htc_tx_cb,
 123	},
 124
 125	/* CE1: target->host HTT + HTC control */
 126	{
 127		.flags = CE_ATTR_FLAGS,
 128		.src_nentries = 0,
 129		.src_sz_max = 2048,
 130		.dest_nentries = 512,
 131		.recv_cb = ath10k_pci_htt_htc_rx_cb,
 132	},
 133
 134	/* CE2: target->host WMI */
 135	{
 136		.flags = CE_ATTR_FLAGS,
 137		.src_nentries = 0,
 138		.src_sz_max = 2048,
 139		.dest_nentries = 128,
 140		.recv_cb = ath10k_pci_htc_rx_cb,
 141	},
 142
 143	/* CE3: host->target WMI */
 144	{
 145		.flags = CE_ATTR_FLAGS,
 146		.src_nentries = 32,
 147		.src_sz_max = 2048,
 148		.dest_nentries = 0,
 149		.send_cb = ath10k_pci_htc_tx_cb,
 150	},
 151
 152	/* CE4: host->target HTT */
 153	{
 154		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
 155		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
 156		.src_sz_max = 256,
 157		.dest_nentries = 0,
 158		.send_cb = ath10k_pci_htt_tx_cb,
 159	},
 160
 161	/* CE5: target->host HTT (HIF->HTT) */
 162	{
 163		.flags = CE_ATTR_FLAGS,
 164		.src_nentries = 0,
 165		.src_sz_max = 512,
 166		.dest_nentries = 512,
 167		.recv_cb = ath10k_pci_htt_rx_cb,
 168	},
 169
 170	/* CE6: target autonomous hif_memcpy */
 171	{
 172		.flags = CE_ATTR_FLAGS,
 173		.src_nentries = 0,
 174		.src_sz_max = 0,
 175		.dest_nentries = 0,
 176	},
 177
 178	/* CE7: ce_diag, the Diagnostic Window */
 179	{
 180		.flags = CE_ATTR_FLAGS,
 181		.src_nentries = 2,
 182		.src_sz_max = DIAG_TRANSFER_LIMIT,
 183		.dest_nentries = 2,
 184	},
 185
 186	/* CE8: target->host pktlog */
 187	{
 188		.flags = CE_ATTR_FLAGS,
 189		.src_nentries = 0,
 190		.src_sz_max = 2048,
 191		.dest_nentries = 128,
 192		.recv_cb = ath10k_pci_pktlog_rx_cb,
 193	},
 194
 195	/* CE9: target autonomous qcache memcpy */
 196	{
 197		.flags = CE_ATTR_FLAGS,
 198		.src_nentries = 0,
 199		.src_sz_max = 0,
 200		.dest_nentries = 0,
 201	},
 202
 203	/* CE10: target autonomous hif memcpy */
 204	{
 205		.flags = CE_ATTR_FLAGS,
 206		.src_nentries = 0,
 207		.src_sz_max = 0,
 208		.dest_nentries = 0,
 209	},
 210
 211	/* CE11: target autonomous hif memcpy */
 212	{
 213		.flags = CE_ATTR_FLAGS,
 214		.src_nentries = 0,
 215		.src_sz_max = 0,
 216		.dest_nentries = 0,
 217	},
 218};
 219
 220/* Target firmware's Copy Engine configuration. */
 221static struct ce_pipe_config target_ce_config_wlan[] = {
 222	/* CE0: host->target HTC control and raw streams */
 223	{
 224		.pipenum = __cpu_to_le32(0),
 225		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
 226		.nentries = __cpu_to_le32(32),
 227		.nbytes_max = __cpu_to_le32(256),
 228		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 229		.reserved = __cpu_to_le32(0),
 230	},
 231
 232	/* CE1: target->host HTT + HTC control */
 233	{
 234		.pipenum = __cpu_to_le32(1),
 235		.pipedir = __cpu_to_le32(PIPEDIR_IN),
 236		.nentries = __cpu_to_le32(32),
 237		.nbytes_max = __cpu_to_le32(2048),
 238		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 239		.reserved = __cpu_to_le32(0),
 240	},
 241
 242	/* CE2: target->host WMI */
 243	{
 244		.pipenum = __cpu_to_le32(2),
 245		.pipedir = __cpu_to_le32(PIPEDIR_IN),
 246		.nentries = __cpu_to_le32(64),
 247		.nbytes_max = __cpu_to_le32(2048),
 248		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 249		.reserved = __cpu_to_le32(0),
 250	},
 251
 252	/* CE3: host->target WMI */
 253	{
 254		.pipenum = __cpu_to_le32(3),
 255		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
 256		.nentries = __cpu_to_le32(32),
 257		.nbytes_max = __cpu_to_le32(2048),
 258		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 259		.reserved = __cpu_to_le32(0),
 260	},
 261
 262	/* CE4: host->target HTT */
 263	{
 264		.pipenum = __cpu_to_le32(4),
 265		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
 266		.nentries = __cpu_to_le32(256),
 267		.nbytes_max = __cpu_to_le32(256),
 268		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 269		.reserved = __cpu_to_le32(0),
 270	},
 271
 272	/* NB: 50% of src nentries, since tx has 2 frags */
 273
 274	/* CE5: target->host HTT (HIF->HTT) */
 275	{
 276		.pipenum = __cpu_to_le32(5),
 277		.pipedir = __cpu_to_le32(PIPEDIR_IN),
 278		.nentries = __cpu_to_le32(32),
 279		.nbytes_max = __cpu_to_le32(512),
 280		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 281		.reserved = __cpu_to_le32(0),
 282	},
 283
 284	/* CE6: Reserved for target autonomous hif_memcpy */
 285	{
 286		.pipenum = __cpu_to_le32(6),
 287		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
 288		.nentries = __cpu_to_le32(32),
 289		.nbytes_max = __cpu_to_le32(4096),
 290		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 291		.reserved = __cpu_to_le32(0),
 292	},
 293
 294	/* CE7 used only by Host */
 295	{
 296		.pipenum = __cpu_to_le32(7),
 297		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
 298		.nentries = __cpu_to_le32(0),
 299		.nbytes_max = __cpu_to_le32(0),
 300		.flags = __cpu_to_le32(0),
 301		.reserved = __cpu_to_le32(0),
 302	},
 303
 304	/* CE8: target->host pktlog */
 305	{
 306		.pipenum = __cpu_to_le32(8),
 307		.pipedir = __cpu_to_le32(PIPEDIR_IN),
 308		.nentries = __cpu_to_le32(64),
 309		.nbytes_max = __cpu_to_le32(2048),
 310		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
 311		.reserved = __cpu_to_le32(0),
 312	},
 313
 314	/* CE9 target autonomous qcache memcpy */
 315	{
 316		.pipenum = __cpu_to_le32(9),
 317		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
 318		.nentries = __cpu_to_le32(32),
 319		.nbytes_max = __cpu_to_le32(2048),
 320		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
 321		.reserved = __cpu_to_le32(0),
 322	},
 323
 324	/* It is not necessary to send the target a wlan configuration for
 325	 * CE10 and CE11, as these CEs are not actively used by the target.
 326	 */
 327};
 328
 329/*
 330 * Map from service/endpoint to Copy Engine.
 331 * This table is derived from the CE_PCI TABLE, above.
 332 * It is passed to the Target at startup for use by firmware.
 333 */
 334static struct service_to_pipe target_service_to_ce_map_wlan[] = {
 335	{
 336		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
 337		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
 338		__cpu_to_le32(3),
 339	},
 340	{
 341		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
 342		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
 343		__cpu_to_le32(2),
 344	},
 345	{
 346		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
 347		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
 348		__cpu_to_le32(3),
 349	},
 350	{
 351		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
 352		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
 353		__cpu_to_le32(2),
 354	},
 355	{
 356		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
 357		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
 358		__cpu_to_le32(3),
 359	},
 360	{
 361		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
 362		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
 363		__cpu_to_le32(2),
 364	},
 365	{
 366		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
 367		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
 368		__cpu_to_le32(3),
 369	},
 370	{
 371		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
 372		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
 373		__cpu_to_le32(2),
 374	},
 375	{
 376		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
 377		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
 378		__cpu_to_le32(3),
 379	},
 380	{
 381		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
 382		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
 383		__cpu_to_le32(2),
 384	},
 385	{
 386		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
 387		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
 388		__cpu_to_le32(0),
 389	},
 390	{
 391		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
 392		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
 393		__cpu_to_le32(1),
 394	},
 395	{ /* not used */
 396		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
 397		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
 398		__cpu_to_le32(0),
 399	},
 400	{ /* not used */
 401		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
 402		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
 403		__cpu_to_le32(1),
 404	},
 405	{
 406		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
 407		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
 408		__cpu_to_le32(4),
 409	},
 410	{
 411		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
 412		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
 413		__cpu_to_le32(5),
 414	},
 415
 416	/* (Additions here) */
 417
 418	{ /* must be last */
 419		__cpu_to_le32(0),
 420		__cpu_to_le32(0),
 421		__cpu_to_le32(0),
 422	},
 423};
 424
 425static bool ath10k_pci_is_awake(struct ath10k *ar)
 426{
 427	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 428	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 429			   RTC_STATE_ADDRESS);
 430
 431	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
 432}
 433
 434static void __ath10k_pci_wake(struct ath10k *ar)
 435{
 436	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 437
 438	lockdep_assert_held(&ar_pci->ps_lock);
 439
 440	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
 441		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 442
 443	iowrite32(PCIE_SOC_WAKE_V_MASK,
 444		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 445		  PCIE_SOC_WAKE_ADDRESS);
 446}
 447
 448static void __ath10k_pci_sleep(struct ath10k *ar)
 449{
 450	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 451
 452	lockdep_assert_held(&ar_pci->ps_lock);
 453
 454	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
 455		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 456
 457	iowrite32(PCIE_SOC_WAKE_RESET,
 458		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 459		  PCIE_SOC_WAKE_ADDRESS);
 460	ar_pci->ps_awake = false;
 461}
 462
 463static int ath10k_pci_wake_wait(struct ath10k *ar)
 464{
 465	int tot_delay = 0;
 466	int curr_delay = 5;
 467
 468	while (tot_delay < PCIE_WAKE_TIMEOUT) {
 469		if (ath10k_pci_is_awake(ar)) {
 470			if (tot_delay > PCIE_WAKE_LATE_US)
 471				ath10k_warn(ar, "device wakeup took %d ms which is unusually long; otherwise it works normally.\n",
 472					    tot_delay / 1000);
 473			return 0;
 474		}
 475
 476		udelay(curr_delay);
 477		tot_delay += curr_delay;
 478
 479		if (curr_delay < 50)
 480			curr_delay += 5;
 481	}
 482
 483	return -ETIMEDOUT;
 484}
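/* [Editor's note] Worked example of the backoff above: the per-poll
 * delay grows 5, 10, ..., 50 us and then stays at 50 us, so the first
 * ten polls consume 5 + 10 + ... + 50 = 275 us; after that the loop
 * advances in fixed 50 us steps until tot_delay reaches
 * PCIE_WAKE_TIMEOUT and -ETIMEDOUT is returned.
 */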
 485
 486static int ath10k_pci_force_wake(struct ath10k *ar)
 487{
 488	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 489	unsigned long flags;
 490	int ret = 0;
 491
 492	if (ar_pci->pci_ps)
 493		return ret;
 494
 495	spin_lock_irqsave(&ar_pci->ps_lock, flags);
 496
 497	if (!ar_pci->ps_awake) {
 498		iowrite32(PCIE_SOC_WAKE_V_MASK,
 499			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 500			  PCIE_SOC_WAKE_ADDRESS);
 501
 502		ret = ath10k_pci_wake_wait(ar);
 503		if (ret == 0)
 504			ar_pci->ps_awake = true;
 505	}
 506
 507	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 508
 509	return ret;
 510}
 511
 512static void ath10k_pci_force_sleep(struct ath10k *ar)
 513{
 514	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 515	unsigned long flags;
 516
 517	spin_lock_irqsave(&ar_pci->ps_lock, flags);
 518
 519	iowrite32(PCIE_SOC_WAKE_RESET,
 520		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 521		  PCIE_SOC_WAKE_ADDRESS);
 522	ar_pci->ps_awake = false;
 523
 524	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 525}
 526
 527static int ath10k_pci_wake(struct ath10k *ar)
 528{
 529	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 530	unsigned long flags;
 531	int ret = 0;
 532
 533	if (ar_pci->pci_ps == 0)
 534		return ret;
 535
 536	spin_lock_irqsave(&ar_pci->ps_lock, flags);
 537
 538	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
 539		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 540
 541	/* This function can be called very frequently. To avoid excessive
 542	 * CPU stalls for MMIO reads use a cache var to hold the device state.
 543	 */
 544	if (!ar_pci->ps_awake) {
 545		__ath10k_pci_wake(ar);
 546
 547		ret = ath10k_pci_wake_wait(ar);
 548		if (ret == 0)
 549			ar_pci->ps_awake = true;
 550	}
 551
 552	if (ret == 0) {
 553		ar_pci->ps_wake_refcount++;
 554		WARN_ON(ar_pci->ps_wake_refcount == 0);
 555	}
 556
 557	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 558
 559	return ret;
 560}
 561
 562static void ath10k_pci_sleep(struct ath10k *ar)
 563{
 564	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 565	unsigned long flags;
 566
 567	if (ar_pci->pci_ps == 0)
 568		return;
 569
 570	spin_lock_irqsave(&ar_pci->ps_lock, flags);
 571
 572	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
 573		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 574
 575	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
 576		goto skip;
 577
 578	ar_pci->ps_wake_refcount--;
 579
 580	mod_timer(&ar_pci->ps_timer, jiffies +
 581		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
 582
 583skip:
 584	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 585}
 586
 587static void ath10k_pci_ps_timer(unsigned long ptr)
 588{
 589	struct ath10k *ar = (void *)ptr;
 590	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 591	unsigned long flags;
 592
 593	spin_lock_irqsave(&ar_pci->ps_lock, flags);
 594
 595	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
 596		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 597
 598	if (ar_pci->ps_wake_refcount > 0)
 599		goto skip;
 600
 601	__ath10k_pci_sleep(ar);
 602
 603skip:
 604	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 605}
 606
 607static void ath10k_pci_sleep_sync(struct ath10k *ar)
 608{
 609	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 610	unsigned long flags;
 611
 612	if (ar_pci->pci_ps == 0) {
 613		ath10k_pci_force_sleep(ar);
 614		return;
 615	}
 616
 617	del_timer_sync(&ar_pci->ps_timer);
 618
 619	spin_lock_irqsave(&ar_pci->ps_lock, flags);
 620	WARN_ON(ar_pci->ps_wake_refcount > 0);
 621	__ath10k_pci_sleep(ar);
 622	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 623}
 624
 625static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
 626{
 627	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 628	int ret;
 629
 630	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
 631		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
 632			    offset, offset + sizeof(value), ar_pci->mem_len);
 633		return;
 634	}
 635
 636	ret = ath10k_pci_wake(ar);
 637	if (ret) {
 638		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
 639			    value, offset, ret);
 640		return;
 641	}
 642
 643	iowrite32(value, ar_pci->mem + offset);
 644	ath10k_pci_sleep(ar);
 645}
 646
 647static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
 648{
 649	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 650	u32 val;
 651	int ret;
 652
 653	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
 654		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
 655			    offset, offset + sizeof(val), ar_pci->mem_len);
 656		return 0;
 657	}
 658
 659	ret = ath10k_pci_wake(ar);
 660	if (ret) {
 661		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
 662			    offset, ret);
 663		return 0xffffffff;
 664	}
 665
 666	val = ioread32(ar_pci->mem + offset);
 667	ath10k_pci_sleep(ar);
 668
 669	return val;
 670}
 671
 672inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
 673{
 674	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 675
 676	ar_pci->bus_ops->write32(ar, offset, value);
 677}
 678
 679inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
 680{
 681	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 682
 683	return ar_pci->bus_ops->read32(ar, offset);
 684}
 685
 686u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
 687{
 688	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
 689}
 690
 691void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
 692{
 693	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
 694}
 695
 696u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
 697{
 698	return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
 699}
 700
 701void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
 702{
 703	ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
 704}
 705
 706bool ath10k_pci_irq_pending(struct ath10k *ar)
 707{
 708	u32 cause;
 709
 710	/* Check if the shared legacy irq is for us */
 711	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 712				  PCIE_INTR_CAUSE_ADDRESS);
 713	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
 714		return true;
 715
 716	return false;
 717}
 718
 719void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
 720{
 721	/* IMPORTANT: INTR_CLR register has to be set after
 722	 * INTR_ENABLE is set to 0, otherwise the interrupt cannot
 723	 * actually be cleared. */
 724	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
 725			   0);
 726	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
 727			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
 728
 729	/* IMPORTANT: this extra read transaction is required to
 730	 * flush the posted write buffer. */
 731	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 732				PCIE_INTR_ENABLE_ADDRESS);
 733}
 734
 735void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
 736{
 737	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
 738			   PCIE_INTR_ENABLE_ADDRESS,
 739			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
 740
 741	/* IMPORTANT: this extra read transaction is required to
 742	 * flush the posted write buffer. */
 743	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 744				PCIE_INTR_ENABLE_ADDRESS);
 745}
 746
 747static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
 748{
 749	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 750
 751	if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
 752		return "msi";
 753
 754	return "legacy";
 755}
 756
 757static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
 758{
 759	struct ath10k *ar = pipe->hif_ce_state;
 760	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 761	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
 762	struct sk_buff *skb;
 763	dma_addr_t paddr;
 764	int ret;
 765
 766	skb = dev_alloc_skb(pipe->buf_sz);
 767	if (!skb)
 768		return -ENOMEM;
 769
 770	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
 771
 772	paddr = dma_map_single(ar->dev, skb->data,
 773			       skb->len + skb_tailroom(skb),
 774			       DMA_FROM_DEVICE);
 775	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
 776		ath10k_warn(ar, "failed to dma map pci rx buf\n");
 777		dev_kfree_skb_any(skb);
 778		return -EIO;
 779	}
 780
 781	ATH10K_SKB_RXCB(skb)->paddr = paddr;
 782
 783	spin_lock_bh(&ar_pci->ce_lock);
 784	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
 785	spin_unlock_bh(&ar_pci->ce_lock);
 786	if (ret) {
 787		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
 788				 DMA_FROM_DEVICE);
 789		dev_kfree_skb_any(skb);
 790		return ret;
 791	}
 792
 793	return 0;
 794}
 795
 796static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
 797{
 798	struct ath10k *ar = pipe->hif_ce_state;
 799	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 800	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
 801	int ret, num;
 802
 803	if (pipe->buf_sz == 0)
 804		return;
 805
 806	if (!ce_pipe->dest_ring)
 807		return;
 808
 809	spin_lock_bh(&ar_pci->ce_lock);
 810	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
 811	spin_unlock_bh(&ar_pci->ce_lock);
 812
 813	while (num >= 0) {
 814		ret = __ath10k_pci_rx_post_buf(pipe);
 815		if (ret) {
 816			if (ret == -ENOSPC)
 817				break;
 818			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
 819			mod_timer(&ar_pci->rx_post_retry, jiffies +
 820				  ATH10K_PCI_RX_POST_RETRY_MS);
 821			break;
 822		}
 823		num--;
 824	}
 825}
 826
 827void ath10k_pci_rx_post(struct ath10k *ar)
 828{
 829	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 830	int i;
 831
 832	for (i = 0; i < CE_COUNT; i++)
 833		ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
 834}
 835
 836void ath10k_pci_rx_replenish_retry(unsigned long ptr)
 837{
 838	struct ath10k *ar = (void *)ptr;
 839
 840	ath10k_pci_rx_post(ar);
 841}
 842
 843static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
 844{
 845	u32 val = 0;
 846
 847	switch (ar->hw_rev) {
 848	case ATH10K_HW_QCA988X:
 849	case ATH10K_HW_QCA9887:
 850	case ATH10K_HW_QCA6174:
 851	case ATH10K_HW_QCA9377:
 852		val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 853					  CORE_CTRL_ADDRESS) &
 854		       0x7ff) << 21;
 855		break;
 856	case ATH10K_HW_QCA9888:
 857	case ATH10K_HW_QCA99X0:
 858	case ATH10K_HW_QCA9984:
 859	case ATH10K_HW_QCA4019:
 860		val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
 861		break;
 862	}
 863
 864	val |= 0x100000 | (addr & 0xfffff);
 865	return val;
 866}
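/* [Editor's note] Worked example of the conversion above, using an
 * illustrative address: on the QCA988X family a target CPU address of
 * 0x0040d123 becomes (CORE_CTRL[10:0] << 21) | 0x100000 | 0x0d123,
 * i.e. the low 20 bits of the CPU address are relocated into the 2 MB
 * window selected by the CORE_CTRL bits, plus the fixed 0x100000
 * offset.
 */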
 867
 868/*
 869 * Diagnostic read/write access is provided for startup/config/debug usage.
 870 * Caller must guarantee proper alignment, when applicable, and that there
 871 * is only a single user at any moment.
 872 */
 873static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
 874				    int nbytes)
 875{
 876	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 877	int ret = 0;
 878	u32 *buf;
 879	unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
 880	struct ath10k_ce_pipe *ce_diag;
 881	/* Host buffer address in CE space */
 882	u32 ce_data;
 883	dma_addr_t ce_data_base = 0;
 884	void *data_buf = NULL;
 885	int i;
 886
 887	spin_lock_bh(&ar_pci->ce_lock);
 888
 889	ce_diag = ar_pci->ce_diag;
 890
 891	/*
 892	 * Allocate a temporary bounce buffer to hold caller's data
 893	 * to be DMA'ed from Target. This guarantees
 894	 *   1) 4-byte alignment
 895	 *   2) Buffer in DMA-able space
 896	 */
 897	alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
 898
 899	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
 900						       alloc_nbytes,
 901						       &ce_data_base,
 902						       GFP_ATOMIC);
 903
 904	if (!data_buf) {
 905		ret = -ENOMEM;
 906		goto done;
 907	}
 908	memset(data_buf, 0, alloc_nbytes);
 909
 910	remaining_bytes = nbytes;
 911	ce_data = ce_data_base;
 912	while (remaining_bytes) {
 913		nbytes = min_t(unsigned int, remaining_bytes,
 914			       DIAG_TRANSFER_LIMIT);
 915
 916		ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
 917		if (ret != 0)
 918			goto done;
 919
 920		/* Request CE to send from Target(!) address to Host buffer */
 921		/*
 922		 * The address supplied by the caller is in the
 923		 * Target CPU virtual address space.
 924		 *
 925		 * In order to use this address with the diagnostic CE,
 926		 * convert it from Target CPU virtual address space
 927		 * to CE address space
 928		 */
 929		address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
 930
 931		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
 932					    0);
 933		if (ret)
 934			goto done;
 935
 936		i = 0;
 937		while (ath10k_ce_completed_send_next_nolock(ce_diag,
 938							    NULL) != 0) {
 939			mdelay(1);
 940			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
 941				ret = -EBUSY;
 942				goto done;
 943			}
 944		}
 945
 946		i = 0;
 947		while (ath10k_ce_completed_recv_next_nolock(ce_diag,
 948							    (void **)&buf,
 949							    &completed_nbytes)
 950								!= 0) {
 951			mdelay(1);
 952
 953			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
 954				ret = -EBUSY;
 955				goto done;
 956			}
 957		}
 958
 959		if (nbytes != completed_nbytes) {
 960			ret = -EIO;
 961			goto done;
 962		}
 963
 964		if (*buf != ce_data) {
 965			ret = -EIO;
 966			goto done;
 967		}
 968
 969		remaining_bytes -= nbytes;
 970
 971		if (ret) {
 972			ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
 973				    address, ret);
 974			break;
 975		}
 976		memcpy(data, data_buf, nbytes);
 977
 978		address += nbytes;
 979		data += nbytes;
 980	}
 981
 982done:
 983
 984	if (data_buf)
 985		dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
 986				  ce_data_base);
 987
 988	spin_unlock_bh(&ar_pci->ce_lock);
 989
 990	return ret;
 991}
 992
 993static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
 994{
 995	__le32 val = 0;
 996	int ret;
 997
 998	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
 999	*value = __le32_to_cpu(val);
1000
1001	return ret;
1002}
1003
1004static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
1005				     u32 src, u32 len)
1006{
1007	u32 host_addr, addr;
1008	int ret;
1009
1010	host_addr = host_interest_item_address(src);
1011
1012	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
1013	if (ret != 0) {
1014		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
1015			    src, ret);
1016		return ret;
1017	}
1018
1019	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
1020	if (ret != 0) {
1021		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
1022			    addr, len, ret);
1023		return ret;
1024	}
1025
1026	return 0;
1027}
1028
1029#define ath10k_pci_diag_read_hi(ar, dest, src, len)		\
1030	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
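/* [Editor's note] Hedged usage sketch of the wrapper above: HI_ITEM()
 * turns a struct host_interest member name into its target-side
 * offset, so the helper first reads the pointer stored in that slot
 * and then copies len bytes from the address it points at, e.g.
 * (hi_board_data chosen purely for illustration):
 *
 *	ret = ath10k_pci_diag_read_hi(ar, buf, hi_board_data, buf_len);
 *
 * The register dump code below uses hi_failure_state the same way.
 */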
1031
1032int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
1033			      const void *data, int nbytes)
1034{
1035	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1036	int ret = 0;
1037	u32 *buf;
1038	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
1039	struct ath10k_ce_pipe *ce_diag;
1040	void *data_buf = NULL;
1041	u32 ce_data;	/* Host buffer address in CE space */
1042	dma_addr_t ce_data_base = 0;
1043	int i;
1044
1045	spin_lock_bh(&ar_pci->ce_lock);
1046
1047	ce_diag = ar_pci->ce_diag;
1048
1049	/*
1050	 * Allocate a temporary bounce buffer to hold caller's data
1051	 * to be DMA'ed to Target. This guarantees
1052	 *   1) 4-byte alignment
1053	 *   2) Buffer in DMA-able space
1054	 */
1055	orig_nbytes = nbytes;
1056	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
1057						       orig_nbytes,
1058						       &ce_data_base,
1059						       GFP_ATOMIC);
1060	if (!data_buf) {
1061		ret = -ENOMEM;
1062		goto done;
1063	}
1064
1065	/* Copy caller's data to allocated DMA buf */
1066	memcpy(data_buf, data, orig_nbytes);
1067
1068	/*
1069	 * The address supplied by the caller is in the
1070	 * Target CPU virtual address space.
1071	 *
1072	 * In order to use this address with the diagnostic CE,
1073	 * convert it from
1074	 *    Target CPU virtual address space
1075	 * to
1076	 *    CE address space
1077	 */
1078	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
1079
1080	remaining_bytes = orig_nbytes;
1081	ce_data = ce_data_base;
1082	while (remaining_bytes) {
1083		/* FIXME: check cast */
1084		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
1085
1086		/* Set up to receive directly into Target(!) address */
1087		ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
1088		if (ret != 0)
1089			goto done;
1090
1091		/*
1092		 * Request CE to send caller-supplied data that
1093		 * was copied to bounce buffer to Target(!) address.
1094		 */
1095		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
1096					    nbytes, 0, 0);
1097		if (ret != 0)
1098			goto done;
1099
1100		i = 0;
1101		while (ath10k_ce_completed_send_next_nolock(ce_diag,
1102							    NULL) != 0) {
1103			mdelay(1);
1104
1105			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
1106				ret = -EBUSY;
1107				goto done;
1108			}
1109		}
1110
1111		i = 0;
1112		while (ath10k_ce_completed_recv_next_nolock(ce_diag,
1113							    (void **)&buf,
1114							    &completed_nbytes)
1115								!= 0) {
1116			mdelay(1);
1117
1118			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
1119				ret = -EBUSY;
1120				goto done;
1121			}
1122		}
1123
1124		if (nbytes != completed_nbytes) {
1125			ret = -EIO;
1126			goto done;
1127		}
1128
1129		if (*buf != address) {
1130			ret = -EIO;
1131			goto done;
1132		}
1133
1134		remaining_bytes -= nbytes;
1135		address += nbytes;
1136		ce_data += nbytes;
1137	}
1138
1139done:
1140	if (data_buf) {
1141		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
1142				  ce_data_base);
1143	}
1144
1145	if (ret != 0)
1146		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
1147			    address, ret);
1148
1149	spin_unlock_bh(&ar_pci->ce_lock);
1150
1151	return ret;
1152}
1153
1154static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
1155{
1156	__le32 val = __cpu_to_le32(value);
1157
1158	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
1159}
1160
1161/* Called by lower (CE) layer when a send to Target completes. */
1162static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
1163{
1164	struct ath10k *ar = ce_state->ar;
1165	struct sk_buff_head list;
1166	struct sk_buff *skb;
1167
1168	__skb_queue_head_init(&list);
1169	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1170		/* no need to call tx completion for NULL pointers */
1171		if (skb == NULL)
1172			continue;
1173
1174		__skb_queue_tail(&list, skb);
1175	}
1176
1177	while ((skb = __skb_dequeue(&list)))
1178		ath10k_htc_tx_completion_handler(ar, skb);
1179}
1180
1181static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
1182				     void (*callback)(struct ath10k *ar,
1183						      struct sk_buff *skb))
1184{
1185	struct ath10k *ar = ce_state->ar;
1186	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1187	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
1188	struct sk_buff *skb;
1189	struct sk_buff_head list;
1190	void *transfer_context;
1191	unsigned int nbytes, max_nbytes;
1192
1193	__skb_queue_head_init(&list);
1194	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
1195					     &nbytes) == 0) {
1196		skb = transfer_context;
1197		max_nbytes = skb->len + skb_tailroom(skb);
1198		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1199				 max_nbytes, DMA_FROM_DEVICE);
1200
1201		if (unlikely(max_nbytes < nbytes)) {
1202			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1203				    nbytes, max_nbytes);
1204			dev_kfree_skb_any(skb);
1205			continue;
1206		}
1207
1208		skb_put(skb, nbytes);
1209		__skb_queue_tail(&list, skb);
1210	}
1211
1212	while ((skb = __skb_dequeue(&list))) {
1213		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1214			   ce_state->id, skb->len);
1215		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1216				skb->data, skb->len);
1217
1218		callback(ar, skb);
1219	}
1220
1221	ath10k_pci_rx_post_pipe(pipe_info);
1222}
1223
1224static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
1225					 void (*callback)(struct ath10k *ar,
1226							  struct sk_buff *skb))
1227{
1228	struct ath10k *ar = ce_state->ar;
1229	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1230	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
1231	struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
1232	struct sk_buff *skb;
1233	struct sk_buff_head list;
1234	void *transfer_context;
1235	unsigned int nbytes, max_nbytes, nentries;
1236	int orig_len;
1237
1238	/* No need to acquire ce_lock for CE5, since this is the only place CE5
1239	 * is processed other than init and deinit. Before releasing CE5
1240	 * buffers, interrupts are disabled. Thus CE5 access is serialized.
1241	 */
1242	__skb_queue_head_init(&list);
1243	while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
1244						    &nbytes) == 0) {
1245		skb = transfer_context;
1246		max_nbytes = skb->len + skb_tailroom(skb);
1247
1248		if (unlikely(max_nbytes < nbytes)) {
1249			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1250				    nbytes, max_nbytes);
1251			continue;
1252		}
1253
1254		dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1255					max_nbytes, DMA_FROM_DEVICE);
1256		skb_put(skb, nbytes);
1257		__skb_queue_tail(&list, skb);
1258	}
1259
1260	nentries = skb_queue_len(&list);
1261	while ((skb = __skb_dequeue(&list))) {
1262		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1263			   ce_state->id, skb->len);
1264		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1265				skb->data, skb->len);
1266
1267		orig_len = skb->len;
1268		callback(ar, skb);
1269		skb_push(skb, orig_len - skb->len);
1270		skb_reset_tail_pointer(skb);
1271		skb_trim(skb, 0);
1272
1273		/* let the device own the buffer again */
1274		dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1275					   skb->len + skb_tailroom(skb),
1276					   DMA_FROM_DEVICE);
1277	}
1278	ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
1279}
1280
1281/* Called by lower (CE) layer when data is received from the Target. */
1282static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1283{
1284	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1285}
1286
1287static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1288{
1289	/* CE4 polling needs to be done whenever CE pipe which transports
1290	 * HTT Rx (target->host) is processed.
1291	 */
1292	ath10k_ce_per_engine_service(ce_state->ar, 4);
1293
1294	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1295}
1296
1297/* Called by lower (CE) layer when data is received from the Target.
1298 * Only 10.4 firmware uses separate CE to transfer pktlog data.
1299 */
1300static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
1301{
1302	ath10k_pci_process_rx_cb(ce_state,
1303				 ath10k_htt_rx_pktlog_completion_handler);
1304}
1305
1306/* Called by lower (CE) layer when a send to HTT Target completes. */
1307static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
1308{
1309	struct ath10k *ar = ce_state->ar;
1310	struct sk_buff *skb;
1311
1312	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1313		/* no need to call tx completion for NULL pointers */
1314		if (!skb)
1315			continue;
1316
1317		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
1318				 skb->len, DMA_TO_DEVICE);
1319		ath10k_htt_hif_tx_complete(ar, skb);
1320	}
1321}
1322
1323static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
1324{
1325	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
1326	ath10k_htt_t2h_msg_handler(ar, skb);
1327}
1328
1329/* Called by lower (CE) layer when HTT data is received from the Target. */
1330static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
1331{
1332	/* CE4 polling needs to be done whenever CE pipe which transports
1333	 * HTT Rx (target->host) is processed.
1334	 */
1335	ath10k_ce_per_engine_service(ce_state->ar, 4);
1336
1337	ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
1338}
1339
1340int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1341			 struct ath10k_hif_sg_item *items, int n_items)
1342{
1343	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1344	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
1345	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
1346	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
1347	unsigned int nentries_mask;
1348	unsigned int sw_index;
1349	unsigned int write_index;
1350	int err, i = 0;
1351
1352	spin_lock_bh(&ar_pci->ce_lock);
1353
1354	nentries_mask = src_ring->nentries_mask;
1355	sw_index = src_ring->sw_index;
1356	write_index = src_ring->write_index;
1357
1358	if (unlikely(CE_RING_DELTA(nentries_mask,
1359				   write_index, sw_index - 1) < n_items)) {
1360		err = -ENOBUFS;
1361		goto err;
1362	}
1363
1364	for (i = 0; i < n_items - 1; i++) {
1365		ath10k_dbg(ar, ATH10K_DBG_PCI,
1366			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
1367			   i, items[i].paddr, items[i].len, n_items);
1368		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1369				items[i].vaddr, items[i].len);
1370
1371		err = ath10k_ce_send_nolock(ce_pipe,
1372					    items[i].transfer_context,
1373					    items[i].paddr,
1374					    items[i].len,
1375					    items[i].transfer_id,
1376					    CE_SEND_FLAG_GATHER);
1377		if (err)
1378			goto err;
1379	}
1380
1381	/* `i` is equal to `n_items - 1` after the loop above */
1382
1383	ath10k_dbg(ar, ATH10K_DBG_PCI,
1384		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
1385		   i, items[i].paddr, items[i].len, n_items);
1386	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1387			items[i].vaddr, items[i].len);
1388
1389	err = ath10k_ce_send_nolock(ce_pipe,
1390				    items[i].transfer_context,
1391				    items[i].paddr,
1392				    items[i].len,
1393				    items[i].transfer_id,
1394				    0);
1395	if (err)
1396		goto err;
1397
1398	spin_unlock_bh(&ar_pci->ce_lock);
1399	return 0;
1400
1401err:
1402	for (; i > 0; i--)
1403		__ath10k_ce_send_revert(ce_pipe);
1404
1405	spin_unlock_bh(&ar_pci->ce_lock);
1406	return err;
1407}
1408
1409int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1410			     size_t buf_len)
1411{
1412	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
1413}
1414
1415u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
1416{
1417	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1418
1419	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
1420
1421	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
1422}
1423
1424static void ath10k_pci_dump_registers(struct ath10k *ar,
1425				      struct ath10k_fw_crash_data *crash_data)
1426{
1427	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
1428	int i, ret;
1429
1430	lockdep_assert_held(&ar->data_lock);
1431
1432	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
1433				      hi_failure_state,
1434				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
1435	if (ret) {
1436		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
1437		return;
1438	}
1439
1440	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
1441
1442	ath10k_err(ar, "firmware register dump:\n");
1443	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
1444		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
1445			   i,
1446			   __le32_to_cpu(reg_dump_values[i]),
1447			   __le32_to_cpu(reg_dump_values[i + 1]),
1448			   __le32_to_cpu(reg_dump_values[i + 2]),
1449			   __le32_to_cpu(reg_dump_values[i + 3]));
1450
1451	if (!crash_data)
1452		return;
1453
1454	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
1455		crash_data->registers[i] = reg_dump_values[i];
1456}
1457
1458static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1459{
1460	struct ath10k_fw_crash_data *crash_data;
1461	char uuid[50];
1462
1463	spin_lock_bh(&ar->data_lock);
1464
1465	ar->stats.fw_crash_counter++;
1466
1467	crash_data = ath10k_debug_get_new_fw_crash_data(ar);
1468
1469	if (crash_data)
1470		scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
1471	else
1472		scnprintf(uuid, sizeof(uuid), "n/a");
1473
1474	ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
1475	ath10k_print_driver_info(ar);
1476	ath10k_pci_dump_registers(ar, crash_data);
1477
1478	spin_unlock_bh(&ar->data_lock);
1479
1480	queue_work(ar->workqueue, &ar->restart_work);
1481}
1482
1483void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1484					int force)
1485{
1486	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
1487
1488	if (!force) {
1489		int resources;
1490		/*
1491		 * Decide whether to actually poll for completions, or just
1492		 * wait for a later chance.
1493		 * If there seem to be plenty of resources left, then just wait
1494		 * since checking involves reading a CE register, which is a
1495		 * relatively expensive operation.
1496		 */
1497		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
1498
1499		/*
1500		 * If at least 50% of the total resources are still available,
1501		 * don't bother checking again yet.
1502		 */
1503		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
1504			return;
1505	}
1506	ath10k_ce_per_engine_service(ar, pipe);
1507}
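/* [Editor's note] Worked example of the 50% heuristic above, taken
 * from the host_ce_config_wlan[] table earlier in the file: CE3, the
 * host->target WMI pipe, has src_nentries = 32, so the early return is
 * taken while more than 16 source ring entries are still free and the
 * comparatively expensive CE register access in
 * ath10k_ce_per_engine_service() is skipped.
 */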
1508
1509static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
1510{
1511	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1512
1513	del_timer_sync(&ar_pci->rx_post_retry);
1514}
1515
1516int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
1517				       u8 *ul_pipe, u8 *dl_pipe)
1518{
1519	const struct service_to_pipe *entry;
1520	bool ul_set = false, dl_set = false;
1521	int i;
1522
1523	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
1524
1525	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
1526		entry = &target_service_to_ce_map_wlan[i];
1527
1528		if (__le32_to_cpu(entry->service_id) != service_id)
1529			continue;
1530
1531		switch (__le32_to_cpu(entry->pipedir)) {
1532		case PIPEDIR_NONE:
1533			break;
1534		case PIPEDIR_IN:
1535			WARN_ON(dl_set);
1536			*dl_pipe = __le32_to_cpu(entry->pipenum);
1537			dl_set = true;
1538			break;
1539		case PIPEDIR_OUT:
1540			WARN_ON(ul_set);
1541			*ul_pipe = __le32_to_cpu(entry->pipenum);
1542			ul_set = true;
1543			break;
1544		case PIPEDIR_INOUT:
1545			WARN_ON(dl_set);
1546			WARN_ON(ul_set);
1547			*dl_pipe = __le32_to_cpu(entry->pipenum);
1548			*ul_pipe = __le32_to_cpu(entry->pipenum);
1549			dl_set = true;
1550			ul_set = true;
1551			break;
1552		}
1553	}
1554
1555	if (WARN_ON(!ul_set || !dl_set))
1556		return -ENOENT;
1557
1558	return 0;
1559}
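/* [Editor's note] Usage sketch of the lookup above, assuming the WMI
 * control service as input; per the target_service_to_ce_map_wlan[]
 * table earlier in the file it resolves to CE3 for uplink and CE2 for
 * downlink:
 *
 *	u8 ul_pipe, dl_pipe;
 *	int ret;
 *
 *	ret = ath10k_pci_hif_map_service_to_pipe(ar,
 *						 ATH10K_HTC_SVC_ID_WMI_CONTROL,
 *						 &ul_pipe, &dl_pipe);
 *	(on success: ul_pipe == 3, dl_pipe == 2)
 */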
1560
1561void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1562				     u8 *ul_pipe, u8 *dl_pipe)
1563{
1564	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
1565
1566	(void)ath10k_pci_hif_map_service_to_pipe(ar,
1567						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1568						 ul_pipe, dl_pipe);
1569}
1570
1571void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1572{
1573	u32 val;
1574
1575	switch (ar->hw_rev) {
1576	case ATH10K_HW_QCA988X:
1577	case ATH10K_HW_QCA9887:
1578	case ATH10K_HW_QCA6174:
1579	case ATH10K_HW_QCA9377:
1580		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1581					CORE_CTRL_ADDRESS);
1582		val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1583		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1584				   CORE_CTRL_ADDRESS, val);
1585		break;
1586	case ATH10K_HW_QCA99X0:
1587	case ATH10K_HW_QCA9984:
1588	case ATH10K_HW_QCA9888:
1589	case ATH10K_HW_QCA4019:
1590		/* TODO: Find appropriate register configuration for QCA99X0
1591		 *  to mask irq/MSI.
1592		 */
1593		 break;
1594	}
1595}
1596
1597static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1598{
1599	u32 val;
1600
1601	switch (ar->hw_rev) {
1602	case ATH10K_HW_QCA988X:
1603	case ATH10K_HW_QCA9887:
1604	case ATH10K_HW_QCA6174:
1605	case ATH10K_HW_QCA9377:
1606		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1607					CORE_CTRL_ADDRESS);
1608		val |= CORE_CTRL_PCIE_REG_31_MASK;
1609		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1610				   CORE_CTRL_ADDRESS, val);
1611		break;
1612	case ATH10K_HW_QCA99X0:
1613	case ATH10K_HW_QCA9984:
1614	case ATH10K_HW_QCA9888:
1615	case ATH10K_HW_QCA4019:
1616		/* TODO: Find appropriate register configuration for QCA99X0
1617		 *  to unmask irq/MSI.
1618		 */
1619		break;
1620	}
1621}
1622
1623static void ath10k_pci_irq_disable(struct ath10k *ar)
1624{
1625	ath10k_ce_disable_interrupts(ar);
1626	ath10k_pci_disable_and_clear_legacy_irq(ar);
1627	ath10k_pci_irq_msi_fw_mask(ar);
1628}
1629
1630static void ath10k_pci_irq_sync(struct ath10k *ar)
1631{
1632	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1633
1634	synchronize_irq(ar_pci->pdev->irq);
1635}
1636
1637static void ath10k_pci_irq_enable(struct ath10k *ar)
1638{
1639	ath10k_ce_enable_interrupts(ar);
1640	ath10k_pci_enable_legacy_irq(ar);
1641	ath10k_pci_irq_msi_fw_unmask(ar);
1642}
1643
1644static int ath10k_pci_hif_start(struct ath10k *ar)
1645{
1646	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1647
1648	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
1649
1650	ath10k_pci_irq_enable(ar);
1651	ath10k_pci_rx_post(ar);
1652
1653	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
1654				   ar_pci->link_ctl);
1655
1656	return 0;
1657}
1658
1659static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1660{
1661	struct ath10k *ar;
1662	struct ath10k_ce_pipe *ce_pipe;
1663	struct ath10k_ce_ring *ce_ring;
1664	struct sk_buff *skb;
1665	int i;
1666
1667	ar = pci_pipe->hif_ce_state;
1668	ce_pipe = pci_pipe->ce_hdl;
1669	ce_ring = ce_pipe->dest_ring;
1670
1671	if (!ce_ring)
1672		return;
1673
1674	if (!pci_pipe->buf_sz)
1675		return;
1676
1677	for (i = 0; i < ce_ring->nentries; i++) {
1678		skb = ce_ring->per_transfer_context[i];
1679		if (!skb)
1680			continue;
1681
1682		ce_ring->per_transfer_context[i] = NULL;
1683
1684		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1685				 skb->len + skb_tailroom(skb),
1686				 DMA_FROM_DEVICE);
1687		dev_kfree_skb_any(skb);
1688	}
1689}
1690
1691static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1692{
1693	struct ath10k *ar;
1694	struct ath10k_ce_pipe *ce_pipe;
1695	struct ath10k_ce_ring *ce_ring;
1696	struct sk_buff *skb;
1697	int i;
1698
1699	ar = pci_pipe->hif_ce_state;
1700	ce_pipe = pci_pipe->ce_hdl;
1701	ce_ring = ce_pipe->src_ring;
1702
1703	if (!ce_ring)
1704		return;
1705
1706	if (!pci_pipe->buf_sz)
1707		return;
1708
1709	for (i = 0; i < ce_ring->nentries; i++) {
1710		skb = ce_ring->per_transfer_context[i];
1711		if (!skb)
1712			continue;
1713
1714		ce_ring->per_transfer_context[i] = NULL;
1715
1716		ath10k_htc_tx_completion_handler(ar, skb);
1717	}
1718}
1719
1720/*
1721 * Cleanup residual buffers for device shutdown:
1722 *    buffers that were enqueued for receive
1723 *    buffers that were to be sent
1724 * Note: Buffers that had completed but which were
1725 * not yet processed are on a completion queue. They
1726 * are handled when the completion thread shuts down.
1727 */
1728static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1729{
1730	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1731	int pipe_num;
1732
1733	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1734		struct ath10k_pci_pipe *pipe_info;
1735
1736		pipe_info = &ar_pci->pipe_info[pipe_num];
1737		ath10k_pci_rx_pipe_cleanup(pipe_info);
1738		ath10k_pci_tx_pipe_cleanup(pipe_info);
1739	}
1740}
1741
1742void ath10k_pci_ce_deinit(struct ath10k *ar)
1743{
1744	int i;
1745
1746	for (i = 0; i < CE_COUNT; i++)
1747		ath10k_ce_deinit_pipe(ar, i);
1748}
1749
1750void ath10k_pci_flush(struct ath10k *ar)
1751{
1752	ath10k_pci_rx_retry_sync(ar);
1753	ath10k_pci_buffer_cleanup(ar);
1754}
1755
1756static void ath10k_pci_hif_stop(struct ath10k *ar)
1757{
1758	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1759	unsigned long flags;
1760
1761	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
1762
1763	/* Most likely the device has HTT Rx ring configured. The only way to
1764	 * prevent the device from accessing (and possibly corrupting) host
1765	 * memory is to reset the chip now.
1766	 *
1767	 * There's also no known way of masking MSI interrupts on the device.
1768	 * For ranged MSI the CE-related interrupts can be masked. However,
1769	 * regardless of how many MSI interrupts are assigned, the first one
1770	 * is always used for firmware indications (crashes) and cannot be
1771	 * masked. To prevent the device from asserting the interrupt, reset it
1772	 * before proceeding with cleanup.
1773	 */
1774	ath10k_pci_safe_chip_reset(ar);
1775
1776	ath10k_pci_irq_disable(ar);
1777	ath10k_pci_irq_sync(ar);
1778	ath10k_pci_flush(ar);
1779	napi_synchronize(&ar->napi);
1780	napi_disable(&ar->napi);
1781
1782	spin_lock_irqsave(&ar_pci->ps_lock, flags);
1783	WARN_ON(ar_pci->ps_wake_refcount > 0);
1784	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
1785}
1786
1787int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1788				    void *req, u32 req_len,
1789				    void *resp, u32 *resp_len)
1790{
1791	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1792	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1793	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1794	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1795	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1796	dma_addr_t req_paddr = 0;
1797	dma_addr_t resp_paddr = 0;
1798	struct bmi_xfer xfer = {};
1799	void *treq, *tresp = NULL;
1800	int ret = 0;
1801
1802	might_sleep();
1803
1804	if (resp && !resp_len)
1805		return -EINVAL;
1806
1807	if (resp && resp_len && *resp_len == 0)
1808		return -EINVAL;
1809
1810	treq = kmemdup(req, req_len, GFP_KERNEL);
1811	if (!treq)
1812		return -ENOMEM;
1813
1814	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1815	ret = dma_mapping_error(ar->dev, req_paddr);
1816	if (ret) {
1817		ret = -EIO;
1818		goto err_dma;
1819	}
1820
1821	if (resp && resp_len) {
1822		tresp = kzalloc(*resp_len, GFP_KERNEL);
1823		if (!tresp) {
1824			ret = -ENOMEM;
1825			goto err_req;
1826		}
1827
1828		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1829					    DMA_FROM_DEVICE);
1830		ret = dma_mapping_error(ar->dev, resp_paddr);
1831		if (ret) {
1832			ret = -EIO;
1833			goto err_req;
1834		}
1835
1836		xfer.wait_for_resp = true;
1837		xfer.resp_len = 0;
1838
1839		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
1840	}
1841
1842	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1843	if (ret)
1844		goto err_resp;
1845
1846	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1847	if (ret) {
1848		u32 unused_buffer;
1849		unsigned int unused_nbytes;
1850		unsigned int unused_id;
1851
1852		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1853					   &unused_nbytes, &unused_id);
1854	} else {
1855		/* ath10k_pci_bmi_wait() returned 0, i.e. we did not time out */
1856		ret = 0;
1857	}
1858
1859err_resp:
1860	if (resp) {
1861		u32 unused_buffer;
1862
1863		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1864		dma_unmap_single(ar->dev, resp_paddr,
1865				 *resp_len, DMA_FROM_DEVICE);
1866	}
1867err_req:
1868	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1869
1870	if (ret == 0 && resp_len) {
1871		*resp_len = min(*resp_len, xfer.resp_len);
1872		memcpy(resp, tresp, *resp_len);
1873	}
1874err_dma:
1875	kfree(treq);
1876	kfree(tresp);
1877
1878	return ret;
1879}
1880
1881static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1882{
1883	struct bmi_xfer *xfer;
1884
1885	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
1886		return;
1887
1888	xfer->tx_done = true;
1889}
1890
1891static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1892{
1893	struct ath10k *ar = ce_state->ar;
1894	struct bmi_xfer *xfer;
1895	unsigned int nbytes;
1896
1897	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
1898					  &nbytes))
1899		return;
1900
1901	if (WARN_ON_ONCE(!xfer))
1902		return;
1903
1904	if (!xfer->wait_for_resp) {
1905		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
1906		return;
1907	}
1908
1909	xfer->resp_len = nbytes;
1910	xfer->rx_done = true;
1911}
1912
1913static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1914			       struct ath10k_ce_pipe *rx_pipe,
1915			       struct bmi_xfer *xfer)
1916{
1917	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1918
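	/* BMI runs before ath10k_pci_hif_start() enables CE interrupts, so
	 * completions are polled here: service both pipes, yield the CPU
	 * with schedule(), and retry until the jiffies deadline passes.
	 */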
1919	while (time_before_eq(jiffies, timeout)) {
1920		ath10k_pci_bmi_send_done(tx_pipe);
1921		ath10k_pci_bmi_recv_data(rx_pipe);
1922
1923		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
1924			return 0;
1925
1926		schedule();
1927	}
1928
1929	return -ETIMEDOUT;
1930}
1931
1932/*
1933 * Send an interrupt to the device to wake up the Target CPU
1934 * so it has an opportunity to notice any changed state.
1935 */
1936static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1937{
1938	u32 addr, val;
1939
1940	addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
1941	val = ath10k_pci_read32(ar, addr);
1942	val |= CORE_CTRL_CPU_INTR_MASK;
1943	ath10k_pci_write32(ar, addr, val);
1944
1945	return 0;
1946}
1947
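/* Number of IRAM banks available for the target's early memory
 * allocation. The value is consumed by ath10k_pci_init_config() below,
 * which packs it into the hi_early_alloc host-interest word using
 * HI_EARLY_ALLOC_IRAM_BANKS_SHIFT/_MASK.
 */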
1948static int ath10k_pci_get_num_banks(struct ath10k *ar)
1949{
1950	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1951
1952	switch (ar_pci->pdev->device) {
	case QCA988X_2_0_DEVICE_ID_UBNT:
1953	case QCA988X_2_0_DEVICE_ID:
1954	case QCA99X0_2_0_DEVICE_ID:
1955	case QCA9888_2_0_DEVICE_ID:
1956	case QCA9984_1_0_DEVICE_ID:
1957	case QCA9887_1_0_DEVICE_ID:
1958		return 1;
1959	case QCA6164_2_1_DEVICE_ID:
1960	case QCA6174_2_1_DEVICE_ID:
1961		switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
1962		case QCA6174_HW_1_0_CHIP_ID_REV:
1963		case QCA6174_HW_1_1_CHIP_ID_REV:
1964		case QCA6174_HW_2_1_CHIP_ID_REV:
1965		case QCA6174_HW_2_2_CHIP_ID_REV:
1966			return 3;
1967		case QCA6174_HW_1_3_CHIP_ID_REV:
1968			return 2;
1969		case QCA6174_HW_3_0_CHIP_ID_REV:
1970		case QCA6174_HW_3_1_CHIP_ID_REV:
1971		case QCA6174_HW_3_2_CHIP_ID_REV:
1972			return 9;
1973		}
1974		break;
1975	case QCA9377_1_0_DEVICE_ID:
1976		return 2;
1977	}
1978
1979	ath10k_warn(ar, "unknown number of banks, assuming 1\n");
1980	return 1;
1981}
1982
1983static int ath10k_bus_get_num_banks(struct ath10k *ar)
1984{
1985	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1986
1987	return ar_pci->bus_ops->get_num_banks(ar);
1988}
1989
1990int ath10k_pci_init_config(struct ath10k *ar)
1991{
1992	u32 interconnect_targ_addr;
1993	u32 pcie_state_targ_addr = 0;
1994	u32 pipe_cfg_targ_addr = 0;
1995	u32 svc_to_pipe_map = 0;
1996	u32 pcie_config_flags = 0;
1997	u32 ealloc_value;
1998	u32 ealloc_targ_addr;
1999	u32 flag2_value;
2000	u32 flag2_targ_addr;
2001	int ret = 0;
2002
2003	/* Download to Target the CE Config and the service-to-CE map */
2004	interconnect_targ_addr =
2005		host_interest_item_address(HI_ITEM(hi_interconnect_state));
2006
2007	/* Supply Target-side CE configuration */
2008	ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
2009				     &pcie_state_targ_addr);
2010	if (ret != 0) {
2011		ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
2012		return ret;
2013	}
2014
2015	if (pcie_state_targ_addr == 0) {
2016		ret = -EIO;
2017		ath10k_err(ar, "Invalid pcie state addr\n");
2018		return ret;
2019	}
2020
2021	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2022					  offsetof(struct pcie_state,
2023						   pipe_cfg_addr)),
2024				     &pipe_cfg_targ_addr);
2025	if (ret != 0) {
2026		ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
2027		return ret;
2028	}
2029
2030	if (pipe_cfg_targ_addr == 0) {
2031		ret = -EIO;
2032		ath10k_err(ar, "Invalid pipe cfg addr\n");
2033		return ret;
2034	}
2035
2036	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
2037					target_ce_config_wlan,
2038					sizeof(struct ce_pipe_config) *
2039					NUM_TARGET_CE_CONFIG_WLAN);
2040
2041	if (ret != 0) {
2042		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
2043		return ret;
2044	}
2045
2046	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2047					  offsetof(struct pcie_state,
2048						   svc_to_pipe_map)),
2049				     &svc_to_pipe_map);
2050	if (ret != 0) {
2051		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
2052		return ret;
2053	}
2054
2055	if (svc_to_pipe_map == 0) {
2056		ret = -EIO;
2057		ath10k_err(ar, "Invalid svc_to_pipe map\n");
2058		return ret;
2059	}
2060
2061	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
2062					target_service_to_ce_map_wlan,
2063					sizeof(target_service_to_ce_map_wlan));
2064	if (ret != 0) {
2065		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
2066		return ret;
2067	}
2068
2069	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2070					  offsetof(struct pcie_state,
2071						   config_flags)),
2072				     &pcie_config_flags);
2073	if (ret != 0) {
2074		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
2075		return ret;
2076	}
2077
2078	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
2079
2080	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
2081					   offsetof(struct pcie_state,
2082						    config_flags)),
2083				      pcie_config_flags);
2084	if (ret != 0) {
2085		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
2086		return ret;
2087	}
2088
2089	/* configure early allocation */
2090	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
2091
2092	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
2093	if (ret != 0) {
2094		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
2095		return ret;
2096	}
2097
2098	/* first bank is switched to IRAM */
2099	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
2100			 HI_EARLY_ALLOC_MAGIC_MASK);
2101	ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
2102			  HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
2103			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
2104
2105	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
2106	if (ret != 0) {
2107		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
2108		return ret;
2109	}
2110
2111	/* Tell Target to proceed with initialization */
2112	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
2113
2114	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
2115	if (ret != 0) {
2116		ath10k_err(ar, "Failed to get option val: %d\n", ret);
2117		return ret;
2118	}
2119
2120	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
2121
2122	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
2123	if (ret != 0) {
2124		ath10k_err(ar, "Failed to set option val: %d\n", ret);
2125		return ret;
2126	}
2127
2128	return 0;
2129}
2130
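/* Most of ath10k_pci_init_config() repeats one pattern: locate a
 * host-interest item in target memory, read it through the diagnostic
 * window, modify it, and write it back. A hedged sketch of that
 * read-modify-write step (example_hi_set_bits is hypothetical, not a
 * driver symbol; kept out of the build):
 */
#if 0
static int example_hi_set_bits(struct ath10k *ar, u32 item, u32 set_bits)
{
	u32 addr = host_interest_item_address(item);
	u32 val;
	int ret;

	ret = ath10k_pci_diag_read32(ar, addr, &val);
	if (ret)
		return ret;

	return ath10k_pci_diag_write32(ar, addr, val | set_bits);
}
#endif
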
2131static void ath10k_pci_override_ce_config(struct ath10k *ar)
2132{
2133	struct ce_attr *attr;
2134	struct ce_pipe_config *config;
2135
2136	/* For QCA6174 we're overriding the Copy Engine 5 configuration,
2137	 * since it is currently used for another feature.
2138	 */
2139
2140	/* Override Host's Copy Engine 5 configuration */
2141	attr = &host_ce_config_wlan[5];
2142	attr->src_sz_max = 0;
2143	attr->dest_nentries = 0;
2144
2145	/* Override Target firmware's Copy Engine configuration */
2146	config = &target_ce_config_wlan[5];
2147	config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2148	config->nbytes_max = __cpu_to_le32(2048);
2149
2150	/* Map from service/endpoint to Copy Engine */
2151	target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
2152}
2153
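/* The override above patches global configuration tables in place, so it
 * must run before ath10k_pci_alloc_pipes() consumes them; see the call
 * order in ath10k_pci_setup_resource() below.
 */
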
2154int ath10k_pci_alloc_pipes(struct ath10k *ar)
2155{
2156	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2157	struct ath10k_pci_pipe *pipe;
2158	int i, ret;
2159
2160	for (i = 0; i < CE_COUNT; i++) {
2161		pipe = &ar_pci->pipe_info[i];
2162		pipe->ce_hdl = &ar_pci->ce_states[i];
2163		pipe->pipe_num = i;
2164		pipe->hif_ce_state = ar;
2165
2166		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
2167		if (ret) {
2168			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
2169				   i, ret);
2170			return ret;
2171		}
2172
2173		/* Last CE is Diagnostic Window */
2174		if (i == CE_DIAG_PIPE) {
2175			ar_pci->ce_diag = pipe->ce_hdl;
2176			continue;
2177		}
2178
2179		pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
2180	}
2181
2182	return 0;
2183}
2184
2185void ath10k_pci_free_pipes(struct ath10k *ar)
2186{
2187	int i;
2188
2189	for (i = 0; i < CE_COUNT; i++)
2190		ath10k_ce_free_pipe(ar, i);
2191}
2192
2193int ath10k_pci_init_pipes(struct ath10k *ar)
2194{
2195	int i, ret;
2196
2197	for (i = 0; i < CE_COUNT; i++) {
2198		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
2199		if (ret) {
2200			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
2201				   i, ret);
2202			return ret;
2203		}
2204	}
2205
2206	return 0;
2207}
2208
2209static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
2210{
2211	return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
2212	       FW_IND_EVENT_PENDING;
2213}
2214
2215static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
2216{
2217	u32 val;
2218
2219	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2220	val &= ~FW_IND_EVENT_PENDING;
2221	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
2222}
2223
2224static bool ath10k_pci_has_device_gone(struct ath10k *ar)
2225{
2226	u32 val;
2227
2228	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2229	return (val == 0xffffffff);
2230}
2231
2232/* this function effectively clears target memory controller assert line */
2233static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
2234{
2235	u32 val;
2236
2237	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2238	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2239			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
2240	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2241
2242	msleep(10);
2243
2244	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2245	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2246			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
2247	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2248
2249	msleep(10);
2250}
2251
2252static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
2253{
2254	u32 val;
2255
2256	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
2257
2258	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2259				SOC_RESET_CONTROL_ADDRESS);
2260	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2261			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2262}
2263
2264static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
2265{
2266	u32 val;
2267
2268	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2269				SOC_RESET_CONTROL_ADDRESS);
2270
2271	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2272			   val | SOC_RESET_CONTROL_CE_RST_MASK);
2273	msleep(10);
2274	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2275			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
2276}
2277
2278static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
2279{
2280	u32 val;
2281
2282	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2283				SOC_LF_TIMER_CONTROL0_ADDRESS);
2284	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
2285			   SOC_LF_TIMER_CONTROL0_ADDRESS,
2286			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2287}
2288
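/* Several of the warm-reset helpers above share one idiom: read a reset
 * register, assert a mask bit, give the hardware time to settle, then
 * deassert it. A hedged sketch of that pulse (example_soc_pulse_bit is
 * hypothetical; kept out of the build):
 */
#if 0
static void example_soc_pulse_bit(struct ath10k *ar, u32 addr, u32 mask)
{
	u32 val = ath10k_pci_read32(ar, addr);

	ath10k_pci_write32(ar, addr, val | mask);	/* assert reset */
	msleep(10);					/* let it settle */
	ath10k_pci_write32(ar, addr, val & ~mask);	/* deassert */
}
#endif
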
2289static int ath10k_pci_warm_reset(struct ath10k *ar)
2290{
2291	int ret;
2292
2293	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
2294
2295	spin_lock_bh(&ar->data_lock);
2296	ar->stats.fw_warm_reset_counter++;
2297	spin_unlock_bh(&ar->data_lock);
2298
2299	ath10k_pci_irq_disable(ar);
2300
2301	/* Make sure the target CPU is not doing anything dangerous, e.g. if it
2302	 * were to access copy engine while host performs copy engine reset
2303	 * then it is possible for the device to confuse pci-e controller to
2304	 * the point of bringing host system to a complete stop (i.e. hang).
2305	 */
2306	ath10k_pci_warm_reset_si0(ar);
2307	ath10k_pci_warm_reset_cpu(ar);
2308	ath10k_pci_init_pipes(ar);
2309	ath10k_pci_wait_for_target_init(ar);
2310
2311	ath10k_pci_warm_reset_clear_lf(ar);
2312	ath10k_pci_warm_reset_ce(ar);
2313	ath10k_pci_warm_reset_cpu(ar);
2314	ath10k_pci_init_pipes(ar);
2315
2316	ret = ath10k_pci_wait_for_target_init(ar);
2317	if (ret) {
2318		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2319		return ret;
2320	}
2321
2322	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
2323
2324	return 0;
2325}
2326
2327static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
2328{
2329	ath10k_pci_irq_disable(ar);
2330	return ath10k_pci_qca99x0_chip_reset(ar);
2331}
2332
2333static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
2334{
2335	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2336
2337	if (!ar_pci->pci_soft_reset)
2338		return -ENOTSUPP;
2339
2340	return ar_pci->pci_soft_reset(ar);
2341}
2342
2343static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
2344{
2345	int i, ret;
2346	u32 val;
2347
2348	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
2349
2350	/* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
2351	 * It is thus preferred to use warm reset, which is safer but may not
2352	 * be able to recover the device from all possible failure scenarios.
2353	 *
2354	 * Warm reset doesn't always work on the first try, so attempt it a
2355	 * few times before giving up.
2356	 */
2357	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2358		ret = ath10k_pci_warm_reset(ar);
2359		if (ret) {
2360			ath10k_warn(ar, "warm reset attempt %d of %d failed: %d\n",
2361				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2362				    ret);
2363			continue;
2364		}
2365
2366		/* FIXME: Sometimes copy engine doesn't recover after warm
2367		 * reset. In most cases this needs cold reset. In some of these
2368		 * cases the device is in such a state that a cold reset may
2369		 * lock up the host.
2370		 *
2371		 * Reading any host interest register via copy engine is
2372		 * sufficient to verify if device is capable of booting
2373		 * firmware blob.
2374		 */
2375		ret = ath10k_pci_init_pipes(ar);
2376		if (ret) {
2377			ath10k_warn(ar, "failed to init copy engine: %d\n",
2378				    ret);
2379			continue;
2380		}
2381
2382		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2383					     &val);
2384		if (ret) {
2385			ath10k_warn(ar, "failed to poke copy engine: %d\n",
2386				    ret);
2387			continue;
2388		}
2389
2390		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2391		return 0;
2392	}
2393
2394	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2395		ath10k_warn(ar, "refusing cold reset as requested\n");
2396		return -EPERM;
2397	}
2398
2399	ret = ath10k_pci_cold_reset(ar);
2400	if (ret) {
2401		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2402		return ret;
2403	}
2404
2405	ret = ath10k_pci_wait_for_target_init(ar);
2406	if (ret) {
2407		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2408			    ret);
2409		return ret;
2410	}
2411
2412	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
2413
2414	return 0;
2415}
2416
2417static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2418{
2419	int ret;
2420
2421	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2422
2423	/* FIXME: QCA6174 requires cold + warm reset to work. */
2424
2425	ret = ath10k_pci_cold_reset(ar);
2426	if (ret) {
2427		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2428		return ret;
2429	}
2430
2431	ret = ath10k_pci_wait_for_target_init(ar);
2432	if (ret) {
2433		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2434			    ret);
2435		return ret;
2436	}
2437
2438	ret = ath10k_pci_warm_reset(ar);
2439	if (ret) {
2440		ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2441		return ret;
2442	}
2443
2444	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
2445
2446	return 0;
2447}
2448
2449static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
2450{
2451	int ret;
2452
2453	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
2454
2455	ret = ath10k_pci_cold_reset(ar);
2456	if (ret) {
2457		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2458		return ret;
2459	}
2460
2461	ret = ath10k_pci_wait_for_target_init(ar);
2462	if (ret) {
2463		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2464			    ret);
2465		return ret;
2466	}
2467
2468	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
2469
2470	return 0;
2471}
2472
2473static int ath10k_pci_chip_reset(struct ath10k *ar)
2474{
2475	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2476
2477	if (WARN_ON(!ar_pci->pci_hard_reset))
2478		return -ENOTSUPP;
2479
2480	return ar_pci->pci_hard_reset(ar);
2481}
2482
2483static int ath10k_pci_hif_power_up(struct ath10k *ar)
2484{
2485	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2486	int ret;
2487
2488	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2489
2490	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2491				  &ar_pci->link_ctl);
2492	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2493				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
2494
2495	/*
2496	 * Bring the target up cleanly.
2497	 *
2498	 * The target may be in an undefined state with an AUX-powered Target
2499	 * and a Host in WoW mode. If the Host crashes, loses power, or is
2500	 * restarted (without unloading the driver) then the Target is left
2501	 * (aux) powered and running. On a subsequent driver load, the Target
2502	 * is in an unexpected state. We try to catch that here in order to
2503	 * reset the Target and retry the probe.
2504	 */
2505	ret = ath10k_pci_chip_reset(ar);
2506	if (ret) {
2507		if (ath10k_pci_has_fw_crashed(ar)) {
2508			ath10k_warn(ar, "firmware crashed during chip reset\n");
2509			ath10k_pci_fw_crashed_clear(ar);
2510			ath10k_pci_fw_crashed_dump(ar);
2511		}
2512
2513		ath10k_err(ar, "failed to reset chip: %d\n", ret);
2514		goto err_sleep;
2515	}
2516
2517	ret = ath10k_pci_init_pipes(ar);
2518	if (ret) {
2519		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
2520		goto err_sleep;
2521	}
2522
2523	ret = ath10k_pci_init_config(ar);
2524	if (ret) {
2525		ath10k_err(ar, "failed to setup init config: %d\n", ret);
2526		goto err_ce;
2527	}
2528
2529	ret = ath10k_pci_wake_target_cpu(ar);
2530	if (ret) {
2531		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
2532		goto err_ce;
2533	}
2534	napi_enable(&ar->napi);
2535
2536	return 0;
2537
2538err_ce:
2539	ath10k_pci_ce_deinit(ar);
2540
2541err_sleep:
2542	return ret;
2543}
2544
2545void ath10k_pci_hif_power_down(struct ath10k *ar)
2546{
2547	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
2548
2549	/* Currently hif_power_up performs effectively a reset and hif_stop
2550	 * resets the chip as well so there's no point in resetting here.
2551	 */
2552}
2553
2554#ifdef CONFIG_PM
2555
2556static int ath10k_pci_hif_suspend(struct ath10k *ar)
2557{
2558	/* The grace timer can still be counting down and ar->ps_awake may be true.
2559	 * It is known that the device may be asleep after resuming regardless
2560	 * of the SoC powersave state before suspending. Hence make sure the
2561	 * device is asleep before proceeding.
2562	 */
2563	ath10k_pci_sleep_sync(ar);
2564
2565	return 0;
2566}
2567
2568static int ath10k_pci_hif_resume(struct ath10k *ar)
2569{
2570	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2571	struct pci_dev *pdev = ar_pci->pdev;
2572	u32 val;
2573	int ret = 0;
2574
2575	ret = ath10k_pci_force_wake(ar);
2576	if (ret) {
2577		ath10k_err(ar, "failed to wake up target: %d\n", ret);
2578		return ret;
2579	}
2580
2581	/* Suspend/Resume resets the PCI configuration space, so we have to
2582	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
2583	 * from interfering with C3 CPU state. pci_restore_state won't help
2584	 * here since it only restores the first 64 bytes of the PCI config header.
2585	 */
2586	pci_read_config_dword(pdev, 0x40, &val);
2587	if ((val & 0x0000ff00) != 0)
2588		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2589
2590	return ret;
2591}
2592#endif
2593
2594static bool ath10k_pci_validate_cal(void *data, size_t size)
2595{
2596	__le16 *cal_words = data;
2597	u16 checksum = 0;
2598	size_t i;
2599
2600	if (size % 2 != 0)
2601		return false;
2602
2603	for (i = 0; i < size / 2; i++)
2604		checksum ^= le16_to_cpu(cal_words[i]);
2605
2606	return checksum == 0xffff;
2607}
2608
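/* Worked example of the rule above: the calibration blob must XOR to
 * 0xffff over little-endian 16-bit words. For a hypothetical three-word
 * blob, the generator stores the complement of the running XOR in the
 * final word:
 *
 *   data words:  0x1234  0x1171  0xfcba
 *   running XOR: 0x1234 ^ 0x1171 = 0x0345
 *                0x0345 ^ 0xfcba = 0xffff  -> accepted
 *
 * Any single-bit corruption flips the result away from 0xffff.
 */
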
2609static void ath10k_pci_enable_eeprom(struct ath10k *ar)
2610{
2611	/* Enable SI clock */
2612	ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
2613
2614	/* Configure GPIOs for I2C operation */
2615	ath10k_pci_write32(ar,
2616			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2617			   4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
2618			   SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
2619			      GPIO_PIN0_CONFIG) |
2620			   SM(1, GPIO_PIN0_PAD_PULL));
2621
2622	ath10k_pci_write32(ar,
2623			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2624			   4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
2625			   SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
2626			   SM(1, GPIO_PIN0_PAD_PULL));
2627
2628	ath10k_pci_write32(ar,
2629			   GPIO_BASE_ADDRESS +
2630			   QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
2631			   1u << QCA9887_1_0_SI_CLK_GPIO_PIN);
2632
2633	/* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */
2634	ath10k_pci_write32(ar,
2635			   SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
2636			   SM(1, SI_CONFIG_ERR_INT) |
2637			   SM(1, SI_CONFIG_BIDIR_OD_DATA) |
2638			   SM(1, SI_CONFIG_I2C) |
2639			   SM(1, SI_CONFIG_POS_SAMPLE) |
2640			   SM(1, SI_CONFIG_INACTIVE_DATA) |
2641			   SM(1, SI_CONFIG_INACTIVE_CLK) |
2642			   SM(8, SI_CONFIG_DIVIDER));
2643}
2644
2645static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
2646{
2647	u32 reg;
2648	int wait_limit;
2649
2650	/* set the device select byte and the address for the read operation */
2651	reg = QCA9887_EEPROM_SELECT_READ |
2652	      SM(addr, QCA9887_EEPROM_ADDR_LO) |
2653	      SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
2654	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
2655
2656	/* write transmit data, transfer length, and START bit */
2657	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
2658			   SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
2659			   SM(4, SI_CS_TX_CNT));
2660
2661	/* wait max 1 sec */
2662	wait_limit = 100000;
2663
2664	/* wait for SI_CS_DONE_INT */
2665	do {
2666		reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
2667		if (MS(reg, SI_CS_DONE_INT))
2668			break;
2669
2670		wait_limit--;
2671		udelay(10);
2672	} while (wait_limit > 0);
2673
2674	if (!MS(reg, SI_CS_DONE_INT)) {
2675		ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
2676			   addr);
2677		return -ETIMEDOUT;
2678	}
2679
2680	/* clear SI_CS_DONE_INT */
2681	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
2682
2683	if (MS(reg, SI_CS_DONE_ERR)) {
2684		ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
2685		return -EIO;
2686	}
2687
2688	/* extract receive data */
2689	reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
2690	*out = reg;
2691
2692	return 0;
2693}
2694
2695static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
2696					   size_t *data_len)
2697{
2698	u8 *caldata = NULL;
2699	size_t calsize, i;
2700	int ret;
2701
2702	if (!QCA_REV_9887(ar))
2703		return -EOPNOTSUPP;
2704
2705	calsize = ar->hw_params.cal_data_len;
2706	caldata = kmalloc(calsize, GFP_KERNEL);
2707	if (!caldata)
2708		return -ENOMEM;
2709
2710	ath10k_pci_enable_eeprom(ar);
2711
2712	for (i = 0; i < calsize; i++) {
2713		ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
2714		if (ret)
2715			goto err_free;
2716	}
2717
2718	if (!ath10k_pci_validate_cal(caldata, calsize))
2719		goto err_free;
2720
2721	*data = caldata;
2722	*data_len = calsize;
2723
2724	return 0;
2725
2726err_free:
2727	kfree(caldata);
2728
2729	return -EINVAL;
2730}
2731
2732static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2733	.tx_sg			= ath10k_pci_hif_tx_sg,
2734	.diag_read		= ath10k_pci_hif_diag_read,
2735	.diag_write		= ath10k_pci_diag_write_mem,
2736	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
2737	.start			= ath10k_pci_hif_start,
2738	.stop			= ath10k_pci_hif_stop,
2739	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
2740	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
2741	.send_complete_check	= ath10k_pci_hif_send_complete_check,
2742	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
2743	.power_up		= ath10k_pci_hif_power_up,
2744	.power_down		= ath10k_pci_hif_power_down,
2745	.read32			= ath10k_pci_read32,
2746	.write32		= ath10k_pci_write32,
2747#ifdef CONFIG_PM
2748	.suspend		= ath10k_pci_hif_suspend,
2749	.resume			= ath10k_pci_hif_resume,
2750#endif
2751	.fetch_cal_eeprom	= ath10k_pci_hif_fetch_cal_eeprom,
2752};
2753
2754/*
2755 * Top-level interrupt handler for all PCI interrupts from a Target.
2756 * When a block of MSI interrupts is allocated, this top-level handler
2757 * is not used; instead, we directly call the correct sub-handler.
2758 */
2759static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2760{
2761	struct ath10k *ar = arg;
2762	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2763	int ret;
2764
2765	if (ath10k_pci_has_device_gone(ar))
2766		return IRQ_NONE;
2767
2768	ret = ath10k_pci_force_wake(ar);
2769	if (ret) {
2770		ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
2771		return IRQ_NONE;
2772	}
2773
2774	if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
2775	    !ath10k_pci_irq_pending(ar))
2776		return IRQ_NONE;
2777
2778	ath10k_pci_disable_and_clear_legacy_irq(ar);
2779	ath10k_pci_irq_msi_fw_mask(ar);
2780	napi_schedule(&ar->napi);
2781
2782	return IRQ_HANDLED;
2783}
2784
2785static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
2786{
2787	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
2788	int done = 0;
2789
2790	if (ath10k_pci_has_fw_crashed(ar)) {
2791		ath10k_pci_fw_crashed_clear(ar);
2792		ath10k_pci_fw_crashed_dump(ar);
2793		napi_complete(ctx);
2794		return done;
2795	}
2796
2797	ath10k_ce_per_engine_service_any(ar);
2798
2799	done = ath10k_htt_txrx_compl_task(ar, budget);
2800
2801	if (done < budget) {
2802		napi_complete(ctx);
2803		/* In case of MSI, it is possible that interrupts are received
2804		 * while NAPI poll is in progress. Pending interrupts received
2805		 * after NAPI poll has serviced all copy engine pipes would
2806		 * otherwise never be handled again, and this was causing the
2807		 * boot sequence to fail on x86 platforms. So before re-enabling
2808		 * interrupts it is safer to check for, and immediately service,
2809		 * any pending interrupts.
2810		 */
2811		if (CE_INTERRUPT_SUMMARY(ar)) {
2812			napi_reschedule(ctx);
2813			goto out;
2814		}
2815		ath10k_pci_enable_legacy_irq(ar);
2816		ath10k_pci_irq_msi_fw_unmask(ar);
2817	}
2818
2819out:
2820	return done;
2821}
2822
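/* NAPI contract illustrated by the poll function above: returning a
 * value smaller than the budget and calling napi_complete() hands
 * interrupt duty back to the device, while returning the full budget
 * keeps the instance scheduled. A hedged skeleton of that shape
 * (do_rx_work and reenable_device_irqs are hypothetical; kept out of
 * the build):
 */
#if 0
static int example_napi_poll(struct napi_struct *ctx, int budget)
{
	int done = do_rx_work(ctx, budget);	/* process up to budget */

	if (done < budget) {
		napi_complete(ctx);		/* all caught up */
		reenable_device_irqs(ctx);	/* device may interrupt again */
	}

	return done;
}
#endif
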
2823static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2824{
2825	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2826	int ret;
2827
2828	ret = request_irq(ar_pci->pdev->irq,
2829			  ath10k_pci_interrupt_handler,
2830			  IRQF_SHARED, "ath10k_pci", ar);
2831	if (ret) {
2832		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
2833			    ar_pci->pdev->irq, ret);
2834		return ret;
2835	}
2836
2837	return 0;
2838}
2839
2840static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2841{
2842	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2843	int ret;
2844
2845	ret = request_irq(ar_pci->pdev->irq,
2846			  ath10k_pci_interrupt_handler,
2847			  IRQF_SHARED, "ath10k_pci", ar);
2848	if (ret) {
2849		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
2850			    ar_pci->pdev->irq, ret);
2851		return ret;
2852	}
2853
2854	return 0;
2855}
2856
2857static int ath10k_pci_request_irq(struct ath10k *ar)
2858{
2859	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2860
2861	switch (ar_pci->oper_irq_mode) {
2862	case ATH10K_PCI_IRQ_LEGACY:
2863		return ath10k_pci_request_irq_legacy(ar);
2864	case ATH10K_PCI_IRQ_MSI:
2865		return ath10k_pci_request_irq_msi(ar);
2866	default:
2867		return -EINVAL;
2868	}
2869}
2870
2871static void ath10k_pci_free_irq(struct ath10k *ar)
2872{
2873	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2874
2875	free_irq(ar_pci->pdev->irq, ar);
2876}
2877
2878void ath10k_pci_init_napi(struct ath10k *ar)
2879{
2880	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
2881		       ATH10K_NAPI_BUDGET);
2882}
2883
2884static int ath10k_pci_init_irq(struct ath10k *ar)
2885{
2886	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2887	int ret;
2888
2889	ath10k_pci_init_napi(ar);
2890
2891	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
2892		ath10k_info(ar, "limiting irq mode to: %d\n",
2893			    ath10k_pci_irq_mode);
2894
2895	/* Try MSI */
2896	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2897		ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
2898		ret = pci_enable_msi(ar_pci->pdev);
2899		if (ret == 0)
2900			return 0;
2901
2902		/* fall-through */
2903	}
2904
2905	/* Try legacy irq
2906	 *
2907	 * A potential race occurs here: the CORE_BASE write
2908	 * depends on the target correctly decoding the AXI address,
2909	 * but the host won't know when the target writes its BAR to
2910	 * CORE_CTRL. This write might get lost if the target has NOT
2911	 * yet written the BAR. For now, work around the race by
2912	 * repeating the write in the synchronization check below. */
2913	ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
2914
2915	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2916			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2917
2918	return 0;
2919}
2920
2921static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2922{
2923	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2924			   0);
2925}
2926
2927static int ath10k_pci_deinit_irq(struct ath10k *ar)
2928{
2929	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2930
2931	switch (ar_pci->oper_irq_mode) {
2932	case ATH10K_PCI_IRQ_LEGACY:
2933		ath10k_pci_deinit_irq_legacy(ar);
2934		break;
2935	default:
2936		pci_disable_msi(ar_pci->pdev);
2937		break;
2938	}
2939
2940	return 0;
2941}
2942
2943int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2944{
2945	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2946	unsigned long timeout;
2947	u32 val;
2948
2949	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
2950
2951	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2952
2953	do {
2954		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2955
2956		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
2957			   val);
2958
2959		/* target should never return this */
2960		if (val == 0xffffffff)
2961			continue;
2962
2963		/* the device has crashed so don't bother trying anymore */
2964		if (val & FW_IND_EVENT_PENDING)
2965			break;
2966
2967		if (val & FW_IND_INITIALIZED)
2968			break;
2969
2970		if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
2971			/* Fix potential race by repeating CORE_BASE writes */
2972			ath10k_pci_enable_legacy_irq(ar);
2973
2974		mdelay(10);
2975	} while (time_before(jiffies, timeout));
2976
2977	ath10k_pci_disable_and_clear_legacy_irq(ar);
2978	ath10k_pci_irq_msi_fw_mask(ar);
2979
2980	if (val == 0xffffffff) {
2981		ath10k_err(ar, "failed to read device register, device is gone\n");
2982		return -EIO;
2983	}
2984
2985	if (val & FW_IND_EVENT_PENDING) {
2986		ath10k_warn(ar, "device has crashed during init\n");
2987		return -ECOMM;
2988	}
2989
2990	if (!(val & FW_IND_INITIALIZED)) {
2991		ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
2992			   val);
2993		return -ETIMEDOUT;
2994	}
2995
2996	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
2997	return 0;
2998}
2999
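/* The wait above is an instance of the jiffies-deadline polling idiom:
 * compute an absolute deadline once, then poll with a fixed delay until
 * the condition holds or the deadline passes. A hedged sketch
 * (example_poll_reg_bit is hypothetical; kept out of the build):
 */
#if 0
static int example_poll_reg_bit(struct ath10k *ar, u32 addr, u32 mask,
				unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (ath10k_pci_read32(ar, addr) & mask)
			return 0;
		mdelay(10);
	} while (time_before(jiffies, deadline));

	return -ETIMEDOUT;
}
#endif
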
3000static int ath10k_pci_cold_reset(struct ath10k *ar)
3001{
3002	u32 val;
3003
3004	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
3005
3006	spin_lock_bh(&ar->data_lock);
3007
3008	ar->stats.fw_cold_reset_counter++;
3009
3010	spin_unlock_bh(&ar->data_lock);
3011
3012	/* Put Target, including PCIe, into RESET. */
3013	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
3014	val |= 1;
3015	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3016
3017	/* After writing into SOC_GLOBAL_RESET to put device into
3018	 * reset and pulling out of reset pcie may not be stable
3019	 * for any immediate pcie register access and cause bus error,
3020	 * add delay before any pcie access request to fix this issue.
3021	 */
3022	msleep(20);
3023
3024	/* Pull Target, including PCIe, out of RESET. */
3025	val &= ~1;
3026	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3027
3028	msleep(20);
3029
3030	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
3031
3032	return 0;
3033}
3034
3035static int ath10k_pci_claim(struct ath10k *ar)
3036{
3037	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3038	struct pci_dev *pdev = ar_pci->pdev;
3039	int ret;
3040
3041	pci_set_drvdata(pdev, ar);
3042
3043	ret = pci_enable_device(pdev);
3044	if (ret) {
3045		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
3046		return ret;
3047	}
3048
3049	ret = pci_request_region(pdev, BAR_NUM, "ath");
3050	if (ret) {
3051		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
3052			   ret);
3053		goto err_device;
3054	}
3055
3056	/* Target expects 32 bit DMA. Enforce it. */
3057	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3058	if (ret) {
3059		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
3060		goto err_region;
3061	}
3062
3063	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3064	if (ret) {
3065		ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
3066			   ret);
3067		goto err_region;
3068	}
3069
3070	pci_set_master(pdev);
3071
3072	/* Arrange for access to Target SoC registers. */
3073	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
3074	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
3075	if (!ar_pci->mem) {
3076		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
3077		ret = -EIO;
3078		goto err_master;
3079	}
3080
3081	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
3082	return 0;
3083
3084err_master:
3085	pci_clear_master(pdev);
3086
3087err_region:
3088	pci_release_region(pdev, BAR_NUM);
3089
3090err_device:
3091	pci_disable_device(pdev);
3092
3093	return ret;
3094}
3095
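/* Undo ath10k_pci_claim() in reverse order: unmap the BAR, release the
 * region, drop bus mastering, then disable the device.
 */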
3096static void ath10k_pci_release(struct ath10k *ar)
3097{
3098	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3099	struct pci_dev *pdev = ar_pci->pdev;
3100
3101	pci_iounmap(pdev, ar_pci->mem);
3102	pci_release_region(pdev, BAR_NUM);
3103	pci_clear_master(pdev);
3104	pci_disable_device(pdev);
3105}
3106
3107static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
3108{
3109	const struct ath10k_pci_supp_chip *supp_chip;
3110	int i;
3111	u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
3112
3113	for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
3114		supp_chip = &ath10k_pci_supp_chips[i];
3115
3116		if (supp_chip->dev_id == dev_id &&
3117		    supp_chip->rev_id == rev_id)
3118			return true;
3119	}
3120
3121	return false;
3122}
3123
3124int ath10k_pci_setup_resource(struct ath10k *ar)
3125{
3126	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3127	int ret;
3128
3129	spin_lock_init(&ar_pci->ce_lock);
3130	spin_lock_init(&ar_pci->ps_lock);
3131
3132	setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
3133		    (unsigned long)ar);
3134
3135	if (QCA_REV_6174(ar))
3136		ath10k_pci_override_ce_config(ar);
3137
3138	ret = ath10k_pci_alloc_pipes(ar);
3139	if (ret) {
3140		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
3141			   ret);
3142		return ret;
3143	}
3144
3145	return 0;
3146}
3147
3148void ath10k_pci_release_resource(struct ath10k *ar)
3149{
3150	ath10k_pci_rx_retry_sync(ar);
3151	netif_napi_del(&ar->napi);
3152	ath10k_pci_ce_deinit(ar);
3153	ath10k_pci_free_pipes(ar);
3154}
3155
3156static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
3157	.read32		= ath10k_bus_pci_read32,
3158	.write32	= ath10k_bus_pci_write32,
3159	.get_num_banks	= ath10k_pci_get_num_banks,
3160};
3161
3162static int ath10k_pci_probe(struct pci_dev *pdev,
3163			    const struct pci_device_id *pci_dev)
3164{
3165	int ret = 0;
3166	struct ath10k *ar;
3167	struct ath10k_pci *ar_pci;
3168	enum ath10k_hw_rev hw_rev;
3169	u32 chip_id;
3170	bool pci_ps;
3171	int (*pci_soft_reset)(struct ath10k *ar);
3172	int (*pci_hard_reset)(struct ath10k *ar);
3173
3174	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID_UBNT:
3175	case QCA988X_2_0_DEVICE_ID:
3176		hw_rev = ATH10K_HW_QCA988X;
3177		pci_ps = false;
3178		pci_soft_reset = ath10k_pci_warm_reset;
3179		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3180		break;
3181	case QCA9887_1_0_DEVICE_ID:
3182		hw_rev = ATH10K_HW_QCA9887;
3183		pci_ps = false;
3184		pci_soft_reset = ath10k_pci_warm_reset;
3185		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3186		break;
3187	case QCA6164_2_1_DEVICE_ID:
3188	case QCA6174_2_1_DEVICE_ID:
3189		hw_rev = ATH10K_HW_QCA6174;
3190		pci_ps = true;
3191		pci_soft_reset = ath10k_pci_warm_reset;
3192		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3193		break;
3194	case QCA99X0_2_0_DEVICE_ID:
3195		hw_rev = ATH10K_HW_QCA99X0;
3196		pci_ps = false;
3197		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3198		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3199		break;
3200	case QCA9984_1_0_DEVICE_ID:
3201		hw_rev = ATH10K_HW_QCA9984;
3202		pci_ps = false;
3203		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3204		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3205		break;
3206	case QCA9888_2_0_DEVICE_ID:
3207		hw_rev = ATH10K_HW_QCA9888;
3208		pci_ps = false;
3209		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3210		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3211		break;
3212	case QCA9377_1_0_DEVICE_ID:
3213		hw_rev = ATH10K_HW_QCA9377;
3214		pci_ps = true;
3215		pci_soft_reset = NULL;
3216		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3217		break;
3218	default:
3219		WARN_ON(1);
3220		return -ENOTSUPP;
3221	}
3222
3223	ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
3224				hw_rev, &ath10k_pci_hif_ops);
3225	if (!ar) {
3226		dev_err(&pdev->dev, "failed to allocate core\n");
3227		return -ENOMEM;
3228	}
3229
3230	ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
3231		   pdev->vendor, pdev->device,
3232		   pdev->subsystem_vendor, pdev->subsystem_device);
3233
3234	ar_pci = ath10k_pci_priv(ar);
3235	ar_pci->pdev = pdev;
3236	ar_pci->dev = &pdev->dev;
3237	ar_pci->ar = ar;
3238	ar->dev_id = pci_dev->device;
3239	ar_pci->pci_ps = pci_ps;
3240	ar_pci->bus_ops = &ath10k_pci_bus_ops;
3241	ar_pci->pci_soft_reset = pci_soft_reset;
3242	ar_pci->pci_hard_reset = pci_hard_reset;
3243
3244	ar->id.vendor = pdev->vendor;
3245	ar->id.device = pdev->device;
3246	ar->id.subsystem_vendor = pdev->subsystem_vendor;
3247	ar->id.subsystem_device = pdev->subsystem_device;
3248
3249	setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
3250		    (unsigned long)ar);
3251
3252	ret = ath10k_pci_setup_resource(ar);
3253	if (ret) {
3254		ath10k_err(ar, "failed to setup resource: %d\n", ret);
3255		goto err_core_destroy;
3256	}
3257
3258	ret = ath10k_pci_claim(ar);
3259	if (ret) {
3260		ath10k_err(ar, "failed to claim device: %d\n", ret);
3261		goto err_free_pipes;
3262	}
3263
3264	ret = ath10k_pci_force_wake(ar);
3265	if (ret) {
3266		ath10k_warn(ar, "failed to wake up device: %d\n", ret);
3267		goto err_sleep;
3268	}
3269
3270	ath10k_pci_ce_deinit(ar);
3271	ath10k_pci_irq_disable(ar);
3272
3273	ret = ath10k_pci_init_irq(ar);
3274	if (ret) {
3275		ath10k_err(ar, "failed to init irqs: %d\n", ret);
3276		goto err_sleep;
3277	}
3278
3279	ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
3280		    ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
3281		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);
3282
3283	ret = ath10k_pci_request_irq(ar);
3284	if (ret) {
3285		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
3286		goto err_deinit_irq;
3287	}
3288
3289	ret = ath10k_pci_chip_reset(ar);
3290	if (ret) {
3291		ath10k_err(ar, "failed to reset chip: %d\n", ret);
3292		goto err_free_irq;
3293	}
3294
3295	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3296	if (chip_id == 0xffffffff) {
3297		ath10k_err(ar, "failed to get chip id\n");
		ret = -ENODEV;
3298		goto err_free_irq;
3299	}
3300
3301	if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
3302		ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
3303			   pdev->device, chip_id);
		ret = -ENODEV;
3304		goto err_free_irq;
3305	}
3306
3307	ret = ath10k_core_register(ar, chip_id);
3308	if (ret) {
3309		ath10k_err(ar, "failed to register driver core: %d\n", ret);
3310		goto err_free_irq;
3311	}
3312
3313	return 0;
3314
3315err_free_irq:
3316	ath10k_pci_free_irq(ar);
3317	ath10k_pci_rx_retry_sync(ar);
3318
3319err_deinit_irq:
3320	ath10k_pci_deinit_irq(ar);
3321
3322err_sleep:
3323	ath10k_pci_sleep_sync(ar);
3324	ath10k_pci_release(ar);
3325
3326err_free_pipes:
3327	ath10k_pci_free_pipes(ar);
3328
3329err_core_destroy:
3330	ath10k_core_destroy(ar);
3331
3332	return ret;
3333}
3334
3335static void ath10k_pci_remove(struct pci_dev *pdev)
3336{
3337	struct ath10k *ar = pci_get_drvdata(pdev);
3338	struct ath10k_pci *ar_pci;
3339
3340	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
3341
3342	if (!ar)
3343		return;
3344
3345	ar_pci = ath10k_pci_priv(ar);
3346
3347	if (!ar_pci)
3348		return;
3349
3350	ath10k_core_unregister(ar);
3351	ath10k_pci_free_irq(ar);
3352	ath10k_pci_deinit_irq(ar);
3353	ath10k_pci_release_resource(ar);
3354	ath10k_pci_sleep_sync(ar);
3355	ath10k_pci_release(ar);
3356	ath10k_core_destroy(ar);
3357}
3358
3359MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
3360
3361static struct pci_driver ath10k_pci_driver = {
3362	.name = "ath10k_pci",
3363	.id_table = ath10k_pci_id_table,
3364	.probe = ath10k_pci_probe,
3365	.remove = ath10k_pci_remove,
3366};
3367
3368static int __init ath10k_pci_init(void)
3369{
3370	int ret1, ret2;
3371
3372	ret1 = pci_register_driver(&ath10k_pci_driver);
3373	if (ret1)
3374		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
3375		       ret1);
3376
3377	ret2 = ath10k_ahb_init();
3378	if (ret2)
3379		printk(KERN_ERR "ahb init failed: %d\n", ret2);
3380
	if (ret1 && ret2)
		return ret1;

	/* registered to at least one bus */
	return 0;
3382}
3383module_init(ath10k_pci_init);
3384
3385static void __exit ath10k_pci_exit(void)
3386{
3387	pci_unregister_driver(&ath10k_pci_driver);
3388	ath10k_ahb_exit();
3389}
3390
3391module_exit(ath10k_pci_exit);
3392
3393MODULE_AUTHOR("Qualcomm Atheros");
3394MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
3395MODULE_LICENSE("Dual BSD/GPL");
3396
3397/* QCA988x 2.0 firmware files */
3398MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
3399MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
3400MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3401MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3402MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
3403MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3404
3405/* QCA9887 1.0 firmware files */
3406MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3407MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
3408MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3409
3410/* QCA6174 2.1 firmware files */
3411MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
3412MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
3413MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
3414MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3415
3416/* QCA6174 3.1 firmware files */
3417MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3418MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3419MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
3420MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3421
3422/* QCA9377 1.0 firmware files */
3423MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3424MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);