   1/*
   2 * Copyright (c) 2004-2011 Atheros Communications Inc.
   3 * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
   4 * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
   5 *
   6 * Permission to use, copy, modify, and/or distribute this software for any
   7 * purpose with or without fee is hereby granted, provided that the above
   8 * copyright notice and this permission notice appear in all copies.
   9 *
  10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  17 */
  18
  19#include <linux/module.h>
  20#include <linux/mmc/card.h>
  21#include <linux/mmc/mmc.h>
  22#include <linux/mmc/host.h>
  23#include <linux/mmc/sdio_func.h>
  24#include <linux/mmc/sdio_ids.h>
  25#include <linux/mmc/sdio.h>
  26#include <linux/mmc/sd.h>
  27#include <linux/bitfield.h>
  28#include "core.h"
  29#include "bmi.h"
  30#include "debug.h"
  31#include "hif.h"
  32#include "htc.h"
  33#include "targaddrs.h"
  34#include "trace.h"
  35#include "sdio.h"
  36
  37/* inlined helper functions */
  38
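/* Illustrative note (assuming the mbox block size of 256 used by this
 * driver, i.e. block_mask == 0xff): a 1540 byte HTC message is padded
 * to __ALIGN_MASK(1540, 0xff) == 1792 bytes, i.e. rounded up to the
 * next multiple of the block size.
 */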
  39static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
  40						   size_t len)
  41{
  42	return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask);
  43}
  44
  45static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id)
  46{
  47	return (enum ath10k_htc_ep_id)pipe_id;
  48}
  49
  50static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)
  51{
  52	dev_kfree_skb(pkt->skb);
  53	pkt->skb = NULL;
  54	pkt->alloc_len = 0;
  55	pkt->act_len = 0;
  56	pkt->trailer_only = false;
  57}
  58
  59static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,
  60						size_t act_len, size_t full_len,
  61						bool part_of_bundle,
  62						bool last_in_bundle)
  63{
  64	pkt->skb = dev_alloc_skb(full_len);
  65	if (!pkt->skb)
  66		return -ENOMEM;
  67
  68	pkt->act_len = act_len;
  69	pkt->alloc_len = full_len;
  70	pkt->part_of_bundle = part_of_bundle;
  71	pkt->last_in_bundle = last_in_bundle;
  72	pkt->trailer_only = false;
  73
  74	return 0;
  75}
  76
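/* A message is "trailer only" when the HTC header length equals the
 * trailer length, i.e. the message carries no payload and only holds
 * trailer records (such as lookahead reports).
 */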
  77static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)
  78{
  79	bool trailer_only = false;
  80	struct ath10k_htc_hdr *htc_hdr =
  81		(struct ath10k_htc_hdr *)pkt->skb->data;
  82	u16 len = __le16_to_cpu(htc_hdr->len);
  83
  84	if (len == htc_hdr->trailer_len)
  85		trailer_only = true;
  86
  87	return trailer_only;
  88}
  89
  90/* sdio/mmc functions */
  91
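/* The CMD52 (SD_IO_RW_DIRECT) argument is laid out as: bit 31 = R/W
 * flag, bit 27 = read-after-write (RAW), bits 25:9 = register address
 * and bits 7:0 = data byte; bits 26 and 8 are always set by the helper
 * below. As an illustrative example, a write (write = 1, raw = 0) of
 * 0x02 to address 0xf2 yields the argument
 * 0x80000000 | 0x04000000 | (0xf2 << 9) | 0x100 | 0x02 == 0x8401e502.
 */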
  92static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
  93					     unsigned int address,
  94					     unsigned char val)
  95{
  96	*arg = FIELD_PREP(BIT(31), write) |
  97	       FIELD_PREP(BIT(27), raw) |
  98	       FIELD_PREP(BIT(26), 1) |
  99	       FIELD_PREP(GENMASK(25, 9), address) |
 100	       FIELD_PREP(BIT(8), 1) |
 101	       FIELD_PREP(GENMASK(7, 0), val);
 102}
 103
 104static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
 105					   unsigned int address,
 106					   unsigned char byte)
 107{
 108	struct mmc_command io_cmd;
 109
 110	memset(&io_cmd, 0, sizeof(io_cmd));
 111	ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
 112	io_cmd.opcode = SD_IO_RW_DIRECT;
 113	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
 114
 115	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
 116}
 117
 118static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card,
 119					   unsigned int address,
 120					   unsigned char *byte)
 121{
 122	struct mmc_command io_cmd;
 123	int ret;
 124
 125	memset(&io_cmd, 0, sizeof(io_cmd));
 126	ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0);
 127	io_cmd.opcode = SD_IO_RW_DIRECT;
 128	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
 129
 130	ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);
 131	if (!ret)
 132		*byte = io_cmd.resp[0];
 133
 134	return ret;
 135}
 136
 137static int ath10k_sdio_config(struct ath10k *ar)
 138{
 139	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 140	struct sdio_func *func = ar_sdio->func;
 141	unsigned char byte, asyncintdelay = 2;
 142	int ret;
 143
 144	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");
 145
 146	sdio_claim_host(func);
 147
 148	byte = 0;
 149	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
 150					      SDIO_CCCR_DRIVE_STRENGTH,
 151					      &byte);
 152
 153	byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
 154	byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
 155			   ATH10K_SDIO_DRIVE_DTSX_TYPE_D);
 156
 157	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
 158					      SDIO_CCCR_DRIVE_STRENGTH,
 159					      byte);
 160
 161	byte = 0;
 162	ret = ath10k_sdio_func0_cmd52_rd_byte(
 163		func->card,
 164		CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
 165		&byte);
 166
 167	byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
 168		 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
 169		 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D);
 170
 171	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
 172					      CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
 173					      byte);
 174	if (ret) {
 175		ath10k_warn(ar, "failed to enable driver strength: %d\n", ret);
 176		goto out;
 177	}
 178
 179	byte = 0;
 180	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
 181					      CCCR_SDIO_IRQ_MODE_REG_SDIO3,
 182					      &byte);
 183
 184	byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3;
 185
 186	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
 187					      CCCR_SDIO_IRQ_MODE_REG_SDIO3,
 188					      byte);
 189	if (ret) {
 190		ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",
 191			    ret);
 192		goto out;
 193	}
 194
 195	byte = 0;
 196	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
 197					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
 198					      &byte);
 199
 200	byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK;
 201	byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay);
 202
 203	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
 204					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
 205					      byte);
 206
  207	/* give the SDIO function some time to enable, in ms */
 208	func->enable_timeout = 100;
 209
 210	ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);
 211	if (ret) {
 212		ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",
 213			    ar_sdio->mbox_info.block_size, ret);
 214		goto out;
 215	}
 216
 217out:
 218	sdio_release_host(func);
 219	return ret;
 220}
 221
 222static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val)
 223{
 224	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 225	struct sdio_func *func = ar_sdio->func;
 226	int ret;
 227
 228	sdio_claim_host(func);
 229
 230	sdio_writel(func, val, addr, &ret);
 231	if (ret) {
 232		ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n",
 233			    val, addr, ret);
 234		goto out;
 235	}
 236
 237	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n",
 238		   addr, val);
 239
 240out:
 241	sdio_release_host(func);
 242
 243	return ret;
 244}
 245
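/* Note that the 32 bit value below is written through a heap-allocated
 * bounce buffer rather than from the stack: sdio_writesb() transfers
 * can be backed by DMA, and stack buffers are not guaranteed to be
 * DMA-able.
 */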
 246static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val)
 247{
 248	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 249	struct sdio_func *func = ar_sdio->func;
 250	__le32 *buf;
 251	int ret;
 252
 253	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 254	if (!buf)
 255		return -ENOMEM;
 256
 257	*buf = cpu_to_le32(val);
 258
 259	sdio_claim_host(func);
 260
 261	ret = sdio_writesb(func, addr, buf, sizeof(*buf));
 262	if (ret) {
 263		ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n",
 264			    val, addr, ret);
 265		goto out;
 266	}
 267
 268	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n",
 269		   addr, val);
 270
 271out:
 272	sdio_release_host(func);
 273
 274	kfree(buf);
 275
 276	return ret;
 277}
 278
 279static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val)
 280{
 281	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 282	struct sdio_func *func = ar_sdio->func;
 283	int ret;
 284
 285	sdio_claim_host(func);
 286	*val = sdio_readl(func, addr, &ret);
 287	if (ret) {
 288		ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
 289			    addr, ret);
 290		goto out;
 291	}
 292
 293	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n",
 294		   addr, *val);
 295
 296out:
 297	sdio_release_host(func);
 298
 299	return ret;
 300}
 301
 302static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len)
 303{
 304	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 305	struct sdio_func *func = ar_sdio->func;
 306	int ret;
 307
 308	sdio_claim_host(func);
 309
 310	ret = sdio_memcpy_fromio(func, buf, addr, len);
 311	if (ret) {
 312		ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
 313			    addr, ret);
 314		goto out;
 315	}
 316
 317	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n",
 318		   addr, buf, len);
 319	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len);
 320
 321out:
 322	sdio_release_host(func);
 323
 324	return ret;
 325}
 326
 327static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)
 328{
 329	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 330	struct sdio_func *func = ar_sdio->func;
 331	int ret;
 332
 333	sdio_claim_host(func);
 334
 335	/* For some reason toio() doesn't have const for the buffer, need
  336	 * an ugly hack to work around that.
 337	 */
 338	ret = sdio_memcpy_toio(func, addr, (void *)buf, len);
 339	if (ret) {
 340		ath10k_warn(ar, "failed to write to address 0x%x: %d\n",
 341			    addr, ret);
 342		goto out;
 343	}
 344
 345	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n",
 346		   addr, buf, len);
 347	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len);
 348
 349out:
 350	sdio_release_host(func);
 351
 352	return ret;
 353}
 354
 355static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len)
 356{
 357	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 358	struct sdio_func *func = ar_sdio->func;
 359	int ret;
 360
 361	sdio_claim_host(func);
 362
 363	len = round_down(len, ar_sdio->mbox_info.block_size);
 364
 365	ret = sdio_readsb(func, buf, addr, len);
 366	if (ret) {
 367		ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n",
 368			    addr, ret);
 369		goto out;
 370	}
 371
 372	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n",
 373		   addr, buf, len);
 374	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len);
 375
 376out:
 377	sdio_release_host(func);
 378
 379	return ret;
 380}
 381
 382/* HIF mbox functions */
 383
 384static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
 385					      struct ath10k_sdio_rx_data *pkt,
 386					      u32 *lookaheads,
 387					      int *n_lookaheads)
 388{
 389	struct ath10k_htc *htc = &ar->htc;
 390	struct sk_buff *skb = pkt->skb;
 391	struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
 392	bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
 393	enum ath10k_htc_ep_id eid;
 394	u16 payload_len;
 395	u8 *trailer;
 396	int ret;
 397
 398	payload_len = le16_to_cpu(htc_hdr->len);
 399
 400	if (trailer_present) {
 401		trailer = skb->data + sizeof(*htc_hdr) +
 402			  payload_len - htc_hdr->trailer_len;
 403
 404		eid = pipe_id_to_eid(htc_hdr->eid);
 405
 406		ret = ath10k_htc_process_trailer(htc,
 407						 trailer,
 408						 htc_hdr->trailer_len,
 409						 eid,
 410						 lookaheads,
 411						 n_lookaheads);
 412		if (ret)
 413			return ret;
 414
 415		if (is_trailer_only_msg(pkt))
 416			pkt->trailer_only = true;
 417
 418		skb_trim(skb, skb->len - htc_hdr->trailer_len);
 419	}
 420
 421	skb_pull(skb, sizeof(*htc_hdr));
 422
 423	return 0;
 424}
 425
 426static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
 427					       u32 lookaheads[],
 428					       int *n_lookahead)
 429{
 430	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 431	struct ath10k_htc *htc = &ar->htc;
 432	struct ath10k_sdio_rx_data *pkt;
 433	struct ath10k_htc_ep *ep;
 434	enum ath10k_htc_ep_id id;
 435	int ret, i, *n_lookahead_local;
 436	u32 *lookaheads_local;
 437
 438	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
 439		lookaheads_local = lookaheads;
 440		n_lookahead_local = n_lookahead;
 441
 442		id = ((struct ath10k_htc_hdr *)&lookaheads[i])->eid;
 443
 444		if (id >= ATH10K_HTC_EP_COUNT) {
 445			ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
 446				    id);
 447			ret = -ENOMEM;
 448			goto out;
 449		}
 450
 451		ep = &htc->endpoint[id];
 452
 453		if (ep->service_id == 0) {
 454			ath10k_warn(ar, "ep %d is not connected\n", id);
 455			ret = -ENOMEM;
 456			goto out;
 457		}
 458
 459		pkt = &ar_sdio->rx_pkts[i];
 460
 461		if (pkt->part_of_bundle && !pkt->last_in_bundle) {
  462		/* Only read lookaheads from RX trailers
 463			 * for the last packet in a bundle.
 464			 */
 465			lookaheads_local = NULL;
 466			n_lookahead_local = NULL;
 467		}
 468
 469		ret = ath10k_sdio_mbox_rx_process_packet(ar,
 470							 pkt,
 471							 lookaheads_local,
 472							 n_lookahead_local);
 473		if (ret)
 474			goto out;
 475
 476		if (!pkt->trailer_only)
 477			ep->ep_ops.ep_rx_complete(ar_sdio->ar, pkt->skb);
 478		else
 479			kfree_skb(pkt->skb);
 480
  481		/* The RX complete handler now owns the skb... */
 482		pkt->skb = NULL;
 483		pkt->alloc_len = 0;
 484	}
 485
 486	ret = 0;
 487
 488out:
  489	/* Free all packets that were not passed on to the RX completion
 490	 * handler...
 491	 */
 492	for (; i < ar_sdio->n_rx_pkts; i++)
 493		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
 494
 495	return ret;
 496}
 497
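/* When ATH10K_HTC_FLAG_BUNDLE_MASK is set in an HTC header, the field
 * value is the number of additional packets that follow, all padded to
 * the same length as the first packet so that the whole bundle can be
 * fetched in a single SDIO transfer.
 */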
 498static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
 499					     struct ath10k_sdio_rx_data *rx_pkts,
 500					     struct ath10k_htc_hdr *htc_hdr,
 501					     size_t full_len, size_t act_len,
 502					     size_t *bndl_cnt)
 503{
 504	int ret, i;
 505
 506	*bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
 507
 508	if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE) {
 509		ath10k_warn(ar,
  510			    "HTC bundle count %zu exceeds maximum %u\n",
  511			    *bndl_cnt,
 512			    HTC_HOST_MAX_MSG_PER_BUNDLE);
 513		return -ENOMEM;
 514	}
 515
 516	/* Allocate bndl_cnt extra skb's for the bundle.
  517	 * The packet containing the
 518	 * ATH10K_HTC_FLAG_BUNDLE_MASK flag is not included
 519	 * in bndl_cnt. The skb for that packet will be
 520	 * allocated separately.
 521	 */
 522	for (i = 0; i < *bndl_cnt; i++) {
 523		ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i],
 524						    act_len,
 525						    full_len,
 526						    true,
 527						    false);
 528		if (ret)
 529			return ret;
 530	}
 531
 532	return 0;
 533}
 534
 535static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
 536				     u32 lookaheads[], int n_lookaheads)
 537{
 538	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 539	struct ath10k_htc_hdr *htc_hdr;
 540	size_t full_len, act_len;
 541	bool last_in_bundle;
 542	int ret, i;
 543
 544	if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
 545		ath10k_warn(ar,
  546			    "the total number of pkts to be fetched (%u) exceeds maximum %u\n",
 547			    n_lookaheads,
 548			    ATH10K_SDIO_MAX_RX_MSGS);
 549		ret = -ENOMEM;
 550		goto err;
 551	}
 552
 553	for (i = 0; i < n_lookaheads; i++) {
 554		htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
 555		last_in_bundle = false;
 556
 557		if (le16_to_cpu(htc_hdr->len) >
 558		    ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
 559			ath10k_warn(ar,
 560				    "payload length %d exceeds max htc length: %zu\n",
 561				    le16_to_cpu(htc_hdr->len),
 562				    ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
 563			ret = -ENOMEM;
 564			goto err;
 565		}
 566
 567		act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
 568		full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);
 569
 570		if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
 571			ath10k_warn(ar,
 572				    "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
 573				    htc_hdr->eid, htc_hdr->flags,
 574				    le16_to_cpu(htc_hdr->len));
 575			ret = -EINVAL;
 576			goto err;
 577		}
 578
 579		if (htc_hdr->flags & ATH10K_HTC_FLAG_BUNDLE_MASK) {
 580			/* HTC header indicates that every packet to follow
 581			 * has the same padded length so that it can be
 582			 * optimally fetched as a full bundle.
 583			 */
 584			size_t bndl_cnt;
 585
 586			ret = ath10k_sdio_mbox_alloc_pkt_bundle(ar,
 587								&ar_sdio->rx_pkts[i],
 588								htc_hdr,
 589								full_len,
 590								act_len,
 591								&bndl_cnt);
 592
 593			n_lookaheads += bndl_cnt;
 594			i += bndl_cnt;
  595			/* Next buffer will be the last in the bundle */
 596			last_in_bundle = true;
 597		}
 598
 599		/* Allocate skb for packet. If the packet had the
 600		 * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
 601		 * packet skb's have been allocated in the previous step.
 602		 */
 603		ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
 604						    act_len,
 605						    full_len,
 606						    last_in_bundle,
 607						    last_in_bundle);
 608	}
 609
 610	ar_sdio->n_rx_pkts = i;
 611
 612	return 0;
 613
 614err:
 615	for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) {
 616		if (!ar_sdio->rx_pkts[i].alloc_len)
 617			break;
 618		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
 619	}
 620
 621	return ret;
 622}
 623
 624static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar,
 625				      struct ath10k_sdio_rx_data *pkt)
 626{
 627	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 628	struct sk_buff *skb = pkt->skb;
 629	int ret;
 630
 631	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
 632				 skb->data, pkt->alloc_len);
 633	pkt->status = ret;
 634	if (!ret)
 635		skb_put(skb, pkt->act_len);
 636
 637	return ret;
 638}
 639
 640static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
 641{
 642	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 643	int ret, i;
 644
 645	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
 646		ret = ath10k_sdio_mbox_rx_packet(ar,
 647						 &ar_sdio->rx_pkts[i]);
 648		if (ret)
 649			goto err;
 650	}
 651
 652	return 0;
 653
 654err:
  655	/* Free all packets that were not successfully fetched. */
 656	for (; i < ar_sdio->n_rx_pkts; i++)
 657		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
 658
 659	return ret;
 660}
 661
 662/* This is the timeout for mailbox processing done in the sdio irq
 663 * handler. The timeout is deliberately set quite high since SDIO dump logs
 664 * over serial port can/will add a substantial overhead to the processing
 665 * (if enabled).
 666 */
 667#define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ)
 668
 669static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
 670						  u32 msg_lookahead, bool *done)
 671{
 672	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 673	u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS];
 674	int n_lookaheads = 1;
 675	unsigned long timeout;
 676	int ret;
 677
 678	*done = true;
 679
 680	/* Copy the lookahead obtained from the HTC register table into our
 681	 * temp array as a start value.
 682	 */
 683	lookaheads[0] = msg_lookahead;
 684
 685	timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
 686	do {
  687		/* Try to allocate as many HTC RX packets as indicated by
 688		 * n_lookaheads.
 689		 */
 690		ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads,
 691						n_lookaheads);
 692		if (ret)
 693			break;
 694
 695		if (ar_sdio->n_rx_pkts >= 2)
 696			/* A recv bundle was detected, force IRQ status
 697			 * re-check again.
 698			 */
 699			*done = false;
 700
 701		ret = ath10k_sdio_mbox_rx_fetch(ar);
 702
 703		/* Process fetched packets. This will potentially update
 704		 * n_lookaheads depending on if the packets contain lookahead
 705		 * reports.
 706		 */
 707		n_lookaheads = 0;
 708		ret = ath10k_sdio_mbox_rx_process_packets(ar,
 709							  lookaheads,
 710							  &n_lookaheads);
 711
 712		if (!n_lookaheads || ret)
 713			break;
 714
 715		/* For SYNCH processing, if we get here, we are running
 716		 * through the loop again due to updated lookaheads. Set
 717		 * flag that we should re-check IRQ status registers again
 718		 * before leaving IRQ processing, this can net better
 719		 * performance in high throughput situations.
 720		 */
 721		*done = false;
 722	} while (time_before(jiffies, timeout));
 723
 724	if (ret && (ret != -ECANCELED))
 725		ath10k_warn(ar, "failed to get pending recv messages: %d\n",
 726			    ret);
 727
 728	return ret;
 729}
 730
 731static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar)
 732{
 733	u32 val;
 734	int ret;
 735
 736	/* TODO: Add firmware crash handling */
 737	ath10k_warn(ar, "firmware crashed\n");
 738
 739	/* read counter to clear the interrupt, the debug error interrupt is
 740	 * counter 0.
 741	 */
 742	ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val);
 743	if (ret)
 744		ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret);
 745
 746	return ret;
 747}
 748
 749static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar)
 750{
 751	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 752	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
 753	u8 counter_int_status;
 754	int ret;
 755
 756	mutex_lock(&irq_data->mtx);
 757	counter_int_status = irq_data->irq_proc_reg->counter_int_status &
 758			     irq_data->irq_en_reg->cntr_int_status_en;
 759
 760	/* NOTE: other modules like GMBOX may use the counter interrupt for
 761	 * credit flow control on other counters, we only need to check for
 762	 * the debug assertion counter interrupt.
 763	 */
 764	if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK)
 765		ret = ath10k_sdio_mbox_proc_dbg_intr(ar);
 766	else
 767		ret = 0;
 768
 769	mutex_unlock(&irq_data->mtx);
 770
 771	return ret;
 772}
 773
 774static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar)
 775{
 776	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 777	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
 778	u8 error_int_status;
 779	int ret;
 780
 781	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");
 782
 783	error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
 784	if (!error_int_status) {
 785		ath10k_warn(ar, "invalid error interrupt status: 0x%x\n",
 786			    error_int_status);
 787		return -EIO;
 788	}
 789
 790	ath10k_dbg(ar, ATH10K_DBG_SDIO,
 791		   "sdio error_int_status 0x%x\n", error_int_status);
 792
 793	if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK,
 794		      error_int_status))
 795		ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");
 796
 797	if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
 798		      error_int_status))
 799		ath10k_warn(ar, "rx underflow interrupt error\n");
 800
 801	if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
 802		      error_int_status))
 803		ath10k_warn(ar, "tx overflow interrupt error\n");
 804
 805	/* Clear the interrupt */
 806	irq_data->irq_proc_reg->error_int_status &= ~error_int_status;
 807
 808	/* set W1C value to clear the interrupt, this hits the register first */
 809	ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS,
 810				    error_int_status);
 811	if (ret) {
 812		ath10k_warn(ar, "unable to write to error int status address: %d\n",
 813			    ret);
 814		return ret;
 815	}
 816
 817	return 0;
 818}
 819
 820static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
 821{
 822	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 823	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
 824	u8 cpu_int_status;
 825	int ret;
 826
 827	mutex_lock(&irq_data->mtx);
 828	cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
 829			 irq_data->irq_en_reg->cpu_int_status_en;
 830	if (!cpu_int_status) {
 831		ath10k_warn(ar, "CPU interrupt status is zero\n");
 832		ret = -EIO;
 833		goto out;
 834	}
 835
 836	/* Clear the interrupt */
 837	irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;
 838
 839	/* Set up the register transfer buffer to hit the register 4 times,
 840	 * this is done to make the access 4-byte aligned to mitigate issues
 841	 * with host bus interconnects that restrict bus transfer lengths to
  842	 * be a multiple of 4 bytes.
 843	 *
 844	 * Set W1C value to clear the interrupt, this hits the register first.
 845	 */
 846	ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS,
 847				    cpu_int_status);
 848	if (ret) {
 849		ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n",
 850			    ret);
 851		goto out;
 852	}
 853
 854out:
 855	mutex_unlock(&irq_data->mtx);
 856	return ret;
 857}
 858
 859static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
 860					    u8 *host_int_status,
 861					    u32 *lookahead)
 862{
 863	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 864	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
 865	struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
 866	struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
 867	u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1);
 868	int ret;
 869
 870	mutex_lock(&irq_data->mtx);
 871
 872	*lookahead = 0;
 873	*host_int_status = 0;
 874
  875	/* int_status_en is supposed to be non-zero, otherwise interrupts
 876	 * shouldn't be enabled. There is however a short time frame during
 877	 * initialization between the irq register and int_status_en init
 878	 * where this can happen.
 879	 * We silently ignore this condition.
 880	 */
 881	if (!irq_en_reg->int_status_en) {
 882		ret = 0;
 883		goto out;
 884	}
 885
  886	/* Read the first sizeof(struct ath10k_sdio_irq_proc_regs)
  887	 * bytes of the HTC register table. This
  888	 * will yield the value of the different int status
  889	 * registers and the lookahead registers.
  890	 */
 891	ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
 892			       irq_proc_reg, sizeof(*irq_proc_reg));
 893	if (ret)
 894		goto out;
 895
 896	/* Update only those registers that are enabled */
 897	*host_int_status = irq_proc_reg->host_int_status &
 898			   irq_en_reg->int_status_en;
 899
 900	/* Look at mbox status */
 901	if (!(*host_int_status & htc_mbox)) {
 902		*lookahead = 0;
 903		ret = 0;
 904		goto out;
 905	}
 906
  907	/* Mask out pending mbox value, we use lookahead as
 908	 * the real flag for mbox processing.
 909	 */
 910	*host_int_status &= ~htc_mbox;
 911	if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
 912		*lookahead = le32_to_cpu(
 913			irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
 914		if (!*lookahead)
 915			ath10k_warn(ar, "sdio mbox lookahead is zero\n");
 916	}
 917
 918out:
 919	mutex_unlock(&irq_data->mtx);
 920	return ret;
 921}
 922
 923static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
 924					      bool *done)
 925{
 926	u8 host_int_status;
 927	u32 lookahead;
 928	int ret;
 929
 930	/* NOTE: HIF implementation guarantees that the context of this
 931	 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
 932	 * sleep or call any API that can block or switch thread/task
 933	 * contexts. This is a fully schedulable context.
 934	 */
 935
 936	ret = ath10k_sdio_mbox_read_int_status(ar,
 937					       &host_int_status,
 938					       &lookahead);
 939	if (ret) {
 940		*done = true;
 941		goto out;
 942	}
 943
 944	if (!host_int_status && !lookahead) {
 945		ret = 0;
 946		*done = true;
 947		goto out;
 948	}
 949
 950	if (lookahead) {
 951		ath10k_dbg(ar, ATH10K_DBG_SDIO,
 952			   "sdio pending mailbox msg lookahead 0x%08x\n",
 953			   lookahead);
 954
 955		ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar,
 956							     lookahead,
 957							     done);
 958		if (ret)
 959			goto out;
 960	}
 961
 962	/* now, handle the rest of the interrupts */
 963	ath10k_dbg(ar, ATH10K_DBG_SDIO,
 964		   "sdio host_int_status 0x%x\n", host_int_status);
 965
 966	if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) {
 967		/* CPU Interrupt */
 968		ret = ath10k_sdio_mbox_proc_cpu_intr(ar);
 969		if (ret)
 970			goto out;
 971	}
 972
 973	if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) {
 974		/* Error Interrupt */
 975		ret = ath10k_sdio_mbox_proc_err_intr(ar);
 976		if (ret)
 977			goto out;
 978	}
 979
 980	if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status))
 981		/* Counter Interrupt */
 982		ret = ath10k_sdio_mbox_proc_counter_intr(ar);
 983
 984	ret = 0;
 985
 986out:
  987	/* An optimization to bypass reading the IRQ status registers
  988	 * unnecessarily, which can re-wake the target: if upper layers
  989	 * determine that we are in a low-throughput mode, we can rely on
  990	 * taking another interrupt rather than re-checking the status
  991	 * registers which can re-wake the target.
  992	 *
  993	 * NOTE: host interfaces that detect pending mbox messages at the
  994	 * hif level cannot use this optimization due to possible side
  995	 * effects. SPI requires the host to drain all
  996	 * messages from the mailbox before exiting the ISR routine.
 997	 */
 998
 999	ath10k_dbg(ar, ATH10K_DBG_SDIO,
 1000		   "sdio pending irqs done %d status %d\n",
1001		   *done, ret);
1002
1003	return ret;
1004}
1005
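/* Mailbox address layout (illustrative):
 *
 *   ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR
 *   ext_info[1].htc_ext_addr = ext_info[0].htc_ext_addr +
 *                              ext_info[0].htc_ext_sz +
 *                              ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE
 *
 * The size of the first extended region depends on the chip revision,
 * see the switch statement below.
 */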
1006static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
1007{
1008	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1009	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
1010	u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;
1011
1012	mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
1013	mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
1014	mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
1015	mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
1016	mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;
1017
1018	mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;
1019
1020	dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, device);
1021	dev_id_chiprev = FIELD_GET(QCA_MANUFACTURER_ID_REV_MASK, device);
1022	switch (dev_id_base) {
1023	case QCA_MANUFACTURER_ID_AR6005_BASE:
1024		if (dev_id_chiprev < 4)
1025			mbox_info->ext_info[0].htc_ext_sz =
1026				ATH10K_HIF_MBOX0_EXT_WIDTH;
1027		else
 1028			/* from QCA6174 2.0 (0x504), the width has been extended
1029			 * to 56K
1030			 */
1031			mbox_info->ext_info[0].htc_ext_sz =
1032				ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
1033		break;
1034	case QCA_MANUFACTURER_ID_QCA9377_BASE:
1035		mbox_info->ext_info[0].htc_ext_sz =
1036			ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
1037		break;
1038	default:
1039		mbox_info->ext_info[0].htc_ext_sz =
1040				ATH10K_HIF_MBOX0_EXT_WIDTH;
1041	}
1042
1043	mbox_info->ext_info[1].htc_ext_addr =
1044		mbox_info->ext_info[0].htc_ext_addr +
1045		mbox_info->ext_info[0].htc_ext_sz +
1046		ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE;
1047	mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
1048}
1049
1050/* BMI functions */
1051
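/* BMI (Bootloader Messaging Interface) exchanges are credit based: the
 * host may only send a command once the target has posted a command
 * credit, which the host consumes by reading the corresponding
 * MBOX_COUNT_DEC register (see ath10k_sdio_bmi_credits() below).
 */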
1052static int ath10k_sdio_bmi_credits(struct ath10k *ar)
1053{
1054	u32 addr, cmd_credits;
1055	unsigned long timeout;
1056	int ret;
1057
1058	/* Read the counter register to get the command credits */
1059	addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4;
1060	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1061	cmd_credits = 0;
1062
1063	while (time_before(jiffies, timeout) && !cmd_credits) {
1064		/* Hit the credit counter with a 4-byte access, the first byte
1065		 * read will hit the counter and cause a decrement, while the
 1066		 * remaining 3 bytes have no effect. The rationale behind this
1067		 * is to make all HIF accesses 4-byte aligned.
1068		 */
1069		ret = ath10k_sdio_read32(ar, addr, &cmd_credits);
1070		if (ret) {
1071			ath10k_warn(ar,
1072				    "unable to decrement the command credit count register: %d\n",
1073				    ret);
1074			return ret;
1075		}
1076
1077		/* The counter is only 8 bits.
1078		 * Ignore anything in the upper 3 bytes
1079		 */
1080		cmd_credits &= 0xFF;
1081	}
1082
1083	if (!cmd_credits) {
1084		ath10k_warn(ar, "bmi communication timeout\n");
1085		return -ETIMEDOUT;
1086	}
1087
1088	return 0;
1089}
1090
1091static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
1092{
1093	unsigned long timeout;
1094	u32 rx_word;
1095	int ret;
1096
1097	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1098	rx_word = 0;
1099
 1100	while (time_before(jiffies, timeout) && !rx_word) {
1101		ret = ath10k_sdio_read32(ar,
1102					 MBOX_HOST_INT_STATUS_ADDRESS,
1103					 &rx_word);
1104		if (ret) {
1105			ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
1106			return ret;
1107		}
1108
 1109		/* all we really want is one bit */
1110		rx_word &= 1;
1111	}
1112
1113	if (!rx_word) {
1114		ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
1115		return -EINVAL;
1116	}
1117
1118	return ret;
1119}
1120
1121static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
1122					void *req, u32 req_len,
1123					void *resp, u32 *resp_len)
1124{
1125	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1126	u32 addr;
1127	int ret;
1128
1129	if (req) {
1130		ret = ath10k_sdio_bmi_credits(ar);
1131		if (ret)
1132			return ret;
1133
1134		addr = ar_sdio->mbox_info.htc_addr;
1135
1136		memcpy(ar_sdio->bmi_buf, req, req_len);
1137		ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
1138		if (ret) {
1139			ath10k_warn(ar,
1140				    "unable to send the bmi data to the device: %d\n",
1141				    ret);
1142			return ret;
1143		}
1144	}
1145
1146	if (!resp || !resp_len)
1147		/* No response expected */
1148		return 0;
1149
1150	/* During normal bootup, small reads may be required.
1151	 * Rather than issue an HIF Read and then wait as the Target
1152	 * adds successive bytes to the FIFO, we wait here until
1153	 * we know that response data is available.
1154	 *
1155	 * This allows us to cleanly timeout on an unexpected
1156	 * Target failure rather than risk problems at the HIF level.
1157	 * In particular, this avoids SDIO timeouts and possibly garbage
1158	 * data on some host controllers.  And on an interconnect
1159	 * such as Compact Flash (as well as some SDIO masters) which
1160	 * does not provide any indication on data timeout, it avoids
1161	 * a potential hang or garbage response.
1162	 *
1163	 * Synchronization is more difficult for reads larger than the
1164	 * size of the MBOX FIFO (128B), because the Target is unable
1165	 * to push the 129th byte of data until AFTER the Host posts an
1166	 * HIF Read and removes some FIFO data.  So for large reads the
1167	 * Host proceeds to post an HIF Read BEFORE all the data is
1168	 * actually available to read.  Fortunately, large BMI reads do
1169	 * not occur in practice -- they're supported for debug/development.
1170	 *
1171	 * So Host/Target BMI synchronization is divided into these cases:
1172	 *  CASE 1: length < 4
1173	 *        Should not happen
1174	 *
1175	 *  CASE 2: 4 <= length <= 128
1176	 *        Wait for first 4 bytes to be in FIFO
1177	 *        If CONSERVATIVE_BMI_READ is enabled, also wait for
1178	 *        a BMI command credit, which indicates that the ENTIRE
 1179	 *        response is available in the FIFO
1180	 *
1181	 *  CASE 3: length > 128
1182	 *        Wait for the first 4 bytes to be in FIFO
1183	 *
1184	 * For most uses, a small timeout should be sufficient and we will
1185	 * usually see a response quickly; but there may be some unusual
 1186	 * (debug) cases of BMI_EXECUTE where we want a larger timeout.
1187	 * For now, we use an unbounded busy loop while waiting for
1188	 * BMI_EXECUTE.
1189	 *
1190	 * If BMI_EXECUTE ever needs to support longer-latency execution,
1191	 * especially in production, this code needs to be enhanced to sleep
1192	 * and yield.  Also note that BMI_COMMUNICATION_TIMEOUT is currently
1193	 * a function of Host processor speed.
1194	 */
1195	ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
1196	if (ret)
1197		return ret;
1198
1199	/* We always read from the start of the mbox address */
1200	addr = ar_sdio->mbox_info.htc_addr;
1201	ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
1202	if (ret) {
1203		ath10k_warn(ar,
1204			    "unable to read the bmi data from the device: %d\n",
1205			    ret);
1206		return ret;
1207	}
1208
1209	memcpy(resp, ar_sdio->bmi_buf, *resp_len);
1210
1211	return 0;
1212}
1213
1214/* sdio async handling functions */
1215
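/* Asynchronous writes are handled as a small pipeline: a bus request is
 * taken from bus_req_freeq and filled in by ath10k_sdio_prep_async_req(),
 * which queues it on wr_asyncq. ath10k_sdio_write_async_work(), running
 * on the driver's single-threaded workqueue, then drains the queue and
 * performs the actual SDIO writes.
 */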
 1216static struct ath10k_sdio_bus_request *
 1217ath10k_sdio_alloc_busreq(struct ath10k *ar)
1218{
1219	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1220	struct ath10k_sdio_bus_request *bus_req;
1221
1222	spin_lock_bh(&ar_sdio->lock);
1223
1224	if (list_empty(&ar_sdio->bus_req_freeq)) {
1225		bus_req = NULL;
1226		goto out;
1227	}
1228
1229	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
1230				   struct ath10k_sdio_bus_request, list);
1231	list_del(&bus_req->list);
1232
1233out:
1234	spin_unlock_bh(&ar_sdio->lock);
1235	return bus_req;
1236}
1237
1238static void ath10k_sdio_free_bus_req(struct ath10k *ar,
1239				     struct ath10k_sdio_bus_request *bus_req)
1240{
1241	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1242
1243	memset(bus_req, 0, sizeof(*bus_req));
1244
1245	spin_lock_bh(&ar_sdio->lock);
1246	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
1247	spin_unlock_bh(&ar_sdio->lock);
1248}
1249
1250static void __ath10k_sdio_write_async(struct ath10k *ar,
1251				      struct ath10k_sdio_bus_request *req)
1252{
1253	struct ath10k_htc_ep *ep;
1254	struct sk_buff *skb;
1255	int ret;
1256
1257	skb = req->skb;
1258	ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
1259	if (ret)
1260		ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d",
1261			    req->address, ret);
1262
1263	if (req->htc_msg) {
1264		ep = &ar->htc.endpoint[req->eid];
1265		ath10k_htc_notify_tx_completion(ep, skb);
1266	} else if (req->comp) {
1267		complete(req->comp);
1268	}
1269
1270	ath10k_sdio_free_bus_req(ar, req);
1271}
1272
1273static void ath10k_sdio_write_async_work(struct work_struct *work)
1274{
1275	struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
1276						   wr_async_work);
1277	struct ath10k *ar = ar_sdio->ar;
1278	struct ath10k_sdio_bus_request *req, *tmp_req;
1279
1280	spin_lock_bh(&ar_sdio->wr_async_lock);
1281
1282	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
1283		list_del(&req->list);
1284		spin_unlock_bh(&ar_sdio->wr_async_lock);
1285		__ath10k_sdio_write_async(ar, req);
1286		spin_lock_bh(&ar_sdio->wr_async_lock);
1287	}
1288
1289	spin_unlock_bh(&ar_sdio->wr_async_lock);
1290}
1291
1292static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
1293				      struct sk_buff *skb,
1294				      struct completion *comp,
1295				      bool htc_msg, enum ath10k_htc_ep_id eid)
1296{
1297	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1298	struct ath10k_sdio_bus_request *bus_req;
1299
1300	/* Allocate a bus request for the message and queue it on the
1301	 * SDIO workqueue.
1302	 */
1303	bus_req = ath10k_sdio_alloc_busreq(ar);
1304	if (!bus_req) {
1305		ath10k_warn(ar,
1306			    "unable to allocate bus request for async request\n");
1307		return -ENOMEM;
1308	}
1309
1310	bus_req->skb = skb;
1311	bus_req->eid = eid;
1312	bus_req->address = addr;
1313	bus_req->htc_msg = htc_msg;
1314	bus_req->comp = comp;
1315
1316	spin_lock_bh(&ar_sdio->wr_async_lock);
1317	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
1318	spin_unlock_bh(&ar_sdio->wr_async_lock);
1319
1320	return 0;
1321}
1322
1323/* IRQ handler */
1324
1325static void ath10k_sdio_irq_handler(struct sdio_func *func)
1326{
1327	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
1328	struct ath10k *ar = ar_sdio->ar;
1329	unsigned long timeout;
1330	bool done = false;
1331	int ret;
1332
1333	/* Release the host during interrupts so we can pick it back up when
1334	 * we process commands.
1335	 */
1336	sdio_release_host(ar_sdio->func);
1337
1338	timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
1339	do {
1340		ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
1341		if (ret)
1342			break;
1343	} while (time_before(jiffies, timeout) && !done);
1344
1345	sdio_claim_host(ar_sdio->func);
1346
1347	if (ret && ret != -ECANCELED)
1348		ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
1349			    ret);
1350}
1351
1352/* sdio HIF functions */
1353
1354static int ath10k_sdio_hif_disable_intrs(struct ath10k *ar)
1355{
1356	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1357	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
1358	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
1359	int ret;
1360
1361	mutex_lock(&irq_data->mtx);
1362
1363	memset(regs, 0, sizeof(*regs));
1364	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
1365				&regs->int_status_en, sizeof(*regs));
1366	if (ret)
1367		ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);
1368
1369	mutex_unlock(&irq_data->mtx);
1370
1371	return ret;
1372}
1373
1374static int ath10k_sdio_hif_power_up(struct ath10k *ar)
1375{
1376	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1377	struct sdio_func *func = ar_sdio->func;
1378	int ret;
1379
1380	if (!ar_sdio->is_disabled)
1381		return 0;
1382
1383	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");
1384
1385	sdio_claim_host(func);
1386
1387	ret = sdio_enable_func(func);
1388	if (ret) {
 1389		ath10k_warn(ar, "unable to enable sdio function: %d\n", ret);
1390		sdio_release_host(func);
1391		return ret;
1392	}
1393
1394	sdio_release_host(func);
1395
1396	/* Wait for hardware to initialise. It should take a lot less than
1397	 * 20 ms but let's be conservative here.
1398	 */
1399	msleep(20);
1400
1401	ar_sdio->is_disabled = false;
1402
1403	ret = ath10k_sdio_hif_disable_intrs(ar);
1404	if (ret)
1405		return ret;
1406
1407	return 0;
1408}
1409
1410static void ath10k_sdio_hif_power_down(struct ath10k *ar)
1411{
1412	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1413	int ret;
1414
1415	if (ar_sdio->is_disabled)
1416		return;
1417
1418	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");
1419
1420	/* Disable the card */
1421	sdio_claim_host(ar_sdio->func);
1422	ret = sdio_disable_func(ar_sdio->func);
1423	sdio_release_host(ar_sdio->func);
1424
1425	if (ret)
1426		ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
1427
1428	ar_sdio->is_disabled = true;
1429}
1430
1431static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1432				 struct ath10k_hif_sg_item *items, int n_items)
1433{
1434	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1435	enum ath10k_htc_ep_id eid;
1436	struct sk_buff *skb;
1437	int ret, i;
1438
1439	eid = pipe_id_to_eid(pipe_id);
1440
1441	for (i = 0; i < n_items; i++) {
1442		size_t padded_len;
1443		u32 address;
1444
1445		skb = items[i].transfer_context;
1446		padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,
1447							      skb->len);
1448		skb_trim(skb, padded_len);
1449
1450		/* Write TX data to the end of the mbox address space */
1451		address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -
1452			  skb->len;
1453		ret = ath10k_sdio_prep_async_req(ar, address, skb,
1454						 NULL, true, eid);
1455		if (ret)
1456			return ret;
1457	}
1458
1459	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
1460
1461	return 0;
1462}
1463
1464static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar)
1465{
1466	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1467	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
1468	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
1469	int ret;
1470
1471	mutex_lock(&irq_data->mtx);
1472
1473	/* Enable all but CPU interrupts */
1474	regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
1475			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |
1476			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);
1477
 1478	/* NOTE: There are some cases where the HIF can detect
 1479	 * pending mbox messages; that detection is disabled for now.
1480	 */
1481	regs->int_status_en |=
1482		FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);
1483
1484	/* Set up the CPU Interrupt status Register */
1485	regs->cpu_int_status_en = 0;
1486
1487	/* Set up the Error Interrupt status Register */
1488	regs->err_int_status_en =
1489		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |
1490		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);
1491
1492	/* Enable Counter interrupt status register to get fatal errors for
1493	 * debugging.
1494	 */
1495	regs->cntr_int_status_en =
1496		FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
1497			   ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);
1498
1499	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
1500				&regs->int_status_en, sizeof(*regs));
1501	if (ret)
1502		ath10k_warn(ar,
1503			    "failed to update mbox interrupt status register : %d\n",
1504			    ret);
1505
1506	mutex_unlock(&irq_data->mtx);
1507	return ret;
1508}
1509
1510static int ath10k_sdio_hif_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
1511{
1512	u32 val;
1513	int ret;
1514
1515	ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
1516	if (ret) {
1517		ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
1518			    ret);
1519		return ret;
1520	}
1521
1522	if (enable_sleep)
1523		val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
1524	else
1525		val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
1526
1527	ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
1528	if (ret) {
1529		ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
1530			    ret);
1531		return ret;
1532	}
1533
1534	return 0;
1535}
1536
1537/* HIF diagnostics */
1538
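/* Diagnostic accesses to target memory go through a window mechanism:
 * the target address is written to a window address register, which
 * triggers the copy to or from the window data register. A read
 * therefore sets MBOX_WINDOW_READ_ADDR_ADDRESS first and then fetches
 * the data from MBOX_WINDOW_DATA_ADDRESS; a write stores the data
 * first and then sets MBOX_WINDOW_WRITE_ADDR_ADDRESS to start the
 * cycle.
 */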
1539static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1540				     size_t buf_len)
1541{
1542	int ret;
1543
1544	/* set window register to start read cycle */
1545	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
1546	if (ret) {
1547		ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
1548		return ret;
1549	}
1550
1551	/* read the data */
1552	ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len);
1553	if (ret) {
1554		ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
1555			    ret);
1556		return ret;
1557	}
1558
1559	return 0;
1560}
1561
1562static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
1563				       u32 *value)
1564{
1565	__le32 *val;
1566	int ret;
1567
1568	val = kzalloc(sizeof(*val), GFP_KERNEL);
1569	if (!val)
1570		return -ENOMEM;
1571
1572	ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val));
1573	if (ret)
1574		goto out;
1575
1576	*value = __le32_to_cpu(*val);
1577
1578out:
1579	kfree(val);
1580
1581	return ret;
1582}
1583
1584static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
1585					  const void *data, int nbytes)
1586{
1587	int ret;
1588
1589	/* set write data */
1590	ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
1591	if (ret) {
1592		ath10k_warn(ar,
1593			    "failed to write 0x%p to mbox window data address: %d\n",
1594			    data, ret);
1595		return ret;
1596	}
1597
1598	/* set window register, which starts the write cycle */
1599	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);
1600	if (ret) {
1601		ath10k_warn(ar, "failed to set mbox window write address: %d", ret);
1602		return ret;
1603	}
1604
1605	return 0;
1606}
1607
1608/* HIF start/stop */
1609
1610static int ath10k_sdio_hif_start(struct ath10k *ar)
1611{
1612	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1613	u32 addr, val;
1614	int ret;
1615
1616	/* Sleep 20 ms before HIF interrupts are disabled.
 1617	 * This will give the target plenty of time to process the BMI done
1618	 * request before interrupts are disabled.
1619	 */
1620	msleep(20);
1621	ret = ath10k_sdio_hif_disable_intrs(ar);
1622	if (ret)
1623		return ret;
1624
1625	/* eid 0 always uses the lower part of the extended mailbox address
1626	 * space (ext_info[0].htc_ext_addr).
1627	 */
1628	ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
1629	ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
1630
1631	sdio_claim_host(ar_sdio->func);
1632
1633	/* Register the isr */
1634	ret =  sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
1635	if (ret) {
1636		ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
1637		sdio_release_host(ar_sdio->func);
1638		return ret;
1639	}
1640
1641	sdio_release_host(ar_sdio->func);
1642
1643	ret = ath10k_sdio_hif_enable_intrs(ar);
1644	if (ret)
1645		ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
1646
1647	addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
1648
1649	ret = ath10k_sdio_hif_diag_read32(ar, addr, &val);
1650	if (ret) {
1651		ath10k_warn(ar, "unable to read hi_acs_flags address: %d\n", ret);
1652		return ret;
1653	}
1654
1655	if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
1656		ath10k_dbg(ar, ATH10K_DBG_SDIO,
1657			   "sdio mailbox swap service enabled\n");
1658		ar_sdio->swap_mbox = true;
1659	}
1660
1661	/* Enable sleep and then disable it again */
1662	ret = ath10k_sdio_hif_set_mbox_sleep(ar, true);
1663	if (ret)
1664		return ret;
1665
1666	/* Wait for 20ms for the written value to take effect */
1667	msleep(20);
1668
1669	ret = ath10k_sdio_hif_set_mbox_sleep(ar, false);
1670	if (ret)
1671		return ret;
1672
1673	return 0;
1674}
1675
1676#define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)
1677
1678static void ath10k_sdio_irq_disable(struct ath10k *ar)
1679{
1680	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1681	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
1682	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
1683	struct sk_buff *skb;
1684	struct completion irqs_disabled_comp;
1685	int ret;
1686
1687	skb = dev_alloc_skb(sizeof(*regs));
1688	if (!skb)
1689		return;
1690
1691	mutex_lock(&irq_data->mtx);
1692
1693	memset(regs, 0, sizeof(*regs)); /* disable all interrupts */
1694	memcpy(skb->data, regs, sizeof(*regs));
1695	skb_put(skb, sizeof(*regs));
1696
1697	mutex_unlock(&irq_data->mtx);
1698
1699	init_completion(&irqs_disabled_comp);
1700	ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
1701					 skb, &irqs_disabled_comp, false, 0);
1702	if (ret)
1703		goto out;
1704
1705	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
1706
1707	/* Wait for the completion of the IRQ disable request.
 1708	 * If there is a timeout we will try to disable the IRQs anyway.
1709	 */
1710	ret = wait_for_completion_timeout(&irqs_disabled_comp,
1711					  SDIO_IRQ_DISABLE_TIMEOUT_HZ);
1712	if (!ret)
1713		ath10k_warn(ar, "sdio irq disable request timed out\n");
1714
1715	sdio_claim_host(ar_sdio->func);
1716
1717	ret = sdio_release_irq(ar_sdio->func);
1718	if (ret)
1719		ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);
1720
1721	sdio_release_host(ar_sdio->func);
1722
1723out:
1724	kfree_skb(skb);
1725}
1726
1727static void ath10k_sdio_hif_stop(struct ath10k *ar)
1728{
1729	struct ath10k_sdio_bus_request *req, *tmp_req;
1730	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1731
1732	ath10k_sdio_irq_disable(ar);
1733
1734	cancel_work_sync(&ar_sdio->wr_async_work);
1735
1736	spin_lock_bh(&ar_sdio->wr_async_lock);
1737
1738	/* Free all bus requests that have not been handled */
1739	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
1740		struct ath10k_htc_ep *ep;
1741
1742		list_del(&req->list);
1743
1744		if (req->htc_msg) {
1745			ep = &ar->htc.endpoint[req->eid];
1746			ath10k_htc_notify_tx_completion(ep, req->skb);
1747		} else if (req->skb) {
1748			kfree_skb(req->skb);
1749		}
1750		ath10k_sdio_free_bus_req(ar, req);
1751	}
1752
1753	spin_unlock_bh(&ar_sdio->wr_async_lock);
1754}
1755
1756#ifdef CONFIG_PM
1757
1758static int ath10k_sdio_hif_suspend(struct ath10k *ar)
1759{
1760	return -EOPNOTSUPP;
1761}
1762
1763static int ath10k_sdio_hif_resume(struct ath10k *ar)
1764{
1765	switch (ar->state) {
1766	case ATH10K_STATE_OFF:
1767		ath10k_dbg(ar, ATH10K_DBG_SDIO,
1768			   "sdio resume configuring sdio\n");
1769
1770		/* need to set sdio settings after power is cut from sdio */
1771		ath10k_sdio_config(ar);
1772		break;
1773
1774	case ATH10K_STATE_ON:
1775	default:
1776		break;
1777	}
1778
1779	return 0;
1780}
1781#endif
1782
1783static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar,
1784					       u16 service_id,
1785					       u8 *ul_pipe, u8 *dl_pipe)
1786{
1787	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1788	struct ath10k_htc *htc = &ar->htc;
1789	u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size;
1790	enum ath10k_htc_ep_id eid;
1791	bool ep_found = false;
1792	int i;
1793
1794	/* For sdio, we are interested in the mapping between eid
1795	 * and pipeid rather than service_id to pipe_id.
1796	 * First we find out which eid has been allocated to the
1797	 * service...
1798	 */
1799	for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
1800		if (htc->endpoint[i].service_id == service_id) {
1801			eid = htc->endpoint[i].eid;
1802			ep_found = true;
1803			break;
1804		}
1805	}
1806
1807	if (!ep_found)
1808		return -EINVAL;
1809
1810	/* Then we create the simplest mapping possible between pipeid
1811	 * and eid
1812	 */
1813	*ul_pipe = *dl_pipe = (u8)eid;
1814
1815	/* Normally, HTT will use the upper part of the extended
1816	 * mailbox address space (ext_info[1].htc_ext_addr) and WMI ctrl
1817	 * the lower part (ext_info[0].htc_ext_addr).
1818	 * If fw wants swapping of mailbox addresses, the opposite is true.
1819	 */
1820	if (ar_sdio->swap_mbox) {
1821		htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
1822		wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
1823		htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
1824		wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
1825	} else {
1826		htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
1827		wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
1828		htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
1829		wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
1830	}
1831
1832	switch (service_id) {
1833	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
1834		/* HTC ctrl ep mbox address has already been setup in
1835		 * ath10k_sdio_hif_start
1836		 */
1837		break;
1838	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1839		ar_sdio->mbox_addr[eid] = wmi_addr;
1840		ar_sdio->mbox_size[eid] = wmi_mbox_size;
1841		ath10k_dbg(ar, ATH10K_DBG_SDIO,
1842			   "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",
1843			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
1844		break;
1845	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
1846		ar_sdio->mbox_addr[eid] = htt_addr;
1847		ar_sdio->mbox_size[eid] = htt_mbox_size;
1848		ath10k_dbg(ar, ATH10K_DBG_SDIO,
1849			   "sdio htt data mbox_addr 0x%x mbox_size %d\n",
1850			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
1851		break;
1852	default:
1853		ath10k_warn(ar, "unsupported HTC service id: %d\n",
1854			    service_id);
1855		return -EINVAL;
1856	}
1857
1858	return 0;
1859}
1860
1861static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
1862					     u8 *ul_pipe, u8 *dl_pipe)
1863{
1864	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");
1865
1866	/* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our
1867	 * case) == 0
1868	 */
1869	*ul_pipe = 0;
1870	*dl_pipe = 0;
1871}
1872
1873/* This op is currently only used by htc_wait_target if the HTC ready
1874 * message times out. It is not applicable for SDIO since there is nothing
1875 * we can do if the HTC ready message does not arrive in time.
 1876 * TODO: Make this op non-mandatory by introducing a NULL check in the
1877 * hif op wrapper.
1878 */
1879static void ath10k_sdio_hif_send_complete_check(struct ath10k *ar,
1880						u8 pipe, int force)
1881{
1882}
1883
1884static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
1885	.tx_sg			= ath10k_sdio_hif_tx_sg,
1886	.diag_read		= ath10k_sdio_hif_diag_read,
1887	.diag_write		= ath10k_sdio_hif_diag_write_mem,
1888	.exchange_bmi_msg	= ath10k_sdio_bmi_exchange_msg,
1889	.start			= ath10k_sdio_hif_start,
1890	.stop			= ath10k_sdio_hif_stop,
1891	.map_service_to_pipe	= ath10k_sdio_hif_map_service_to_pipe,
1892	.get_default_pipe	= ath10k_sdio_hif_get_default_pipe,
1893	.send_complete_check	= ath10k_sdio_hif_send_complete_check,
1894	.power_up		= ath10k_sdio_hif_power_up,
1895	.power_down		= ath10k_sdio_hif_power_down,
1896#ifdef CONFIG_PM
1897	.suspend		= ath10k_sdio_hif_suspend,
1898	.resume			= ath10k_sdio_hif_resume,
1899#endif
1900};
1901
1902#ifdef CONFIG_PM_SLEEP
1903
1904/* Empty handlers so that mmc subsystem doesn't remove us entirely during
1905 * suspend. We instead follow cfg80211 suspend/resume handlers.
1906 */
1907static int ath10k_sdio_pm_suspend(struct device *device)
1908{
1909	return 0;
1910}
1911
1912static int ath10k_sdio_pm_resume(struct device *device)
1913{
1914	return 0;
1915}
1916
1917static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
1918			 ath10k_sdio_pm_resume);
1919
1920#define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)
1921
1922#else
1923
1924#define ATH10K_SDIO_PM_OPS NULL
1925
1926#endif /* CONFIG_PM_SLEEP */
1927
1928static int ath10k_sdio_probe(struct sdio_func *func,
1929			     const struct sdio_device_id *id)
1930{
1931	struct ath10k_sdio *ar_sdio;
1932	struct ath10k *ar;
1933	enum ath10k_hw_rev hw_rev;
1934	u32 chip_id, dev_id_base;
1935	int ret, i;
1936
1937	/* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
 1938	 * If newer chipsets appear that do not use the hw reg
 1939	 * setup as defined in qca6174_regs and qca6174_values, this
 1940	 * assumption is no longer valid and hw_rev must be set up
 1941	 * differently depending on chipset.
1942	 */
1943	hw_rev = ATH10K_HW_QCA6174;
1944
1945	ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,
1946				hw_rev, &ath10k_sdio_hif_ops);
1947	if (!ar) {
1948		dev_err(&func->dev, "failed to allocate core\n");
1949		return -ENOMEM;
1950	}
1951
1952	ath10k_dbg(ar, ATH10K_DBG_BOOT,
1953		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
1954		   func->num, func->vendor, func->device,
1955		   func->max_blksize, func->cur_blksize);
1956
1957	ar_sdio = ath10k_sdio_priv(ar);
1958
1959	ar_sdio->irq_data.irq_proc_reg =
1960		kzalloc(sizeof(struct ath10k_sdio_irq_proc_regs),
1961			GFP_KERNEL);
1962	if (!ar_sdio->irq_data.irq_proc_reg) {
1963		ret = -ENOMEM;
1964		goto err_core_destroy;
1965	}
1966
1967	ar_sdio->irq_data.irq_en_reg =
1968		kzalloc(sizeof(struct ath10k_sdio_irq_enable_regs),
1969			GFP_KERNEL);
1970	if (!ar_sdio->irq_data.irq_en_reg) {
1971		ret = -ENOMEM;
1972		goto err_free_proc_reg;
1973	}
1974
1975	ar_sdio->bmi_buf = kzalloc(BMI_MAX_CMDBUF_SIZE, GFP_KERNEL);
1976	if (!ar_sdio->bmi_buf) {
1977		ret = -ENOMEM;
1978		goto err_free_en_reg;
1979	}
1980
1981	ar_sdio->func = func;
1982	sdio_set_drvdata(func, ar_sdio);
1983
1984	ar_sdio->is_disabled = true;
1985	ar_sdio->ar = ar;
1986
1987	spin_lock_init(&ar_sdio->lock);
1988	spin_lock_init(&ar_sdio->wr_async_lock);
1989	mutex_init(&ar_sdio->irq_data.mtx);
1990
1991	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
1992	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
1993
1994	INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);
1995	ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
1996	if (!ar_sdio->workqueue) {
1997		ret = -ENOMEM;
1998		goto err_free_bmi_buf;
1999	}
2000
2001	for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
2002		ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);
2003
2004	dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, id->device);
2005	switch (dev_id_base) {
2006	case QCA_MANUFACTURER_ID_AR6005_BASE:
2007	case QCA_MANUFACTURER_ID_QCA9377_BASE:
2008		ar->dev_id = QCA9377_1_0_DEVICE_ID;
2009		break;
2010	default:
2011		ret = -ENODEV;
2012		ath10k_err(ar, "unsupported device id %u (0x%x)\n",
2013			   dev_id_base, id->device);
2014		goto err_free_bmi_buf;
2015	}
2016
2017	ar->id.vendor = id->vendor;
2018	ar->id.device = id->device;
2019
2020	ath10k_sdio_set_mbox_info(ar);
2021
2022	ret = ath10k_sdio_config(ar);
2023	if (ret) {
2024		ath10k_err(ar, "failed to config sdio: %d\n", ret);
2025		goto err_free_wq;
2026	}
2027
2028	/* TODO: don't know yet how to get chip_id with SDIO */
2029	chip_id = 0;
2030	ret = ath10k_core_register(ar, chip_id);
2031	if (ret) {
2032		ath10k_err(ar, "failed to register driver core: %d\n", ret);
2033		goto err_free_wq;
2034	}
2035
2036	/* TODO: remove this once SDIO support is fully implemented */
2037	ath10k_warn(ar, "WARNING: ath10k SDIO support is incomplete, don't expect anything to work!\n");
2038
2039	return 0;
2040
2041err_free_wq:
2042	destroy_workqueue(ar_sdio->workqueue);
2043err_free_bmi_buf:
2044	kfree(ar_sdio->bmi_buf);
2045err_free_en_reg:
2046	kfree(ar_sdio->irq_data.irq_en_reg);
2047err_free_proc_reg:
2048	kfree(ar_sdio->irq_data.irq_proc_reg);
2049err_core_destroy:
2050	ath10k_core_destroy(ar);
2051
2052	return ret;
2053}
2054
2055static void ath10k_sdio_remove(struct sdio_func *func)
2056{
2057	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
2058	struct ath10k *ar = ar_sdio->ar;
2059
2060	ath10k_dbg(ar, ATH10K_DBG_BOOT,
2061		   "sdio removed func %d vendor 0x%x device 0x%x\n",
2062		   func->num, func->vendor, func->device);
2063
2064	(void)ath10k_sdio_hif_disable_intrs(ar);
2065	cancel_work_sync(&ar_sdio->wr_async_work);
2066	ath10k_core_unregister(ar);
2067	ath10k_core_destroy(ar);
2068}
2069
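/* The SDIO device id encodes both a chip family base and a chip
 * revision (cf. QCA_MANUFACTURER_ID_BASE and
 * QCA_MANUFACTURER_ID_REV_MASK used above), hence the base | revision
 * composition below.
 */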
2070static const struct sdio_device_id ath10k_sdio_devices[] = {
2071	{SDIO_DEVICE(QCA_MANUFACTURER_CODE,
2072		     (QCA_SDIO_ID_AR6005_BASE | 0xA))},
2073	{SDIO_DEVICE(QCA_MANUFACTURER_CODE,
2074		     (QCA_SDIO_ID_QCA9377_BASE | 0x1))},
2075	{},
2076};
2077
2078MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);
2079
2080static struct sdio_driver ath10k_sdio_driver = {
2081	.name = "ath10k_sdio",
2082	.id_table = ath10k_sdio_devices,
2083	.probe = ath10k_sdio_probe,
2084	.remove = ath10k_sdio_remove,
2085	.drv.pm = ATH10K_SDIO_PM_OPS,
2086};
2087
2088static int __init ath10k_sdio_init(void)
2089{
2090	int ret;
2091
2092	ret = sdio_register_driver(&ath10k_sdio_driver);
2093	if (ret)
2094		pr_err("sdio driver registration failed: %d\n", ret);
2095
2096	return ret;
2097}
2098
2099static void __exit ath10k_sdio_exit(void)
2100{
2101	sdio_unregister_driver(&ath10k_sdio_driver);
2102}
2103
2104module_init(ath10k_sdio_init);
2105module_exit(ath10k_sdio_exit);
2106
2107MODULE_AUTHOR("Qualcomm Atheros");
2108MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
2109MODULE_LICENSE("Dual BSD/GPL");