   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
   3 */
   4
   5#include <linux/delay.h>
   6#include <linux/highmem.h>
   7#include <linux/io.h>
   8#include <linux/iopoll.h>
   9#include <linux/module.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/slab.h>
  12#include <linux/scatterlist.h>
  13#include <linux/platform_device.h>
  14#include <linux/ktime.h>
  15
  16#include <linux/mmc/mmc.h>
  17#include <linux/mmc/host.h>
  18#include <linux/mmc/card.h>
  19
  20#include "cqhci.h"
  21#include "cqhci-crypto.h"
  22
  23#define DCMD_SLOT 31
  24#define NUM_SLOTS 32
  25
  26struct cqhci_slot {
  27	struct mmc_request *mrq;
  28	unsigned int flags;
  29#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
  30#define CQHCI_COMPLETED		BIT(1)
  31#define CQHCI_HOST_CRC		BIT(2)
  32#define CQHCI_HOST_TIMEOUT	BIT(3)
  33#define CQHCI_HOST_OTHER	BIT(4)
  34};
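/*
 * Each of the NUM_SLOTS (32) slots tracks one in-flight request.  The flag
 * bits record why a request is being failed (host CRC error, host timeout,
 * an externally reported timeout, or "other"), and CQHCI_COMPLETED notes a
 * completion that arrived while error recovery was already in progress;
 * cqhci_error_from_flags() converts these flags into an error code when the
 * outstanding requests are completed during recovery.
 */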
  35
  36static bool cqhci_halted(struct cqhci_host *cq_host)
  37{
  38	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
  39}
  40
  41static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
  42{
  43	return cq_host->desc_base + (tag * cq_host->slot_sz);
  44}
  45
  46static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
  47{
  48	u8 *desc = get_desc(cq_host, tag);
  49
  50	return desc + cq_host->task_desc_len;
  51}
  52
  53static inline size_t get_trans_desc_offset(struct cqhci_host *cq_host, u8 tag)
  54{
  55	return cq_host->trans_desc_len * cq_host->mmc->max_segs * tag;
  56}
  57
  58static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
  59{
  60	size_t offset = get_trans_desc_offset(cq_host, tag);
  61
  62	return cq_host->trans_desc_dma_base + offset;
  63}
  64
  65static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
  66{
  67	size_t offset = get_trans_desc_offset(cq_host, tag);
  68
  69	return cq_host->trans_desc_base + offset;
  70}
  71
  72static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
  73{
  74	u8 *link_temp;
  75	dma_addr_t trans_temp;
  76
  77	link_temp = get_link_desc(cq_host, tag);
  78	trans_temp = get_trans_desc_dma(cq_host, tag);
  79
  80	memset(link_temp, 0, cq_host->link_desc_len);
  81	if (cq_host->link_desc_len > 8)
  82		*(link_temp + 8) = 0;
  83
  84	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
  85		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
  86		return;
  87	}
  88
  89	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);
  90
  91	if (cq_host->dma64) {
  92		__le64 *data_addr = (__le64 __force *)(link_temp + 4);
  93
  94		data_addr[0] = cpu_to_le64(trans_temp);
  95	} else {
  96		__le32 *data_addr = (__le32 __force *)(link_temp + 4);
  97
  98		data_addr[0] = cpu_to_le32(trans_temp);
  99	}
 100}
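/*
 * For illustration (derived from the code above): a data slot's link
 * descriptor is an attribute byte set to VALID(1) | ACT(0x6) | END(0),
 * followed at byte offset 4 by the DMA address of that slot's transfer
 * descriptor table, written as 64 or 32 bits depending on cq_host->dma64.
 * When MMC_CAP2_CQE_DCMD is set, the DCMD slot instead gets a terminating
 * entry, VALID(0) | ACT(0) | END(1), since a direct command moves no data.
 */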
 101
 102static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
 103{
 104	cqhci_writel(cq_host, set, CQHCI_ISTE);
 105	cqhci_writel(cq_host, set, CQHCI_ISGE);
 106}
 107
 108#define DRV_NAME "cqhci"
 109
 110#define CQHCI_DUMP(f, x...) \
 111	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)
 112
 113static void cqhci_dumpregs(struct cqhci_host *cq_host)
 114{
 115	struct mmc_host *mmc = cq_host->mmc;
 116
 117	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");
 118
 119	CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
 120		   cqhci_readl(cq_host, CQHCI_CAP),
 121		   cqhci_readl(cq_host, CQHCI_VER));
 122	CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
 123		   cqhci_readl(cq_host, CQHCI_CFG),
 124		   cqhci_readl(cq_host, CQHCI_CTL));
 125	CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
 126		   cqhci_readl(cq_host, CQHCI_IS),
 127		   cqhci_readl(cq_host, CQHCI_ISTE));
 128	CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
 129		   cqhci_readl(cq_host, CQHCI_ISGE),
 130		   cqhci_readl(cq_host, CQHCI_IC));
 131	CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
 132		   cqhci_readl(cq_host, CQHCI_TDLBA),
 133		   cqhci_readl(cq_host, CQHCI_TDLBAU));
 134	CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
 135		   cqhci_readl(cq_host, CQHCI_TDBR),
 136		   cqhci_readl(cq_host, CQHCI_TCN));
 137	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
 138		   cqhci_readl(cq_host, CQHCI_DQS),
 139		   cqhci_readl(cq_host, CQHCI_DPT));
 140	CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
 141		   cqhci_readl(cq_host, CQHCI_TCLR),
 142		   cqhci_readl(cq_host, CQHCI_SSC1));
 143	CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
 144		   cqhci_readl(cq_host, CQHCI_SSC2),
 145		   cqhci_readl(cq_host, CQHCI_CRDCT));
 146	CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
 147		   cqhci_readl(cq_host, CQHCI_RMEM),
 148		   cqhci_readl(cq_host, CQHCI_TERRI));
 149	CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
 150		   cqhci_readl(cq_host, CQHCI_CRI),
 151		   cqhci_readl(cq_host, CQHCI_CRA));
 152
 153	if (cq_host->ops->dumpregs)
 154		cq_host->ops->dumpregs(mmc);
 155	else
 156		CQHCI_DUMP(": ===========================================\n");
 157}
 158
 159/*
 160 * The allocated descriptor table for task, link & transfer descriptors
 161 * looks like:
 162 * |----------|
 163 * |task desc |  |->|----------|
 164 * |----------|  |  |trans desc|
 165 * |link desc-|->|  |----------|
 166 * |----------|          .
 167 *      .                .
 168 *  no. of slots      max-segs
 169 *      .           |----------|
 170 * |----------|
 171 * The idea is to build the per-slot [task+link] descriptor list and point
 172 * each slot's link descriptor at that slot's transfer descriptor table.
 173 */
 174static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
 175{
 176	int i;
 177
 178	/* task descriptor can be 64/128 bit irrespective of arch */
 179	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
 180		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
 181			       CQHCI_TASK_DESC_SZ, CQHCI_CFG);
 182		cq_host->task_desc_len = 16;
 183	} else {
 184		cq_host->task_desc_len = 8;
 185	}
 186
 187	/*
 188	 * The transfer descriptor may be 96 bits long instead of 128 bits, in
 189	 * which case the ADMA engine expects the next valid descriptor at a
 190	 * 96-bit rather than a 128-bit offset.
 191	 */
 192	if (cq_host->dma64) {
 193		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
 194			cq_host->trans_desc_len = 12;
 195		else
 196			cq_host->trans_desc_len = 16;
 197		cq_host->link_desc_len = 16;
 198	} else {
 199		cq_host->trans_desc_len = 8;
 200		cq_host->link_desc_len = 8;
 201	}
 202
 203	/* total size of a slot: 1 task descriptor + 1 link descriptor */
 204	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;
 205
 206	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
 207
 208	cq_host->data_size = get_trans_desc_offset(cq_host, cq_host->mmc->cqe_qdepth);
 209
 210	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
 211		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
 212		 cq_host->slot_sz);
 213
 214	/*
 215	 * Allocate a DMA-coherent region for the task/link descriptor list,
 216	 * allocate a second DMA-coherent region for the transfer descriptors,
 217	 * then set up each slot's link descriptor to point at that slot's
 218	 * transfer descriptor table.
 219	 */
 220	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
 221						 cq_host->desc_size,
 222						 &cq_host->desc_dma_base,
 223						 GFP_KERNEL);
 224	if (!cq_host->desc_base)
 225		return -ENOMEM;
 226
 227	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
 228					      cq_host->data_size,
 229					      &cq_host->trans_desc_dma_base,
 230					      GFP_KERNEL);
 231	if (!cq_host->trans_desc_base) {
 232		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
 233				   cq_host->desc_base,
 234				   cq_host->desc_dma_base);
 235		cq_host->desc_base = NULL;
 236		cq_host->desc_dma_base = 0;
 237		return -ENOMEM;
 238	}
 239
 240	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
 241		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
 242		(unsigned long long)cq_host->desc_dma_base,
 243		(unsigned long long)cq_host->trans_desc_dma_base);
 244
 245	for (i = 0; i < cq_host->num_slots; i++)
 246		setup_trans_desc(cq_host, i);
 247
 248	return 0;
 249}
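/*
 * Worked example (hypothetical numbers, for illustration only): with 64-bit
 * DMA and 128-bit task descriptors, slot_sz = 16 + 16 = 32 bytes, so the
 * descriptor list occupies 32 * 32 slots = 1024 bytes.  If the host reports
 * mmc->max_segs = 128 and cqe_qdepth = 32, the transfer descriptor area is
 * 16 * 128 * 32 = 64 KiB, i.e. one max_segs-sized table per queue slot.
 */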
 250
 251static void __cqhci_enable(struct cqhci_host *cq_host)
 252{
 253	struct mmc_host *mmc = cq_host->mmc;
 254	u32 cqcfg;
 255
 256	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
 257
 258	/* Configuration must not be changed while enabled */
 259	if (cqcfg & CQHCI_ENABLE) {
 260		cqcfg &= ~CQHCI_ENABLE;
 261		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
 262	}
 263
 264	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);
 265
 266	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
 267		cqcfg |= CQHCI_DCMD;
 268
 269	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
 270		cqcfg |= CQHCI_TASK_DESC_SZ;
 271
 272	if (mmc->caps2 & MMC_CAP2_CRYPTO)
 273		cqcfg |= CQHCI_CRYPTO_GENERAL_ENABLE;
 274
 275	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
 276
 277	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
 278		     CQHCI_TDLBA);
 279	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
 280		     CQHCI_TDLBAU);
 281
 282	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);
 283
 284	cqhci_set_irqs(cq_host, 0);
 285
 286	cqcfg |= CQHCI_ENABLE;
 287
 288	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
 289
 290	if (cqhci_halted(cq_host))
 291		cqhci_writel(cq_host, 0, CQHCI_CTL);
 292
 293	mmc->cqe_on = true;
 294
 295	if (cq_host->ops->enable)
 296		cq_host->ops->enable(mmc);
 297
 298	/* Ensure all writes are done before interrupts are enabled */
 299	wmb();
 300
 301	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
 302
 303	cq_host->activated = true;
 304}
 305
 306static void __cqhci_disable(struct cqhci_host *cq_host)
 307{
 308	u32 cqcfg;
 309
 310	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
 311	cqcfg &= ~CQHCI_ENABLE;
 312	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
 313
 314	cq_host->mmc->cqe_on = false;
 315
 316	cq_host->activated = false;
 317}
 318
 319int cqhci_deactivate(struct mmc_host *mmc)
 320{
 321	struct cqhci_host *cq_host = mmc->cqe_private;
 322
 323	if (cq_host->enabled && cq_host->activated)
 324		__cqhci_disable(cq_host);
 325
 326	return 0;
 327}
 328EXPORT_SYMBOL(cqhci_deactivate);
 329
 330int cqhci_resume(struct mmc_host *mmc)
 331{
 332	/* Re-enable is done upon first request */
 333	return 0;
 334}
 335EXPORT_SYMBOL(cqhci_resume);
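/*
 * Suspend/resume note: a host driver typically calls cqhci_deactivate() on
 * suspend, while cqhci_resume() is intentionally a no-op; cqhci_request()
 * notices !cq_host->activated on the first request after resume and re-runs
 * __cqhci_enable() at that point.
 */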
 336
 337static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
 338{
 339	struct cqhci_host *cq_host = mmc->cqe_private;
 340	int err;
 341
 342	if (!card->ext_csd.cmdq_en)
 343		return -EINVAL;
 344
 345	if (cq_host->enabled)
 346		return 0;
 347
 348	cq_host->rca = card->rca;
 349
 350	err = cqhci_host_alloc_tdl(cq_host);
 351	if (err) {
 352		pr_err("%s: Failed to enable CQE, error %d\n",
 353		       mmc_hostname(mmc), err);
 354		return err;
 355	}
 356
 357	__cqhci_enable(cq_host);
 358
 359	cq_host->enabled = true;
 360
 361#ifdef DEBUG
 362	cqhci_dumpregs(cq_host);
 363#endif
 364	return 0;
 365}
 366
 367/* CQHCI is idle and should halt immediately, so set a small timeout */
 368#define CQHCI_OFF_TIMEOUT 100
 369
 370static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
 371{
 372	return cqhci_readl(cq_host, CQHCI_CTL);
 373}
 374
 375static void cqhci_off(struct mmc_host *mmc)
 376{
 377	struct cqhci_host *cq_host = mmc->cqe_private;
 378	u32 reg;
 379	int err;
 380
 381	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
 382		return;
 383
 384	if (cq_host->ops->disable)
 385		cq_host->ops->disable(mmc, false);
 386
 387	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
 388
 389	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
 390				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
 391	if (err < 0)
 392		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
 393	else
 394		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
 395
 396	if (cq_host->ops->post_disable)
 397		cq_host->ops->post_disable(mmc);
 398
 399	mmc->cqe_on = false;
 400}
 401
 402static void cqhci_disable(struct mmc_host *mmc)
 403{
 404	struct cqhci_host *cq_host = mmc->cqe_private;
 405
 406	if (!cq_host->enabled)
 407		return;
 408
 409	cqhci_off(mmc);
 410
 411	__cqhci_disable(cq_host);
 412
 413	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
 414			   cq_host->trans_desc_base,
 415			   cq_host->trans_desc_dma_base);
 416
 417	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
 418			   cq_host->desc_base,
 419			   cq_host->desc_dma_base);
 420
 421	cq_host->trans_desc_base = NULL;
 422	cq_host->desc_base = NULL;
 423
 424	cq_host->enabled = false;
 425}
 426
 427static void cqhci_prep_task_desc(struct mmc_request *mrq,
 428				 struct cqhci_host *cq_host, int tag)
 429{
 430	__le64 *task_desc = (__le64 __force *)get_desc(cq_host, tag);
 431	u32 req_flags = mrq->data->flags;
 432	u64 desc0;
 433
 434	desc0 = CQHCI_VALID(1) |
 435		CQHCI_END(1) |
 436		CQHCI_INT(1) |
 437		CQHCI_ACT(0x5) |
 438		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
 439		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
 440		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
 441		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
 442		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
 443		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
 444		CQHCI_BLK_COUNT(mrq->data->blocks) |
 445		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);
 446
 447	task_desc[0] = cpu_to_le64(desc0);
 448
 449	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
 450		u64 desc1 = cqhci_crypto_prep_task_desc(mrq);
 451
 452		task_desc[1] = cpu_to_le64(desc1);
 453
 454		pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx%016llx\n",
 455			 mmc_hostname(mrq->host), mrq->tag, desc1, desc0);
 456	} else {
 457		pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
 458			 mmc_hostname(mrq->host), mrq->tag, desc0);
 459	}
 460}
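/*
 * Example encoding (hypothetical request, for illustration): a read of 8
 * blocks at block address 0x8000 with no special request flags sets
 * CQHCI_DATA_DIR(1), CQHCI_BLK_COUNT(8) and CQHCI_BLK_ADDR(0x8000), with
 * FORCED_PROG, DATA_TAG, PRIORITY, QBAR and REL_WRITE all zero.  With
 * 128-bit task descriptors the second 64-bit word carries the crypto
 * parameters returned by cqhci_crypto_prep_task_desc().
 */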
 461
 462static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
 463{
 464	int sg_count;
 465	struct mmc_data *data = mrq->data;
 466
 467	if (!data)
 468		return -EINVAL;
 469
 470	sg_count = dma_map_sg(mmc_dev(host), data->sg,
 471			      data->sg_len,
 472			      (data->flags & MMC_DATA_WRITE) ?
 473			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
 474	if (!sg_count) {
 475		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
 476		return -ENOMEM;
 477	}
 478
 479	return sg_count;
 480}
 481
 482void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
 483			 bool dma64)
 484{
 485	__le32 *attr = (__le32 __force *)desc;
 486
 487	*attr = (CQHCI_VALID(1) |
 488		 CQHCI_END(end ? 1 : 0) |
 489		 CQHCI_INT(0) |
 490		 CQHCI_ACT(0x4) |
 491		 CQHCI_DAT_LENGTH(len));
 492
 493	if (dma64) {
 494		__le64 *dataddr = (__le64 __force *)(desc + 4);
 495
 496		dataddr[0] = cpu_to_le64(addr);
 497	} else {
 498		__le32 *dataddr = (__le32 __force *)(desc + 4);
 499
 500		dataddr[0] = cpu_to_le32(addr);
 501	}
 502}
 503EXPORT_SYMBOL(cqhci_set_tran_desc);
 504
 505static int cqhci_prep_tran_desc(struct mmc_request *mrq,
 506			       struct cqhci_host *cq_host, int tag)
 507{
 508	struct mmc_data *data = mrq->data;
 509	int i, sg_count, len;
 510	bool end = false;
 511	bool dma64 = cq_host->dma64;
 512	dma_addr_t addr;
 513	u8 *desc;
 514	struct scatterlist *sg;
 515
 516	sg_count = cqhci_dma_map(mrq->host, mrq);
 517	if (sg_count < 0) {
 518		pr_err("%s: %s: unable to map sg lists, %d\n",
 519				mmc_hostname(mrq->host), __func__, sg_count);
 520		return sg_count;
 521	}
 522
 523	desc = get_trans_desc(cq_host, tag);
 524
 525	for_each_sg(data->sg, sg, sg_count, i) {
 526		addr = sg_dma_address(sg);
 527		len = sg_dma_len(sg);
 528
 529		if ((i+1) == sg_count)
 530			end = true;
 531		if (cq_host->ops->set_tran_desc)
 532			cq_host->ops->set_tran_desc(cq_host, &desc, addr, len, end, dma64);
 533		else
 534			cqhci_set_tran_desc(desc, addr, len, end, dma64);
 535
 536		desc += cq_host->trans_desc_len;
 537	}
 538
 539	return 0;
 540}
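/*
 * Note: a host that needs to post-process transfer descriptors (for example,
 * to split a segment that crosses a controller-specific address boundary)
 * can provide a ->set_tran_desc() op, which receives a pointer to the
 * descriptor cursor (&desc) and so can advance it itself; otherwise the
 * exported cqhci_set_tran_desc() default is used, with END set only on the
 * final scatterlist segment.
 */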
 541
 542static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
 543				   struct mmc_request *mrq)
 544{
 545	u64 *task_desc = NULL;
 546	u64 data = 0;
 547	u8 resp_type;
 548	u8 *desc;
 549	__le64 *dataddr;
 550	struct cqhci_host *cq_host = mmc->cqe_private;
 551	u8 timing;
 552
 553	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
 554		resp_type = 0x0;
 555		timing = 0x1;
 556	} else {
 557		if (mrq->cmd->flags & MMC_RSP_R1B) {
 558			resp_type = 0x3;
 559			timing = 0x0;
 560		} else {
 561			resp_type = 0x2;
 562			timing = 0x1;
 563		}
 564	}
 565
 566	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
 567	memset(task_desc, 0, cq_host->task_desc_len);
 568	data |= (CQHCI_VALID(1) |
 569		 CQHCI_END(1) |
 570		 CQHCI_INT(1) |
 571		 CQHCI_QBAR(1) |
 572		 CQHCI_ACT(0x5) |
 573		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
 574		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
 575	if (cq_host->ops->update_dcmd_desc)
 576		cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
 577	*task_desc |= data;
 578	desc = (u8 *)task_desc;
 579	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
 580		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
 581	dataddr = (__le64 __force *)(desc + 4);
 582	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
 583
 584}
 585
 586static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
 587{
 588	struct mmc_data *data = mrq->data;
 589
 590	if (data) {
 591		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
 592			     (data->flags & MMC_DATA_READ) ?
 593			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
 594	}
 595}
 596
 597static inline int cqhci_tag(struct mmc_request *mrq)
 598{
 599	return mrq->cmd ? DCMD_SLOT : mrq->tag;
 600}
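/*
 * Slot selection: a DCMD request (mrq->cmd set, no data) always uses slot 31
 * (DCMD_SLOT); data requests use the block layer tag, which ranges over
 * 0..cqe_qdepth-1 (0..30 when MMC_CAP2_CQE_DCMD reserves the last slot, see
 * cqhci_init()).
 */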
 601
 602static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 603{
 604	int err = 0;
 605	int tag = cqhci_tag(mrq);
 606	struct cqhci_host *cq_host = mmc->cqe_private;
 607	unsigned long flags;
 608
 609	if (!cq_host->enabled) {
 610		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
 611		return -EINVAL;
 612	}
 613
 614	/* First request after resume has to re-enable */
 615	if (!cq_host->activated)
 616		__cqhci_enable(cq_host);
 617
 618	if (!mmc->cqe_on) {
 619		if (cq_host->ops->pre_enable)
 620			cq_host->ops->pre_enable(mmc);
 621
 622		cqhci_writel(cq_host, 0, CQHCI_CTL);
 623		mmc->cqe_on = true;
 624		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
 625		if (cqhci_halted(cq_host)) {
 626			pr_err("%s: cqhci: CQE failed to exit halt state\n",
 627			       mmc_hostname(mmc));
 628		}
 629		if (cq_host->ops->enable)
 630			cq_host->ops->enable(mmc);
 631	}
 632
 633	if (mrq->data) {
 634		cqhci_prep_task_desc(mrq, cq_host, tag);
 635
 636		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
 637		if (err) {
 638			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
 639			       mmc_hostname(mmc), err);
 640			return err;
 641		}
 642	} else {
 643		cqhci_prep_dcmd_desc(mmc, mrq);
 644	}
 645
 646	spin_lock_irqsave(&cq_host->lock, flags);
 647
 648	if (cq_host->recovery_halt) {
 649		err = -EBUSY;
 650		goto out_unlock;
 651	}
 652
 653	cq_host->slot[tag].mrq = mrq;
 654	cq_host->slot[tag].flags = 0;
 655
 656	cq_host->qcnt += 1;
 657	/* Make sure descriptors are ready before ringing the doorbell */
 658	wmb();
 659	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
 660	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
 661		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
 662			 mmc_hostname(mmc), tag);
 663out_unlock:
 664	spin_unlock_irqrestore(&cq_host->lock, flags);
 665
 666	if (err)
 667		cqhci_post_req(mmc, mrq);
 668
 669	return err;
 670}
 671
 672static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
 673				  bool notify)
 674{
 675	struct cqhci_host *cq_host = mmc->cqe_private;
 676
 677	if (!cq_host->recovery_halt) {
 678		cq_host->recovery_halt = true;
 679		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
 680		wake_up(&cq_host->wait_queue);
 681		if (notify && mrq->recovery_notifier)
 682			mrq->recovery_notifier(mrq);
 683	}
 684}
 685
 686static unsigned int cqhci_error_flags(int error1, int error2)
 687{
 688	int error = error1 ? error1 : error2;
 689
 690	switch (error) {
 691	case -EILSEQ:
 692		return CQHCI_HOST_CRC;
 693	case -ETIMEDOUT:
 694		return CQHCI_HOST_TIMEOUT;
 695	default:
 696		return CQHCI_HOST_OTHER;
 697	}
 698}
 699
 700static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
 701			    int data_error)
 702{
 703	struct cqhci_host *cq_host = mmc->cqe_private;
 704	struct cqhci_slot *slot;
 705	u32 terri;
 706	u32 tdpe;
 707	int tag;
 708
 709	spin_lock(&cq_host->lock);
 710
 711	terri = cqhci_readl(cq_host, CQHCI_TERRI);
 712
 713	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
 714		 mmc_hostname(mmc), status, cmd_error, data_error, terri);
 715
 716	/* Forget about errors when recovery has already been triggered */
 717	if (cq_host->recovery_halt)
 718		goto out_unlock;
 719
 720	if (!cq_host->qcnt) {
 721		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
 722			  mmc_hostname(mmc), status, cmd_error, data_error,
 723			  terri);
 724		goto out_unlock;
 725	}
 726
 727	if (CQHCI_TERRI_C_VALID(terri)) {
 728		tag = CQHCI_TERRI_C_TASK(terri);
 729		slot = &cq_host->slot[tag];
 730		if (slot->mrq) {
 731			slot->flags = cqhci_error_flags(cmd_error, data_error);
 732			cqhci_recovery_needed(mmc, slot->mrq, true);
 733		}
 734	}
 735
 736	if (CQHCI_TERRI_D_VALID(terri)) {
 737		tag = CQHCI_TERRI_D_TASK(terri);
 738		slot = &cq_host->slot[tag];
 739		if (slot->mrq) {
 740			slot->flags = cqhci_error_flags(data_error, cmd_error);
 741			cqhci_recovery_needed(mmc, slot->mrq, true);
 742		}
 743	}
 744
 745	/*
 746	 * Handle ICCE ("Invalid Crypto Configuration Error").  This should
 747	 * never happen, since the block layer ensures that all crypto-enabled
 748	 * I/O requests have a valid keyslot before they reach the driver.
 749	 *
 750	 * Note that GCE ("General Crypto Error") is different; it already got
 751	 * handled above by checking TERRI.
 752	 */
 753	if (status & CQHCI_IS_ICCE) {
 754		tdpe = cqhci_readl(cq_host, CQHCI_TDPE);
 755		WARN_ONCE(1,
 756			  "%s: cqhci: invalid crypto configuration error. IRQ status: 0x%08x TDPE: 0x%08x\n",
 757			  mmc_hostname(mmc), status, tdpe);
 758		while (tdpe != 0) {
 759			tag = __ffs(tdpe);
 760			tdpe &= ~(1 << tag);
 761			slot = &cq_host->slot[tag];
 762			if (!slot->mrq)
 763				continue;
 764			slot->flags = cqhci_error_flags(data_error, cmd_error);
 765			cqhci_recovery_needed(mmc, slot->mrq, true);
 766		}
 767	}
 768
 769	if (!cq_host->recovery_halt) {
 770		/*
 771		 * The only way to guarantee forward progress is to mark at
 772		 * least one task in error, so if none is indicated, pick one.
 773		 */
 774		for (tag = 0; tag < NUM_SLOTS; tag++) {
 775			slot = &cq_host->slot[tag];
 776			if (!slot->mrq)
 777				continue;
 778			slot->flags = cqhci_error_flags(data_error, cmd_error);
 779			cqhci_recovery_needed(mmc, slot->mrq, true);
 780			break;
 781		}
 782	}
 783
 784out_unlock:
 785	spin_unlock(&cq_host->lock);
 786}
 787
 788static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
 789{
 790	struct cqhci_host *cq_host = mmc->cqe_private;
 791	struct cqhci_slot *slot = &cq_host->slot[tag];
 792	struct mmc_request *mrq = slot->mrq;
 793	struct mmc_data *data;
 794
 795	if (!mrq) {
 796		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
 797			  mmc_hostname(mmc), tag);
 798		return;
 799	}
 800
 801	/* No completions allowed during recovery */
 802	if (cq_host->recovery_halt) {
 803		slot->flags |= CQHCI_COMPLETED;
 804		return;
 805	}
 806
 807	slot->mrq = NULL;
 808
 809	cq_host->qcnt -= 1;
 810
 811	data = mrq->data;
 812	if (data) {
 813		if (data->error)
 814			data->bytes_xfered = 0;
 815		else
 816			data->bytes_xfered = data->blksz * data->blocks;
 817	}
 818
 819	mmc_cqe_request_done(mmc, mrq);
 820}
 821
 822irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
 823		      int data_error)
 824{
 825	u32 status;
 826	unsigned long tag = 0, comp_status;
 827	struct cqhci_host *cq_host = mmc->cqe_private;
 828
 829	status = cqhci_readl(cq_host, CQHCI_IS);
 830	cqhci_writel(cq_host, status, CQHCI_IS);
 831
 832	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);
 833
 834	if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
 835	    cmd_error || data_error) {
 836		if (status & CQHCI_IS_RED)
 837			mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_RED);
 838		if (status & CQHCI_IS_GCE)
 839			mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_GCE);
 840		if (status & CQHCI_IS_ICCE)
 841			mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_ICCE);
 842		cqhci_error_irq(mmc, status, cmd_error, data_error);
 843	}
 844
 845	if (status & CQHCI_IS_TCC) {
 846		/* read TCN and complete the request */
 847		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
 848		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
 849		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
 850			 mmc_hostname(mmc), comp_status);
 851
 852		spin_lock(&cq_host->lock);
 853
 854		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
 855			/* complete the corresponding mrq */
 856			pr_debug("%s: cqhci: completing tag %lu\n",
 857				 mmc_hostname(mmc), tag);
 858			cqhci_finish_mrq(mmc, tag);
 859		}
 860
 861		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
 862			cq_host->waiting_for_idle = false;
 863			wake_up(&cq_host->wait_queue);
 864		}
 865
 866		spin_unlock(&cq_host->lock);
 867	}
 868
 869	if (status & CQHCI_IS_TCL)
 870		wake_up(&cq_host->wait_queue);
 871
 872	if (status & CQHCI_IS_HAC)
 873		wake_up(&cq_host->wait_queue);
 874
 875	return IRQ_HANDLED;
 876}
 877EXPORT_SYMBOL(cqhci_irq);
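/*
 * A minimal sketch of how a host driver might dispatch to cqhci_irq() from
 * its own interrupt handler while the CQE is active (the my_host_* names are
 * hypothetical; how command/data errors are decoded is host specific):
 *
 *	static irqreturn_t my_host_irq(int irq, void *dev_id)
 *	{
 *		struct my_host *host = dev_id;
 *		u32 intmask = my_host_read_int_status(host);
 *
 *		if (host->mmc->cqe_on)
 *			return cqhci_irq(host->mmc, intmask,
 *					 my_host_cmd_error(host, intmask),
 *					 my_host_data_error(host, intmask));
 *
 *		return my_host_legacy_irq(host, intmask);
 *	}
 */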
 878
 879static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
 880{
 881	unsigned long flags;
 882	bool is_idle;
 883
 884	spin_lock_irqsave(&cq_host->lock, flags);
 885	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
 886	*ret = cq_host->recovery_halt ? -EBUSY : 0;
 887	cq_host->waiting_for_idle = !is_idle;
 888	spin_unlock_irqrestore(&cq_host->lock, flags);
 889
 890	return is_idle;
 891}
 892
 893static int cqhci_wait_for_idle(struct mmc_host *mmc)
 894{
 895	struct cqhci_host *cq_host = mmc->cqe_private;
 896	int ret;
 897
 898	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));
 899
 900	return ret;
 901}
 902
 903static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
 904			  bool *recovery_needed)
 905{
 906	struct cqhci_host *cq_host = mmc->cqe_private;
 907	int tag = cqhci_tag(mrq);
 908	struct cqhci_slot *slot = &cq_host->slot[tag];
 909	unsigned long flags;
 910	bool timed_out;
 911
 912	spin_lock_irqsave(&cq_host->lock, flags);
 913	timed_out = slot->mrq == mrq;
 914	if (timed_out) {
 915		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
 916		cqhci_recovery_needed(mmc, mrq, false);
 917		*recovery_needed = cq_host->recovery_halt;
 918	}
 919	spin_unlock_irqrestore(&cq_host->lock, flags);
 920
 921	if (timed_out) {
 922		pr_err("%s: cqhci: timeout for tag %d, qcnt %d\n",
 923		       mmc_hostname(mmc), tag, cq_host->qcnt);
 924		cqhci_dumpregs(cq_host);
 925	}
 926
 927	return timed_out;
 928}
 929
 930static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
 931{
 932	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
 933}
 934
 935static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
 936{
 937	struct cqhci_host *cq_host = mmc->cqe_private;
 938	bool ret;
 939	u32 ctl;
 940
 941	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);
 942
 943	ctl = cqhci_readl(cq_host, CQHCI_CTL);
 944	ctl |= CQHCI_CLEAR_ALL_TASKS;
 945	cqhci_writel(cq_host, ctl, CQHCI_CTL);
 946
 947	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
 948			   msecs_to_jiffies(timeout) + 1);
 949
 950	cqhci_set_irqs(cq_host, 0);
 951
 952	ret = cqhci_tasks_cleared(cq_host);
 953
 954	if (!ret)
 955		pr_warn("%s: cqhci: Failed to clear tasks\n",
 956			mmc_hostname(mmc));
 957
 958	return ret;
 959}
 960
 961static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
 962{
 963	struct cqhci_host *cq_host = mmc->cqe_private;
 964	bool ret;
 965	u32 ctl;
 966
 967	if (cqhci_halted(cq_host))
 968		return true;
 969
 970	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);
 971
 972	ctl = cqhci_readl(cq_host, CQHCI_CTL);
 973	ctl |= CQHCI_HALT;
 974	cqhci_writel(cq_host, ctl, CQHCI_CTL);
 975
 976	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
 977			   msecs_to_jiffies(timeout) + 1);
 978
 979	cqhci_set_irqs(cq_host, 0);
 980
 981	ret = cqhci_halted(cq_host);
 982
 983	if (!ret)
 984		pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
 985
 986	return ret;
 987}
 988
 989/*
 990 * After halting we expect to be able to use the command line. We interpret the
 991 * failure to halt to mean the data lines might still be in use (and the upper
 992 * layers will need to send a STOP command); however, failing to halt also
 993 * complicates recovery, so set a timeout that reasonably allows I/O to complete.
 994 */
 995#define CQHCI_START_HALT_TIMEOUT	500
 996
 997static void cqhci_recovery_start(struct mmc_host *mmc)
 998{
 999	struct cqhci_host *cq_host = mmc->cqe_private;
1000
1001	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
1002
1003	WARN_ON(!cq_host->recovery_halt);
1004
1005	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);
1006
1007	if (cq_host->ops->disable)
1008		cq_host->ops->disable(mmc, true);
1009
1010	mmc->cqe_on = false;
1011}
1012
1013static int cqhci_error_from_flags(unsigned int flags)
1014{
1015	if (!flags)
1016		return 0;
1017
1018	/* CRC errors might indicate re-tuning so prefer to report that */
1019	if (flags & CQHCI_HOST_CRC)
1020		return -EILSEQ;
1021
1022	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
1023		return -ETIMEDOUT;
1024
1025	return -EIO;
1026}
1027
1028static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
1029{
1030	struct cqhci_slot *slot = &cq_host->slot[tag];
1031	struct mmc_request *mrq = slot->mrq;
1032	struct mmc_data *data;
1033
1034	if (!mrq)
1035		return;
1036
1037	slot->mrq = NULL;
1038
1039	cq_host->qcnt -= 1;
1040
1041	data = mrq->data;
1042	if (data) {
1043		data->bytes_xfered = 0;
1044		data->error = cqhci_error_from_flags(slot->flags);
1045	} else {
1046		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
1047	}
1048
1049	mmc_cqe_request_done(cq_host->mmc, mrq);
1050}
1051
1052static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
1053{
1054	int i;
1055
1056	for (i = 0; i < cq_host->num_slots; i++)
1057		cqhci_recover_mrq(cq_host, i);
1058}
1059
1060/*
1061 * By now the command and data lines should be unused so there is no reason for
1062 * CQHCI to take a long time to halt, but if it doesn't halt there could be
1063 * problems clearing tasks, so be generous.
1064 */
1065#define CQHCI_FINISH_HALT_TIMEOUT	20
1066
1067/* CQHCI could be expected to clear its internal state pretty quickly */
1068#define CQHCI_CLEAR_TIMEOUT		20
1069
1070static void cqhci_recovery_finish(struct mmc_host *mmc)
1071{
1072	struct cqhci_host *cq_host = mmc->cqe_private;
1073	unsigned long flags;
1074	u32 cqcfg;
1075	bool ok;
1076
1077	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
1078
1079	WARN_ON(!cq_host->recovery_halt);
1080
1081	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
1082
1083	/*
1084	 * The specification contradicts itself: it says that tasks cannot be
1085	 * cleared if CQHCI does not halt, that a CQHCI which does not halt
1086	 * should be disabled and re-enabled, and yet that it must not be
1087	 * disabled before tasks are cleared. Have a go anyway.
1088	 */
1089	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
1090		ok = false;
1091
1092	/* Disable to make sure tasks really are cleared */
1093	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
1094	cqcfg &= ~CQHCI_ENABLE;
1095	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
1096
1097	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
1098	cqcfg |= CQHCI_ENABLE;
1099	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
1100
1101	cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
1102
1103	if (!ok)
1104		cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);
1105
1106	cqhci_recover_mrqs(cq_host);
1107
1108	WARN_ON(cq_host->qcnt);
1109
1110	spin_lock_irqsave(&cq_host->lock, flags);
1111	cq_host->qcnt = 0;
1112	cq_host->recovery_halt = false;
1113	mmc->cqe_on = false;
1114	spin_unlock_irqrestore(&cq_host->lock, flags);
1115
1116	/* Ensure all writes are done before interrupts are re-enabled */
1117	wmb();
1118
1119	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);
1120
1121	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
1122
1123	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
1124}
1125
1126static const struct mmc_cqe_ops cqhci_cqe_ops = {
1127	.cqe_enable = cqhci_enable,
1128	.cqe_disable = cqhci_disable,
1129	.cqe_request = cqhci_request,
1130	.cqe_post_req = cqhci_post_req,
1131	.cqe_off = cqhci_off,
1132	.cqe_wait_for_idle = cqhci_wait_for_idle,
1133	.cqe_timeout = cqhci_timeout,
1134	.cqe_recovery_start = cqhci_recovery_start,
1135	.cqe_recovery_finish = cqhci_recovery_finish,
1136};
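/*
 * Recovery flow as wired above: when the block layer times out a request,
 * .cqe_timeout marks the slot with CQHCI_EXTERNAL_TIMEOUT and asks for
 * recovery; .cqe_recovery_start halts the CQE; then, once the upper layers
 * have performed their own recovery of the card, .cqe_recovery_finish clears
 * any remaining tasks, re-enables the controller and completes every
 * outstanding request with an error derived from its slot flags.
 */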
1137
1138struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
1139{
1140	struct cqhci_host *cq_host;
1141	struct resource *cqhci_memres = NULL;
1142
1143	/* check and setup CMDQ interface */
1144	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1145						   "cqhci");
1146	if (!cqhci_memres) {
1147		dev_dbg(&pdev->dev, "CMDQ not supported\n");
1148		return ERR_PTR(-EINVAL);
1149	}
1150
1151	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
1152	if (!cq_host)
1153		return ERR_PTR(-ENOMEM);
1154	cq_host->mmio = devm_ioremap(&pdev->dev,
1155				     cqhci_memres->start,
1156				     resource_size(cqhci_memres));
1157	if (!cq_host->mmio) {
1158		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
1159		return ERR_PTR(-EBUSY);
1160	}
1161	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");
1162
1163	return cq_host;
1164}
1165EXPORT_SYMBOL(cqhci_pltfm_init);
1166
1167static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
1168{
1169	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
1170}
1171
1172static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
1173{
1174	u32 ver = cqhci_readl(cq_host, CQHCI_VER);
1175
1176	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
1177}
1178
1179int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
1180	      bool dma64)
1181{
1182	int err;
1183
1184	cq_host->dma64 = dma64;
1185	cq_host->mmc = mmc;
1186	cq_host->mmc->cqe_private = cq_host;
1187
1188	cq_host->num_slots = NUM_SLOTS;
1189	cq_host->dcmd_slot = DCMD_SLOT;
1190
1191	mmc->cqe_ops = &cqhci_cqe_ops;
1192
1193	mmc->cqe_qdepth = NUM_SLOTS;
1194	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
1195		mmc->cqe_qdepth -= 1;
1196
1197	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
1198				     sizeof(*cq_host->slot), GFP_KERNEL);
1199	if (!cq_host->slot) {
1200		err = -ENOMEM;
1201		goto out_err;
1202	}
1203
1204	err = cqhci_crypto_init(cq_host);
1205	if (err) {
1206		pr_err("%s: CQHCI crypto initialization failed\n",
1207		       mmc_hostname(mmc));
1208		goto out_err;
1209	}
1210
1211	spin_lock_init(&cq_host->lock);
1212
1213	init_completion(&cq_host->halt_comp);
1214	init_waitqueue_head(&cq_host->wait_queue);
1215
1216	pr_info("%s: CQHCI version %u.%02u\n",
1217		mmc_hostname(mmc), cqhci_ver_major(cq_host),
1218		cqhci_ver_minor(cq_host));
1219
1220	return 0;
1221
1222out_err:
1223	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
1224	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
1225	       cqhci_ver_minor(cq_host), err);
1226	return err;
1227}
1228EXPORT_SYMBOL(cqhci_init);
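/*
 * A minimal sketch of how a platform host driver might wire this library up
 * at probe time (my_host_cqhci_ops is hypothetical, and the dma64 test is
 * just one way to choose the descriptor size):
 *
 *	struct cqhci_host *cq_host;
 *	bool dma64 = dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32);
 *
 *	cq_host = cqhci_pltfm_init(pdev);	// maps the "cqhci" MEM resource
 *	if (IS_ERR(cq_host))
 *		return PTR_ERR(cq_host);
 *
 *	cq_host->ops = &my_host_cqhci_ops;	// struct cqhci_host_ops
 *	mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
 *
 *	ret = cqhci_init(cq_host, mmc, dma64);
 *	if (ret)
 *		return ret;
 */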
1229
1230MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
1231MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
1232MODULE_LICENSE("GPL v2");