v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
   3 */
   4
   5#include <linux/delay.h>
   6#include <linux/highmem.h>
   7#include <linux/io.h>
   8#include <linux/iopoll.h>
   9#include <linux/module.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/slab.h>
  12#include <linux/scatterlist.h>
  13#include <linux/platform_device.h>
  14#include <linux/ktime.h>
  15
  16#include <linux/mmc/mmc.h>
  17#include <linux/mmc/host.h>
  18#include <linux/mmc/card.h>
  19
  20#include "cqhci.h"
  21#include "cqhci-crypto.h"
  22
  23#define DCMD_SLOT 31
  24#define NUM_SLOTS 32
  25
  26struct cqhci_slot {
  27	struct mmc_request *mrq;
  28	unsigned int flags;
  29#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
  30#define CQHCI_COMPLETED		BIT(1)
  31#define CQHCI_HOST_CRC		BIT(2)
  32#define CQHCI_HOST_TIMEOUT	BIT(3)
  33#define CQHCI_HOST_OTHER	BIT(4)
  34};
  35
  36static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
  37{
  38	return cq_host->desc_base + (tag * cq_host->slot_sz);
  39}
  40
  41static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
  42{
  43	u8 *desc = get_desc(cq_host, tag);
  44
  45	return desc + cq_host->task_desc_len;
  46}
  47
  48static inline size_t get_trans_desc_offset(struct cqhci_host *cq_host, u8 tag)
  49{
  50	return cq_host->trans_desc_len * cq_host->mmc->max_segs * tag;
  51}
  52
  53static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
  54{
  55	size_t offset = get_trans_desc_offset(cq_host, tag);
  56
  57	return cq_host->trans_desc_dma_base + offset;
  58}
  59
  60static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
  61{
  62	size_t offset = get_trans_desc_offset(cq_host, tag);
  63
  64	return cq_host->trans_desc_base + offset;
  65}
  66
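/*
 * Point each slot's link descriptor at that slot's transfer descriptor
 * list. The DCMD slot (when DCMD is enabled) carries no data, so it
 * only gets an end marker.
 */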
  67static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
  68{
  69	u8 *link_temp;
  70	dma_addr_t trans_temp;
  71
  72	link_temp = get_link_desc(cq_host, tag);
  73	trans_temp = get_trans_desc_dma(cq_host, tag);
  74
  75	memset(link_temp, 0, cq_host->link_desc_len);
  76	if (cq_host->link_desc_len > 8)
  77		*(link_temp + 8) = 0;
  78
  79	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
  80		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
  81		return;
  82	}
  83
  84	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);
  85
  86	if (cq_host->dma64) {
  87		__le64 *data_addr = (__le64 __force *)(link_temp + 4);
  88
  89		data_addr[0] = cpu_to_le64(trans_temp);
  90	} else {
  91		__le32 *data_addr = (__le32 __force *)(link_temp + 4);
  92
  93		data_addr[0] = cpu_to_le32(trans_temp);
  94	}
  95}
  96
  97static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
  98{
  99	cqhci_writel(cq_host, set, CQHCI_ISTE);
 100	cqhci_writel(cq_host, set, CQHCI_ISGE);
 101}
 102
 103#define DRV_NAME "cqhci"
 104
 105#define CQHCI_DUMP(f, x...) \
 106	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)
 107
 108static void cqhci_dumpregs(struct cqhci_host *cq_host)
 109{
 110	struct mmc_host *mmc = cq_host->mmc;
 111
 112	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");
 113
 114	CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
 115		   cqhci_readl(cq_host, CQHCI_CAP),
 116		   cqhci_readl(cq_host, CQHCI_VER));
 117	CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
 118		   cqhci_readl(cq_host, CQHCI_CFG),
 119		   cqhci_readl(cq_host, CQHCI_CTL));
 120	CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
 121		   cqhci_readl(cq_host, CQHCI_IS),
 122		   cqhci_readl(cq_host, CQHCI_ISTE));
 123	CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
 124		   cqhci_readl(cq_host, CQHCI_ISGE),
 125		   cqhci_readl(cq_host, CQHCI_IC));
 126	CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
 127		   cqhci_readl(cq_host, CQHCI_TDLBA),
 128		   cqhci_readl(cq_host, CQHCI_TDLBAU));
 129	CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
 130		   cqhci_readl(cq_host, CQHCI_TDBR),
 131		   cqhci_readl(cq_host, CQHCI_TCN));
 132	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
 133		   cqhci_readl(cq_host, CQHCI_DQS),
 134		   cqhci_readl(cq_host, CQHCI_DPT));
 135	CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
 136		   cqhci_readl(cq_host, CQHCI_TCLR),
 137		   cqhci_readl(cq_host, CQHCI_SSC1));
 138	CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
 139		   cqhci_readl(cq_host, CQHCI_SSC2),
 140		   cqhci_readl(cq_host, CQHCI_CRDCT));
 141	CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
 142		   cqhci_readl(cq_host, CQHCI_RMEM),
 143		   cqhci_readl(cq_host, CQHCI_TERRI));
 144	CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
 145		   cqhci_readl(cq_host, CQHCI_CRI),
 146		   cqhci_readl(cq_host, CQHCI_CRA));
 147
 148	if (cq_host->ops->dumpregs)
 149		cq_host->ops->dumpregs(mmc);
 150	else
 151		CQHCI_DUMP(": ===========================================\n");
 152}
 153
 154/*
 155 * The allocated descriptor table for task, link & transfer descriptors
 156 * looks like:
 157 * |----------|
 158 * |task desc |  |->|----------|
 159 * |----------|  |  |trans desc|
 160 * |link desc-|->|  |----------|
 161 * |----------|          .
 162 *      .                .
 163 *  no. of slots      max-segs
 164 *      .           |----------|
 165 * |----------|
 166 * The idea here is to create the [task+trans] table and mark & point the
 167 * link desc to the transfer desc table on a per slot basis.
 168 */
 169static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
 170{
 171	int i = 0;
 172
 173	/* task descriptor can be 64/128 bit irrespective of arch */
 174	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
 175		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
 176			       CQHCI_TASK_DESC_SZ, CQHCI_CFG);
 177		cq_host->task_desc_len = 16;
 178	} else {
 179		cq_host->task_desc_len = 8;
 180	}
 181
 182	/*
  183	 * A transfer descriptor may be 96 bits long instead of 128 bits,
  184	 * in which case ADMA expects the next valid descriptor at the
  185	 * 96th bit rather than the 128th bit
 186	 */
 187	if (cq_host->dma64) {
 188		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
 189			cq_host->trans_desc_len = 12;
 190		else
 191			cq_host->trans_desc_len = 16;
 192		cq_host->link_desc_len = 16;
 193	} else {
 194		cq_host->trans_desc_len = 8;
 195		cq_host->link_desc_len = 8;
 196	}
 197
 198	/* total size of a slot: 1 task & 1 transfer (link) */
 199	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;
 200
 201	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
 202
 203	cq_host->data_size = get_trans_desc_offset(cq_host, cq_host->mmc->cqe_qdepth);
 204
 205	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
 206		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
 207		 cq_host->slot_sz);
 208
 209	/*
 210	 * allocate a dma-mapped chunk of memory for the descriptors
 211	 * allocate a dma-mapped chunk of memory for link descriptors
 212	 * setup each link-desc memory offset per slot-number to
 213	 * the descriptor table.
 214	 */
 215	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
 216						 cq_host->desc_size,
 217						 &cq_host->desc_dma_base,
 218						 GFP_KERNEL);
 219	if (!cq_host->desc_base)
 220		return -ENOMEM;
 221
 222	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
 223					      cq_host->data_size,
 224					      &cq_host->trans_desc_dma_base,
 225					      GFP_KERNEL);
 226	if (!cq_host->trans_desc_base) {
 227		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
 228				   cq_host->desc_base,
 229				   cq_host->desc_dma_base);
 230		cq_host->desc_base = NULL;
 231		cq_host->desc_dma_base = 0;
 232		return -ENOMEM;
 233	}
 234
 235	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
 236		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
 237		(unsigned long long)cq_host->desc_dma_base,
 238		(unsigned long long)cq_host->trans_desc_dma_base);
 239
 240	for (; i < (cq_host->num_slots); i++)
 241		setup_trans_desc(cq_host, i);
 242
 243	return 0;
 244}
 245
 246static void __cqhci_enable(struct cqhci_host *cq_host)
 247{
 248	struct mmc_host *mmc = cq_host->mmc;
 249	u32 cqcfg;
 250
 251	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
 252
 253	/* Configuration must not be changed while enabled */
 254	if (cqcfg & CQHCI_ENABLE) {
 255		cqcfg &= ~CQHCI_ENABLE;
 256		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
 257	}
 258
 259	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);
 260
 261	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
 262		cqcfg |= CQHCI_DCMD;
 263
 264	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
 265		cqcfg |= CQHCI_TASK_DESC_SZ;
 266
 267	if (mmc->caps2 & MMC_CAP2_CRYPTO)
 268		cqcfg |= CQHCI_CRYPTO_GENERAL_ENABLE;
 269
 270	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
 271
 272	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
 273		     CQHCI_TDLBA);
 274	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
 275		     CQHCI_TDLBAU);
 276
 277	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);
 278
 279	cqhci_set_irqs(cq_host, 0);
 280
 281	cqcfg |= CQHCI_ENABLE;
 282
 283	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
 284
 285	if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
 286		cqhci_writel(cq_host, 0, CQHCI_CTL);
 287
 288	mmc->cqe_on = true;
 289
 290	if (cq_host->ops->enable)
 291		cq_host->ops->enable(mmc);
 292
 293	/* Ensure all writes are done before interrupts are enabled */
 294	wmb();
 295
 296	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
 297
 298	cq_host->activated = true;
 299}
 300
 301static void __cqhci_disable(struct cqhci_host *cq_host)
 302{
 303	u32 cqcfg;
 304
 305	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
 306	cqcfg &= ~CQHCI_ENABLE;
 307	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
 308
 309	cq_host->mmc->cqe_on = false;
 310
 311	cq_host->activated = false;
 312}
 313
 314int cqhci_deactivate(struct mmc_host *mmc)
 315{
 316	struct cqhci_host *cq_host = mmc->cqe_private;
 317
 318	if (cq_host->enabled && cq_host->activated)
 319		__cqhci_disable(cq_host);
 320
 321	return 0;
 322}
 323EXPORT_SYMBOL(cqhci_deactivate);
 324
 325int cqhci_resume(struct mmc_host *mmc)
 326{
 327	/* Re-enable is done upon first request */
 328	return 0;
 329}
 330EXPORT_SYMBOL(cqhci_resume);
 331
 332static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
 333{
 334	struct cqhci_host *cq_host = mmc->cqe_private;
 335	int err;
 336
 337	if (!card->ext_csd.cmdq_en)
 338		return -EINVAL;
 339
 340	if (cq_host->enabled)
 341		return 0;
 342
 343	cq_host->rca = card->rca;
 344
 345	err = cqhci_host_alloc_tdl(cq_host);
 346	if (err) {
 347		pr_err("%s: Failed to enable CQE, error %d\n",
 348		       mmc_hostname(mmc), err);
 349		return err;
 350	}
 351
 352	__cqhci_enable(cq_host);
 353
 354	cq_host->enabled = true;
 355
 356#ifdef DEBUG
 357	cqhci_dumpregs(cq_host);
 358#endif
 359	return 0;
 360}
 361
 362/* CQHCI is idle and should halt immediately, so set a small timeout */
 363#define CQHCI_OFF_TIMEOUT 100
 364
 365static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
 366{
 367	return cqhci_readl(cq_host, CQHCI_CTL);
 368}
 369
 370static void cqhci_off(struct mmc_host *mmc)
 371{
 372	struct cqhci_host *cq_host = mmc->cqe_private;
 373	u32 reg;
 374	int err;
 375
 376	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
 377		return;
 378
 379	if (cq_host->ops->disable)
 380		cq_host->ops->disable(mmc, false);
 381
 382	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
 383
 384	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
 385				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
 386	if (err < 0)
 387		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
 388	else
 389		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
 390
 391	if (cq_host->ops->post_disable)
 392		cq_host->ops->post_disable(mmc);
 393
 394	mmc->cqe_on = false;
 395}
 396
 397static void cqhci_disable(struct mmc_host *mmc)
 398{
 399	struct cqhci_host *cq_host = mmc->cqe_private;
 400
 401	if (!cq_host->enabled)
 402		return;
 403
 404	cqhci_off(mmc);
 405
 406	__cqhci_disable(cq_host);
 407
 408	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
 409			   cq_host->trans_desc_base,
 410			   cq_host->trans_desc_dma_base);
 411
 412	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
 413			   cq_host->desc_base,
 414			   cq_host->desc_dma_base);
 415
 416	cq_host->trans_desc_base = NULL;
 417	cq_host->desc_base = NULL;
 418
 419	cq_host->enabled = false;
 420}
 421
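/*
 * Build the task descriptor for a data request: attributes, block count
 * and block address in the first 64 bits, plus the crypto parameters in
 * the second 64 bits when 128-bit task descriptors are in use.
 */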
 422static void cqhci_prep_task_desc(struct mmc_request *mrq,
 423				 struct cqhci_host *cq_host, int tag)
 424{
 425	__le64 *task_desc = (__le64 __force *)get_desc(cq_host, tag);
 426	u32 req_flags = mrq->data->flags;
 427	u64 desc0;
 428
 429	desc0 = CQHCI_VALID(1) |
 430		CQHCI_END(1) |
 431		CQHCI_INT(1) |
 432		CQHCI_ACT(0x5) |
 433		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
 434		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
 435		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
 436		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
 437		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
 438		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
 439		CQHCI_BLK_COUNT(mrq->data->blocks) |
 440		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);
 441
 442	task_desc[0] = cpu_to_le64(desc0);
 443
 444	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
 445		u64 desc1 = cqhci_crypto_prep_task_desc(mrq);
 446
 447		task_desc[1] = cpu_to_le64(desc1);
 448
 449		pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx%016llx\n",
 450			 mmc_hostname(mrq->host), mrq->tag, desc1, desc0);
 451	} else {
 452		pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
 453			 mmc_hostname(mrq->host), mrq->tag, desc0);
 454	}
 455}
 456
 457static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
 458{
 459	int sg_count;
 460	struct mmc_data *data = mrq->data;
 461
 462	if (!data)
 463		return -EINVAL;
 464
 465	sg_count = dma_map_sg(mmc_dev(host), data->sg,
 466			      data->sg_len,
 467			      (data->flags & MMC_DATA_WRITE) ?
 468			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
 469	if (!sg_count) {
 470		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
 471		return -ENOMEM;
 472	}
 473
 474	return sg_count;
 475}
 476
 477static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
 478				bool dma64)
 479{
 480	__le32 *attr = (__le32 __force *)desc;
 481
 482	*attr = (CQHCI_VALID(1) |
 483		 CQHCI_END(end ? 1 : 0) |
 484		 CQHCI_INT(0) |
 485		 CQHCI_ACT(0x4) |
 486		 CQHCI_DAT_LENGTH(len));
 487
 488	if (dma64) {
 489		__le64 *dataddr = (__le64 __force *)(desc + 4);
 490
 491		dataddr[0] = cpu_to_le64(addr);
 492	} else {
 493		__le32 *dataddr = (__le32 __force *)(desc + 4);
 494
 495		dataddr[0] = cpu_to_le32(addr);
 496	}
 497}
 498
 499static int cqhci_prep_tran_desc(struct mmc_request *mrq,
 500			       struct cqhci_host *cq_host, int tag)
 501{
 502	struct mmc_data *data = mrq->data;
 503	int i, sg_count, len;
 504	bool end = false;
 505	bool dma64 = cq_host->dma64;
 506	dma_addr_t addr;
 507	u8 *desc;
 508	struct scatterlist *sg;
 509
 510	sg_count = cqhci_dma_map(mrq->host, mrq);
 511	if (sg_count < 0) {
 512		pr_err("%s: %s: unable to map sg lists, %d\n",
 513				mmc_hostname(mrq->host), __func__, sg_count);
 514		return sg_count;
 515	}
 516
 517	desc = get_trans_desc(cq_host, tag);
 518
 519	for_each_sg(data->sg, sg, sg_count, i) {
 520		addr = sg_dma_address(sg);
 521		len = sg_dma_len(sg);
 522
 523		if ((i+1) == sg_count)
 524			end = true;
 525		cqhci_set_tran_desc(desc, addr, len, end, dma64);
 526		desc += cq_host->trans_desc_len;
 527	}
 528
 529	return 0;
 530}
 531
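/*
 * Build the direct command (DCMD) task descriptor: encode the response
 * type and command timing from the command flags, and store the command
 * argument at byte offset 4 of the descriptor.
 */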
 532static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
 533				   struct mmc_request *mrq)
 534{
 535	u64 *task_desc = NULL;
 536	u64 data = 0;
 537	u8 resp_type;
 538	u8 *desc;
 539	__le64 *dataddr;
 540	struct cqhci_host *cq_host = mmc->cqe_private;
 541	u8 timing;
 542
 543	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
 544		resp_type = 0x0;
 545		timing = 0x1;
 546	} else {
 547		if (mrq->cmd->flags & MMC_RSP_R1B) {
 548			resp_type = 0x3;
 549			timing = 0x0;
 550		} else {
 551			resp_type = 0x2;
 552			timing = 0x1;
 553		}
 554	}
 555
 556	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
 557	memset(task_desc, 0, cq_host->task_desc_len);
 558	data |= (CQHCI_VALID(1) |
 559		 CQHCI_END(1) |
 560		 CQHCI_INT(1) |
 561		 CQHCI_QBAR(1) |
 562		 CQHCI_ACT(0x5) |
 563		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
 564		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
 565	if (cq_host->ops->update_dcmd_desc)
 566		cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
 567	*task_desc |= data;
 568	desc = (u8 *)task_desc;
 569	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
 570		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
 571	dataddr = (__le64 __force *)(desc + 4);
 572	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
 573
 574}
 575
 576static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
 577{
 578	struct mmc_data *data = mrq->data;
 579
 580	if (data) {
 581		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
 582			     (data->flags & MMC_DATA_READ) ?
 583			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
 584	}
 585}
 586
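/* A request with a command is a DCMD; data requests use their block layer tag */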
 587static inline int cqhci_tag(struct mmc_request *mrq)
 588{
 589	return mrq->cmd ? DCMD_SLOT : mrq->tag;
 590}
 591
 592static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 593{
 594	int err = 0;
 595	int tag = cqhci_tag(mrq);
 596	struct cqhci_host *cq_host = mmc->cqe_private;
 597	unsigned long flags;
 598
 599	if (!cq_host->enabled) {
 600		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
 601		return -EINVAL;
 602	}
 603
 604	/* First request after resume has to re-enable */
 605	if (!cq_host->activated)
 606		__cqhci_enable(cq_host);
 607
 608	if (!mmc->cqe_on) {
 609		if (cq_host->ops->pre_enable)
 610			cq_host->ops->pre_enable(mmc);
 611
 612		cqhci_writel(cq_host, 0, CQHCI_CTL);
 613		mmc->cqe_on = true;
 614		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
  615		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
 616			pr_err("%s: cqhci: CQE failed to exit halt state\n",
 617			       mmc_hostname(mmc));
 618		}
 619		if (cq_host->ops->enable)
 620			cq_host->ops->enable(mmc);
 621	}
 622
 623	if (mrq->data) {
 624		cqhci_prep_task_desc(mrq, cq_host, tag);
 625
 626		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
 627		if (err) {
 628			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
 629			       mmc_hostname(mmc), err);
 630			return err;
 631		}
 632	} else {
 633		cqhci_prep_dcmd_desc(mmc, mrq);
 634	}
 635
 636	spin_lock_irqsave(&cq_host->lock, flags);
 637
 638	if (cq_host->recovery_halt) {
 639		err = -EBUSY;
 640		goto out_unlock;
 641	}
 642
 643	cq_host->slot[tag].mrq = mrq;
 644	cq_host->slot[tag].flags = 0;
 645
 646	cq_host->qcnt += 1;
 647	/* Make sure descriptors are ready before ringing the doorbell */
 648	wmb();
 649	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
 650	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
 651		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
 652			 mmc_hostname(mmc), tag);
 653out_unlock:
 654	spin_unlock_irqrestore(&cq_host->lock, flags);
 655
 656	if (err)
 657		cqhci_post_req(mmc, mrq);
 658
 659	return err;
 660}
 661
 662static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
 663				  bool notify)
 664{
 665	struct cqhci_host *cq_host = mmc->cqe_private;
 666
 667	if (!cq_host->recovery_halt) {
 668		cq_host->recovery_halt = true;
 669		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
 670		wake_up(&cq_host->wait_queue);
 671		if (notify && mrq->recovery_notifier)
 672			mrq->recovery_notifier(mrq);
 673	}
 674}
 675
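/*
 * Record the first reported error as a slot flag, so that recovery can
 * later report an appropriate error code for the request.
 */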
 676static unsigned int cqhci_error_flags(int error1, int error2)
 677{
 678	int error = error1 ? error1 : error2;
 679
 680	switch (error) {
 681	case -EILSEQ:
 682		return CQHCI_HOST_CRC;
 683	case -ETIMEDOUT:
 684		return CQHCI_HOST_TIMEOUT;
 685	default:
 686		return CQHCI_HOST_OTHER;
 687	}
 688}
 689
 690static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
 691			    int data_error)
 692{
 693	struct cqhci_host *cq_host = mmc->cqe_private;
 694	struct cqhci_slot *slot;
 695	u32 terri;
 696	u32 tdpe;
 697	int tag;
 698
 699	spin_lock(&cq_host->lock);
 700
 701	terri = cqhci_readl(cq_host, CQHCI_TERRI);
 702
 703	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
 704		 mmc_hostname(mmc), status, cmd_error, data_error, terri);
 705
 706	/* Forget about errors when recovery has already been triggered */
 707	if (cq_host->recovery_halt)
 708		goto out_unlock;
 709
 710	if (!cq_host->qcnt) {
 711		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
 712			  mmc_hostname(mmc), status, cmd_error, data_error,
 713			  terri);
 714		goto out_unlock;
 715	}
 716
 717	if (CQHCI_TERRI_C_VALID(terri)) {
 718		tag = CQHCI_TERRI_C_TASK(terri);
 719		slot = &cq_host->slot[tag];
 720		if (slot->mrq) {
 721			slot->flags = cqhci_error_flags(cmd_error, data_error);
 722			cqhci_recovery_needed(mmc, slot->mrq, true);
 723		}
 724	}
 725
 726	if (CQHCI_TERRI_D_VALID(terri)) {
 727		tag = CQHCI_TERRI_D_TASK(terri);
 728		slot = &cq_host->slot[tag];
 729		if (slot->mrq) {
 730			slot->flags = cqhci_error_flags(data_error, cmd_error);
 731			cqhci_recovery_needed(mmc, slot->mrq, true);
 732		}
 733	}
 734
 735	/*
 736	 * Handle ICCE ("Invalid Crypto Configuration Error").  This should
 737	 * never happen, since the block layer ensures that all crypto-enabled
 738	 * I/O requests have a valid keyslot before they reach the driver.
 739	 *
 740	 * Note that GCE ("General Crypto Error") is different; it already got
 741	 * handled above by checking TERRI.
 742	 */
 743	if (status & CQHCI_IS_ICCE) {
 744		tdpe = cqhci_readl(cq_host, CQHCI_TDPE);
 745		WARN_ONCE(1,
 746			  "%s: cqhci: invalid crypto configuration error. IRQ status: 0x%08x TDPE: 0x%08x\n",
 747			  mmc_hostname(mmc), status, tdpe);
 748		while (tdpe != 0) {
 749			tag = __ffs(tdpe);
 750			tdpe &= ~(1 << tag);
 751			slot = &cq_host->slot[tag];
 752			if (!slot->mrq)
 753				continue;
 754			slot->flags = cqhci_error_flags(data_error, cmd_error);
 755			cqhci_recovery_needed(mmc, slot->mrq, true);
 756		}
 757	}
 758
 759	if (!cq_host->recovery_halt) {
 760		/*
 761		 * The only way to guarantee forward progress is to mark at
 762		 * least one task in error, so if none is indicated, pick one.
 763		 */
 764		for (tag = 0; tag < NUM_SLOTS; tag++) {
 765			slot = &cq_host->slot[tag];
 766			if (!slot->mrq)
 767				continue;
 768			slot->flags = cqhci_error_flags(data_error, cmd_error);
 769			cqhci_recovery_needed(mmc, slot->mrq, true);
 770			break;
 771		}
 772	}
 773
 774out_unlock:
 775	spin_unlock(&cq_host->lock);
 776}
 777
 778static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
 779{
 780	struct cqhci_host *cq_host = mmc->cqe_private;
 781	struct cqhci_slot *slot = &cq_host->slot[tag];
 782	struct mmc_request *mrq = slot->mrq;
 783	struct mmc_data *data;
 784
 785	if (!mrq) {
 786		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
 787			  mmc_hostname(mmc), tag);
 788		return;
 789	}
 790
 791	/* No completions allowed during recovery */
 792	if (cq_host->recovery_halt) {
 793		slot->flags |= CQHCI_COMPLETED;
 794		return;
 795	}
 796
 797	slot->mrq = NULL;
 798
 799	cq_host->qcnt -= 1;
 800
 801	data = mrq->data;
 802	if (data) {
 803		if (data->error)
 804			data->bytes_xfered = 0;
 805		else
 806			data->bytes_xfered = data->blksz * data->blocks;
 807	}
 808
 809	mmc_cqe_request_done(mmc, mrq);
 810}
 811
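/*
 * Main interrupt handler: acknowledge the interrupt status, hand errors
 * to cqhci_error_irq(), complete the tasks reported in TCN, and wake up
 * waiters for halt, task-clear and idle.
 */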
 812irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
 813		      int data_error)
 814{
 815	u32 status;
 816	unsigned long tag = 0, comp_status;
 817	struct cqhci_host *cq_host = mmc->cqe_private;
 818
 819	status = cqhci_readl(cq_host, CQHCI_IS);
 820	cqhci_writel(cq_host, status, CQHCI_IS);
 821
 822	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);
 823
 824	if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
 825	    cmd_error || data_error) {
 826		if (status & CQHCI_IS_RED)
 827			mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_RED);
 828		if (status & CQHCI_IS_GCE)
 829			mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_GCE);
 830		if (status & CQHCI_IS_ICCE)
 831			mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_ICCE);
 832		cqhci_error_irq(mmc, status, cmd_error, data_error);
 833	}
 834
 835	if (status & CQHCI_IS_TCC) {
 836		/* read TCN and complete the request */
 837		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
 838		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
 839		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
 840			 mmc_hostname(mmc), comp_status);
 841
 842		spin_lock(&cq_host->lock);
 843
 844		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
 845			/* complete the corresponding mrq */
 846			pr_debug("%s: cqhci: completing tag %lu\n",
 847				 mmc_hostname(mmc), tag);
 848			cqhci_finish_mrq(mmc, tag);
 849		}
 850
 851		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
 852			cq_host->waiting_for_idle = false;
 853			wake_up(&cq_host->wait_queue);
 854		}
 855
 856		spin_unlock(&cq_host->lock);
 857	}
 858
 859	if (status & CQHCI_IS_TCL)
 860		wake_up(&cq_host->wait_queue);
 861
 862	if (status & CQHCI_IS_HAC)
 863		wake_up(&cq_host->wait_queue);
 864
 865	return IRQ_HANDLED;
 866}
 867EXPORT_SYMBOL(cqhci_irq);
 868
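/*
 * wait_event() condition: the queue is idle when nothing is queued or
 * recovery is in progress; waiting_for_idle tells the ISR to wake us.
 */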
 869static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
 870{
 871	unsigned long flags;
 872	bool is_idle;
 873
 874	spin_lock_irqsave(&cq_host->lock, flags);
 875	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
 876	*ret = cq_host->recovery_halt ? -EBUSY : 0;
 877	cq_host->waiting_for_idle = !is_idle;
 878	spin_unlock_irqrestore(&cq_host->lock, flags);
 879
 880	return is_idle;
 881}
 882
 883static int cqhci_wait_for_idle(struct mmc_host *mmc)
 884{
 885	struct cqhci_host *cq_host = mmc->cqe_private;
 886	int ret;
 887
 888	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));
 889
 890	return ret;
 891}
 892
 893static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
 894			  bool *recovery_needed)
 895{
 896	struct cqhci_host *cq_host = mmc->cqe_private;
 897	int tag = cqhci_tag(mrq);
 898	struct cqhci_slot *slot = &cq_host->slot[tag];
 899	unsigned long flags;
 900	bool timed_out;
 901
 902	spin_lock_irqsave(&cq_host->lock, flags);
 903	timed_out = slot->mrq == mrq;
 904	if (timed_out) {
 905		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
 906		cqhci_recovery_needed(mmc, mrq, false);
 907		*recovery_needed = cq_host->recovery_halt;
 908	}
 909	spin_unlock_irqrestore(&cq_host->lock, flags);
 910
 911	if (timed_out) {
 912		pr_err("%s: cqhci: timeout for tag %d, qcnt %d\n",
 913		       mmc_hostname(mmc), tag, cq_host->qcnt);
 914		cqhci_dumpregs(cq_host);
 915	}
 916
 917	return timed_out;
 918}
 919
 920static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
 921{
 922	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
 923}
 924
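/*
 * Set CQHCI_CLEAR_ALL_TASKS and wait for the controller to clear it,
 * using the TCL interrupt to wake the waiter.
 */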
 925static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
 926{
 927	struct cqhci_host *cq_host = mmc->cqe_private;
 928	bool ret;
 929	u32 ctl;
 930
 931	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);
 932
 933	ctl = cqhci_readl(cq_host, CQHCI_CTL);
 934	ctl |= CQHCI_CLEAR_ALL_TASKS;
 935	cqhci_writel(cq_host, ctl, CQHCI_CTL);
 936
 937	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
 938			   msecs_to_jiffies(timeout) + 1);
 939
 940	cqhci_set_irqs(cq_host, 0);
 941
 942	ret = cqhci_tasks_cleared(cq_host);
 943
 944	if (!ret)
 945		pr_debug("%s: cqhci: Failed to clear tasks\n",
 946			 mmc_hostname(mmc));
 947
 948	return ret;
 949}
 950
 951static bool cqhci_halted(struct cqhci_host *cq_host)
 952{
 953	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
 954}
 955
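/*
 * Request a halt and wait for the HALT bit to be set, using the HAC
 * interrupt to wake the waiter.
 */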
 956static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
 957{
 958	struct cqhci_host *cq_host = mmc->cqe_private;
 959	bool ret;
 960	u32 ctl;
 961
 962	if (cqhci_halted(cq_host))
 963		return true;
 964
 965	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);
 966
 967	ctl = cqhci_readl(cq_host, CQHCI_CTL);
 968	ctl |= CQHCI_HALT;
 969	cqhci_writel(cq_host, ctl, CQHCI_CTL);
 970
 971	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
 972			   msecs_to_jiffies(timeout) + 1);
 973
 974	cqhci_set_irqs(cq_host, 0);
 975
 976	ret = cqhci_halted(cq_host);
 977
 978	if (!ret)
 979		pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
 980
 981	return ret;
 982}
 983
 984/*
 985 * After halting we expect to be able to use the command line. We interpret the
 986 * failure to halt to mean the data lines might still be in use (and the upper
 987 * layers will need to send a STOP command), so we set the timeout based on a
 988 * generous command timeout.
 989 */
 990#define CQHCI_START_HALT_TIMEOUT	5
 991
 992static void cqhci_recovery_start(struct mmc_host *mmc)
 993{
 994	struct cqhci_host *cq_host = mmc->cqe_private;
 995
 996	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
 997
 998	WARN_ON(!cq_host->recovery_halt);
 999
1000	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);
1001
1002	if (cq_host->ops->disable)
1003		cq_host->ops->disable(mmc, true);
1004
1005	mmc->cqe_on = false;
1006}
1007
1008static int cqhci_error_from_flags(unsigned int flags)
1009{
1010	if (!flags)
1011		return 0;
1012
1013	/* CRC errors might indicate re-tuning so prefer to report that */
1014	if (flags & CQHCI_HOST_CRC)
1015		return -EILSEQ;
1016
1017	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
1018		return -ETIMEDOUT;
1019
1020	return -EIO;
1021}
1022
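/*
 * Complete a request that was still in flight when recovery started,
 * converting the slot flags recorded at error time into an error code.
 */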
1023static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
1024{
1025	struct cqhci_slot *slot = &cq_host->slot[tag];
1026	struct mmc_request *mrq = slot->mrq;
1027	struct mmc_data *data;
1028
1029	if (!mrq)
1030		return;
1031
1032	slot->mrq = NULL;
1033
1034	cq_host->qcnt -= 1;
1035
1036	data = mrq->data;
1037	if (data) {
1038		data->bytes_xfered = 0;
1039		data->error = cqhci_error_from_flags(slot->flags);
1040	} else {
1041		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
1042	}
1043
1044	mmc_cqe_request_done(cq_host->mmc, mrq);
1045}
1046
1047static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
1048{
1049	int i;
1050
1051	for (i = 0; i < cq_host->num_slots; i++)
1052		cqhci_recover_mrq(cq_host, i);
1053}
1054
1055/*
1056 * By now the command and data lines should be unused so there is no reason for
1057 * CQHCI to take a long time to halt, but if it doesn't halt there could be
1058 * problems clearing tasks, so be generous.
1059 */
1060#define CQHCI_FINISH_HALT_TIMEOUT	20
1061
 1062/* CQHCI could be expected to clear its internal state pretty quickly */
1063#define CQHCI_CLEAR_TIMEOUT		20
1064
1065static void cqhci_recovery_finish(struct mmc_host *mmc)
1066{
1067	struct cqhci_host *cq_host = mmc->cqe_private;
1068	unsigned long flags;
1069	u32 cqcfg;
1070	bool ok;
1071
1072	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
1073
1074	WARN_ON(!cq_host->recovery_halt);
1075
1076	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
1077
1078	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
1079		ok = false;
1080
1081	/*
 1082	 * The specification contradicts itself: it says tasks cannot be cleared
 1083	 * if CQHCI does not halt, that a CQHCI which does not halt should be
 1084	 * disabled and re-enabled, and yet that it must not be disabled before
 1085	 * the tasks are cleared. Have a go anyway.
1086	 */
1087	if (!ok) {
1088		pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
1089		cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
1090		cqcfg &= ~CQHCI_ENABLE;
1091		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
1092		cqcfg |= CQHCI_ENABLE;
1093		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
1094		/* Be sure that there are no tasks */
1095		ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
1096		if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
1097			ok = false;
1098		WARN_ON(!ok);
1099	}
1100
1101	cqhci_recover_mrqs(cq_host);
1102
1103	WARN_ON(cq_host->qcnt);
1104
1105	spin_lock_irqsave(&cq_host->lock, flags);
1106	cq_host->qcnt = 0;
1107	cq_host->recovery_halt = false;
1108	mmc->cqe_on = false;
1109	spin_unlock_irqrestore(&cq_host->lock, flags);
1110
1111	/* Ensure all writes are done before interrupts are re-enabled */
1112	wmb();
1113
1114	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);
1115
1116	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
1117
1118	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
1119}
1120
1121static const struct mmc_cqe_ops cqhci_cqe_ops = {
1122	.cqe_enable = cqhci_enable,
1123	.cqe_disable = cqhci_disable,
1124	.cqe_request = cqhci_request,
1125	.cqe_post_req = cqhci_post_req,
1126	.cqe_off = cqhci_off,
1127	.cqe_wait_for_idle = cqhci_wait_for_idle,
1128	.cqe_timeout = cqhci_timeout,
1129	.cqe_recovery_start = cqhci_recovery_start,
1130	.cqe_recovery_finish = cqhci_recovery_finish,
1131};
1132
1133struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
1134{
1135	struct cqhci_host *cq_host;
1136	struct resource *cqhci_memres = NULL;
1137
1138	/* check and setup CMDQ interface */
1139	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1140						   "cqhci");
1141	if (!cqhci_memres) {
1142		dev_dbg(&pdev->dev, "CMDQ not supported\n");
1143		return ERR_PTR(-EINVAL);
1144	}
1145
1146	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
1147	if (!cq_host)
1148		return ERR_PTR(-ENOMEM);
1149	cq_host->mmio = devm_ioremap(&pdev->dev,
1150				     cqhci_memres->start,
1151				     resource_size(cqhci_memres));
1152	if (!cq_host->mmio) {
1153		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
1154		return ERR_PTR(-EBUSY);
1155	}
1156	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");
1157
1158	return cq_host;
1159}
1160EXPORT_SYMBOL(cqhci_pltfm_init);
1161
1162static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
1163{
1164	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
1165}
1166
1167static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
1168{
1169	u32 ver = cqhci_readl(cq_host, CQHCI_VER);
1170
1171	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
1172}
1173
1174int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
1175	      bool dma64)
1176{
1177	int err;
1178
1179	cq_host->dma64 = dma64;
1180	cq_host->mmc = mmc;
1181	cq_host->mmc->cqe_private = cq_host;
1182
1183	cq_host->num_slots = NUM_SLOTS;
1184	cq_host->dcmd_slot = DCMD_SLOT;
1185
1186	mmc->cqe_ops = &cqhci_cqe_ops;
1187
1188	mmc->cqe_qdepth = NUM_SLOTS;
1189	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
1190		mmc->cqe_qdepth -= 1;
1191
1192	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
1193				     sizeof(*cq_host->slot), GFP_KERNEL);
1194	if (!cq_host->slot) {
1195		err = -ENOMEM;
1196		goto out_err;
1197	}
1198
1199	err = cqhci_crypto_init(cq_host);
1200	if (err) {
1201		pr_err("%s: CQHCI crypto initialization failed\n",
1202		       mmc_hostname(mmc));
1203		goto out_err;
1204	}
1205
1206	spin_lock_init(&cq_host->lock);
1207
1208	init_completion(&cq_host->halt_comp);
1209	init_waitqueue_head(&cq_host->wait_queue);
1210
1211	pr_info("%s: CQHCI version %u.%02u\n",
1212		mmc_hostname(mmc), cqhci_ver_major(cq_host),
1213		cqhci_ver_minor(cq_host));
1214
1215	return 0;
1216
1217out_err:
1218	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
1219	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
1220	       cqhci_ver_minor(cq_host), err);
1221	return err;
1222}
1223EXPORT_SYMBOL(cqhci_init);
1224
1225MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
1226MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
1227MODULE_LICENSE("GPL v2");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
   3 */
   4
   5#include <linux/delay.h>
   6#include <linux/highmem.h>
   7#include <linux/io.h>
   8#include <linux/iopoll.h>
   9#include <linux/module.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/slab.h>
  12#include <linux/scatterlist.h>
  13#include <linux/platform_device.h>
  14#include <linux/ktime.h>
  15
  16#include <linux/mmc/mmc.h>
  17#include <linux/mmc/host.h>
  18#include <linux/mmc/card.h>
  19
  20#include "cqhci.h"
  21#include "cqhci-crypto.h"
  22
  23#define DCMD_SLOT 31
  24#define NUM_SLOTS 32
  25
  26struct cqhci_slot {
  27	struct mmc_request *mrq;
  28	unsigned int flags;
  29#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
  30#define CQHCI_COMPLETED		BIT(1)
  31#define CQHCI_HOST_CRC		BIT(2)
  32#define CQHCI_HOST_TIMEOUT	BIT(3)
  33#define CQHCI_HOST_OTHER	BIT(4)
  34};
  35
  36static bool cqhci_halted(struct cqhci_host *cq_host)
  37{
  38	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
  39}
  40
  41static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
  42{
  43	return cq_host->desc_base + (tag * cq_host->slot_sz);
  44}
  45
  46static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
  47{
  48	u8 *desc = get_desc(cq_host, tag);
  49
  50	return desc + cq_host->task_desc_len;
  51}
  52
  53static inline size_t get_trans_desc_offset(struct cqhci_host *cq_host, u8 tag)
  54{
  55	return cq_host->trans_desc_len * cq_host->mmc->max_segs * tag;
  56}
  57
  58static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
  59{
  60	size_t offset = get_trans_desc_offset(cq_host, tag);
  61
  62	return cq_host->trans_desc_dma_base + offset;
  63}
  64
  65static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
  66{
  67	size_t offset = get_trans_desc_offset(cq_host, tag);
  68
  69	return cq_host->trans_desc_base + offset;
  70}
  71
  72static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
  73{
  74	u8 *link_temp;
  75	dma_addr_t trans_temp;
  76
  77	link_temp = get_link_desc(cq_host, tag);
  78	trans_temp = get_trans_desc_dma(cq_host, tag);
  79
  80	memset(link_temp, 0, cq_host->link_desc_len);
  81	if (cq_host->link_desc_len > 8)
  82		*(link_temp + 8) = 0;
  83
  84	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
  85		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
  86		return;
  87	}
  88
  89	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);
  90
  91	if (cq_host->dma64) {
  92		__le64 *data_addr = (__le64 __force *)(link_temp + 4);
  93
  94		data_addr[0] = cpu_to_le64(trans_temp);
  95	} else {
  96		__le32 *data_addr = (__le32 __force *)(link_temp + 4);
  97
  98		data_addr[0] = cpu_to_le32(trans_temp);
  99	}
 100}
 101
 102static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
 103{
 104	cqhci_writel(cq_host, set, CQHCI_ISTE);
 105	cqhci_writel(cq_host, set, CQHCI_ISGE);
 106}
 107
 108#define DRV_NAME "cqhci"
 109
 110#define CQHCI_DUMP(f, x...) \
 111	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)
 112
 113static void cqhci_dumpregs(struct cqhci_host *cq_host)
 114{
 115	struct mmc_host *mmc = cq_host->mmc;
 116
 117	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");
 118
 119	CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
 120		   cqhci_readl(cq_host, CQHCI_CAP),
 121		   cqhci_readl(cq_host, CQHCI_VER));
 122	CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
 123		   cqhci_readl(cq_host, CQHCI_CFG),
 124		   cqhci_readl(cq_host, CQHCI_CTL));
 125	CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
 126		   cqhci_readl(cq_host, CQHCI_IS),
 127		   cqhci_readl(cq_host, CQHCI_ISTE));
 128	CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
 129		   cqhci_readl(cq_host, CQHCI_ISGE),
 130		   cqhci_readl(cq_host, CQHCI_IC));
 131	CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
 132		   cqhci_readl(cq_host, CQHCI_TDLBA),
 133		   cqhci_readl(cq_host, CQHCI_TDLBAU));
 134	CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
 135		   cqhci_readl(cq_host, CQHCI_TDBR),
 136		   cqhci_readl(cq_host, CQHCI_TCN));
 137	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
 138		   cqhci_readl(cq_host, CQHCI_DQS),
 139		   cqhci_readl(cq_host, CQHCI_DPT));
 140	CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
 141		   cqhci_readl(cq_host, CQHCI_TCLR),
 142		   cqhci_readl(cq_host, CQHCI_SSC1));
 143	CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
 144		   cqhci_readl(cq_host, CQHCI_SSC2),
 145		   cqhci_readl(cq_host, CQHCI_CRDCT));
 146	CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
 147		   cqhci_readl(cq_host, CQHCI_RMEM),
 148		   cqhci_readl(cq_host, CQHCI_TERRI));
 149	CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
 150		   cqhci_readl(cq_host, CQHCI_CRI),
 151		   cqhci_readl(cq_host, CQHCI_CRA));
 152
 153	if (cq_host->ops->dumpregs)
 154		cq_host->ops->dumpregs(mmc);
 155	else
 156		CQHCI_DUMP(": ===========================================\n");
 157}
 158
 159/*
 160 * The allocated descriptor table for task, link & transfer descriptors
 161 * looks like:
 162 * |----------|
 163 * |task desc |  |->|----------|
 164 * |----------|  |  |trans desc|
 165 * |link desc-|->|  |----------|
 166 * |----------|          .
 167 *      .                .
 168 *  no. of slots      max-segs
 169 *      .           |----------|
 170 * |----------|
 171 * The idea here is to create the [task+trans] table and mark & point the
 172 * link desc to the transfer desc table on a per slot basis.
 173 */
 174static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
 175{
 176	int i = 0;
 177
 178	/* task descriptor can be 64/128 bit irrespective of arch */
 179	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
 180		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
 181			       CQHCI_TASK_DESC_SZ, CQHCI_CFG);
 182		cq_host->task_desc_len = 16;
 183	} else {
 184		cq_host->task_desc_len = 8;
 185	}
 186
 187	/*
 188	 * 96 bits length of transfer desc instead of 128 bits which means
 189	 * ADMA would expect next valid descriptor at the 96th bit
 190	 * or 128th bit
 191	 */
 192	if (cq_host->dma64) {
 193		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
 194			cq_host->trans_desc_len = 12;
 195		else
 196			cq_host->trans_desc_len = 16;
 197		cq_host->link_desc_len = 16;
 198	} else {
 199		cq_host->trans_desc_len = 8;
 200		cq_host->link_desc_len = 8;
 201	}
 202
 203	/* total size of a slot: 1 task & 1 transfer (link) */
 204	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;
 205
 206	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
 207
 208	cq_host->data_size = get_trans_desc_offset(cq_host, cq_host->mmc->cqe_qdepth);
 209
 210	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
 211		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
 212		 cq_host->slot_sz);
 213
 214	/*
 215	 * allocate a dma-mapped chunk of memory for the descriptors
 216	 * allocate a dma-mapped chunk of memory for link descriptors
 217	 * setup each link-desc memory offset per slot-number to
 218	 * the descriptor table.
 219	 */
 220	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
 221						 cq_host->desc_size,
 222						 &cq_host->desc_dma_base,
 223						 GFP_KERNEL);
 224	if (!cq_host->desc_base)
 225		return -ENOMEM;
 226
 227	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
 228					      cq_host->data_size,
 229					      &cq_host->trans_desc_dma_base,
 230					      GFP_KERNEL);
 231	if (!cq_host->trans_desc_base) {
 232		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
 233				   cq_host->desc_base,
 234				   cq_host->desc_dma_base);
 235		cq_host->desc_base = NULL;
 236		cq_host->desc_dma_base = 0;
 237		return -ENOMEM;
 238	}
 239
 240	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
 241		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
 242		(unsigned long long)cq_host->desc_dma_base,
 243		(unsigned long long)cq_host->trans_desc_dma_base);
 244
 245	for (; i < (cq_host->num_slots); i++)
 246		setup_trans_desc(cq_host, i);
 247
 248	return 0;
 249}
 250
 251static void __cqhci_enable(struct cqhci_host *cq_host)
 252{
 253	struct mmc_host *mmc = cq_host->mmc;
 254	u32 cqcfg;
 255
 256	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
 257
 258	/* Configuration must not be changed while enabled */
 259	if (cqcfg & CQHCI_ENABLE) {
 260		cqcfg &= ~CQHCI_ENABLE;
 261		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
 262	}
 263
 264	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);
 265
 266	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
 267		cqcfg |= CQHCI_DCMD;
 268
 269	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
 270		cqcfg |= CQHCI_TASK_DESC_SZ;
 271
 272	if (mmc->caps2 & MMC_CAP2_CRYPTO)
 273		cqcfg |= CQHCI_CRYPTO_GENERAL_ENABLE;
 274
 275	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
 276
 277	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
 278		     CQHCI_TDLBA);
 279	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
 280		     CQHCI_TDLBAU);
 281
 282	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);
 283
 284	cqhci_set_irqs(cq_host, 0);
 285
 286	cqcfg |= CQHCI_ENABLE;
 287
 288	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
 289
 290	if (cqhci_halted(cq_host))
 291		cqhci_writel(cq_host, 0, CQHCI_CTL);
 292
 293	mmc->cqe_on = true;
 294
 295	if (cq_host->ops->enable)
 296		cq_host->ops->enable(mmc);
 297
 298	/* Ensure all writes are done before interrupts are enabled */
 299	wmb();
 300
 301	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
 302
 303	cq_host->activated = true;
 304}
 305
 306static void __cqhci_disable(struct cqhci_host *cq_host)
 307{
 308	u32 cqcfg;
 309
 310	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
 311	cqcfg &= ~CQHCI_ENABLE;
 312	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
 313
 314	cq_host->mmc->cqe_on = false;
 315
 316	cq_host->activated = false;
 317}
 318
 319int cqhci_deactivate(struct mmc_host *mmc)
 320{
 321	struct cqhci_host *cq_host = mmc->cqe_private;
 322
 323	if (cq_host->enabled && cq_host->activated)
 324		__cqhci_disable(cq_host);
 325
 326	return 0;
 327}
 328EXPORT_SYMBOL(cqhci_deactivate);
 329
 330int cqhci_resume(struct mmc_host *mmc)
 331{
 332	/* Re-enable is done upon first request */
 333	return 0;
 334}
 335EXPORT_SYMBOL(cqhci_resume);
 336
 337static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
 338{
 339	struct cqhci_host *cq_host = mmc->cqe_private;
 340	int err;
 341
 342	if (!card->ext_csd.cmdq_en)
 343		return -EINVAL;
 344
 345	if (cq_host->enabled)
 346		return 0;
 347
 348	cq_host->rca = card->rca;
 349
 350	err = cqhci_host_alloc_tdl(cq_host);
 351	if (err) {
 352		pr_err("%s: Failed to enable CQE, error %d\n",
 353		       mmc_hostname(mmc), err);
 354		return err;
 355	}
 356
 357	__cqhci_enable(cq_host);
 358
 359	cq_host->enabled = true;
 360
 361#ifdef DEBUG
 362	cqhci_dumpregs(cq_host);
 363#endif
 364	return 0;
 365}
 366
 367/* CQHCI is idle and should halt immediately, so set a small timeout */
 368#define CQHCI_OFF_TIMEOUT 100
 369
 370static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
 371{
 372	return cqhci_readl(cq_host, CQHCI_CTL);
 373}
 374
 375static void cqhci_off(struct mmc_host *mmc)
 376{
 377	struct cqhci_host *cq_host = mmc->cqe_private;
 378	u32 reg;
 379	int err;
 380
 381	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
 382		return;
 383
 384	if (cq_host->ops->disable)
 385		cq_host->ops->disable(mmc, false);
 386
 387	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
 388
 389	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
 390				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
 391	if (err < 0)
 392		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
 393	else
 394		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
 395
 396	if (cq_host->ops->post_disable)
 397		cq_host->ops->post_disable(mmc);
 398
 399	mmc->cqe_on = false;
 400}
 401
 402static void cqhci_disable(struct mmc_host *mmc)
 403{
 404	struct cqhci_host *cq_host = mmc->cqe_private;
 405
 406	if (!cq_host->enabled)
 407		return;
 408
 409	cqhci_off(mmc);
 410
 411	__cqhci_disable(cq_host);
 412
 413	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
 414			   cq_host->trans_desc_base,
 415			   cq_host->trans_desc_dma_base);
 416
 417	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
 418			   cq_host->desc_base,
 419			   cq_host->desc_dma_base);
 420
 421	cq_host->trans_desc_base = NULL;
 422	cq_host->desc_base = NULL;
 423
 424	cq_host->enabled = false;
 425}
 426
 427static void cqhci_prep_task_desc(struct mmc_request *mrq,
 428				 struct cqhci_host *cq_host, int tag)
 429{
 430	__le64 *task_desc = (__le64 __force *)get_desc(cq_host, tag);
 431	u32 req_flags = mrq->data->flags;
 432	u64 desc0;
 433
 434	desc0 = CQHCI_VALID(1) |
 435		CQHCI_END(1) |
 436		CQHCI_INT(1) |
 437		CQHCI_ACT(0x5) |
 438		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
 439		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
 440		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
 441		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
 442		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
 443		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
 444		CQHCI_BLK_COUNT(mrq->data->blocks) |
 445		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);
 446
 447	task_desc[0] = cpu_to_le64(desc0);
 448
 449	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
 450		u64 desc1 = cqhci_crypto_prep_task_desc(mrq);
 451
 452		task_desc[1] = cpu_to_le64(desc1);
 453
 454		pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx%016llx\n",
 455			 mmc_hostname(mrq->host), mrq->tag, desc1, desc0);
 456	} else {
 457		pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
 458			 mmc_hostname(mrq->host), mrq->tag, desc0);
 459	}
 460}
 461
 462static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
 463{
 464	int sg_count;
 465	struct mmc_data *data = mrq->data;
 466
 467	if (!data)
 468		return -EINVAL;
 469
 470	sg_count = dma_map_sg(mmc_dev(host), data->sg,
 471			      data->sg_len,
 472			      (data->flags & MMC_DATA_WRITE) ?
 473			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
 474	if (!sg_count) {
 475		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
 476		return -ENOMEM;
 477	}
 478
 479	return sg_count;
 480}
 481
 482void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
 483			 bool dma64)
 484{
 485	__le32 *attr = (__le32 __force *)desc;
 486
 487	*attr = (CQHCI_VALID(1) |
 488		 CQHCI_END(end ? 1 : 0) |
 489		 CQHCI_INT(0) |
 490		 CQHCI_ACT(0x4) |
 491		 CQHCI_DAT_LENGTH(len));
 492
 493	if (dma64) {
 494		__le64 *dataddr = (__le64 __force *)(desc + 4);
 495
 496		dataddr[0] = cpu_to_le64(addr);
 497	} else {
 498		__le32 *dataddr = (__le32 __force *)(desc + 4);
 499
 500		dataddr[0] = cpu_to_le32(addr);
 501	}
 502}
 503EXPORT_SYMBOL(cqhci_set_tran_desc);
 504
 505static int cqhci_prep_tran_desc(struct mmc_request *mrq,
 506			       struct cqhci_host *cq_host, int tag)
 507{
 508	struct mmc_data *data = mrq->data;
 509	int i, sg_count, len;
 510	bool end = false;
 511	bool dma64 = cq_host->dma64;
 512	dma_addr_t addr;
 513	u8 *desc;
 514	struct scatterlist *sg;
 515
 516	sg_count = cqhci_dma_map(mrq->host, mrq);
 517	if (sg_count < 0) {
 518		pr_err("%s: %s: unable to map sg lists, %d\n",
 519				mmc_hostname(mrq->host), __func__, sg_count);
 520		return sg_count;
 521	}
 522
 523	desc = get_trans_desc(cq_host, tag);
 524
 525	for_each_sg(data->sg, sg, sg_count, i) {
 526		addr = sg_dma_address(sg);
 527		len = sg_dma_len(sg);
 528
 529		if ((i+1) == sg_count)
 530			end = true;
 531		if (cq_host->ops->set_tran_desc)
 532			cq_host->ops->set_tran_desc(cq_host, &desc, addr, len, end, dma64);
 533		else
 534			cqhci_set_tran_desc(desc, addr, len, end, dma64);
 535
 536		desc += cq_host->trans_desc_len;
 537	}
 538
 539	return 0;
 540}
 541
 542static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
 543				   struct mmc_request *mrq)
 544{
 545	u64 *task_desc = NULL;
 546	u64 data = 0;
 547	u8 resp_type;
 548	u8 *desc;
 549	__le64 *dataddr;
 550	struct cqhci_host *cq_host = mmc->cqe_private;
 551	u8 timing;
 552
 553	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
 554		resp_type = 0x0;
 555		timing = 0x1;
 556	} else {
 557		if (mrq->cmd->flags & MMC_RSP_R1B) {
 558			resp_type = 0x3;
 559			timing = 0x0;
 560		} else {
 561			resp_type = 0x2;
 562			timing = 0x1;
 563		}
 564	}
 565
 566	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
 567	memset(task_desc, 0, cq_host->task_desc_len);
 568	data |= (CQHCI_VALID(1) |
 569		 CQHCI_END(1) |
 570		 CQHCI_INT(1) |
 571		 CQHCI_QBAR(1) |
 572		 CQHCI_ACT(0x5) |
 573		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
 574		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
 575	if (cq_host->ops->update_dcmd_desc)
 576		cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
 577	*task_desc |= data;
 578	desc = (u8 *)task_desc;
 579	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
 580		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
 581	dataddr = (__le64 __force *)(desc + 4);
 582	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
 583
 584}
 585
 586static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
 587{
 588	struct mmc_data *data = mrq->data;
 589
 590	if (data) {
 591		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
 592			     (data->flags & MMC_DATA_READ) ?
 593			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
 594	}
 595}
 596
 597static inline int cqhci_tag(struct mmc_request *mrq)
 598{
 599	return mrq->cmd ? DCMD_SLOT : mrq->tag;
 600}
 601
 602static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 603{
 604	int err = 0;
 605	int tag = cqhci_tag(mrq);
 606	struct cqhci_host *cq_host = mmc->cqe_private;
 607	unsigned long flags;
 608
 609	if (!cq_host->enabled) {
 610		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
 611		return -EINVAL;
 612	}
 613
 614	/* First request after resume has to re-enable */
 615	if (!cq_host->activated)
 616		__cqhci_enable(cq_host);
 617
 618	if (!mmc->cqe_on) {
 619		if (cq_host->ops->pre_enable)
 620			cq_host->ops->pre_enable(mmc);
 621
 622		cqhci_writel(cq_host, 0, CQHCI_CTL);
 623		mmc->cqe_on = true;
 624		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
 625		if (cqhci_halted(cq_host)) {
 626			pr_err("%s: cqhci: CQE failed to exit halt state\n",
 627			       mmc_hostname(mmc));
 628		}
 629		if (cq_host->ops->enable)
 630			cq_host->ops->enable(mmc);
 631	}
 632
 633	if (mrq->data) {
 634		cqhci_prep_task_desc(mrq, cq_host, tag);
 635
 636		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
 637		if (err) {
 638			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
 639			       mmc_hostname(mmc), err);
 640			return err;
 641		}
 642	} else {
 643		cqhci_prep_dcmd_desc(mmc, mrq);
 644	}
 645
 646	spin_lock_irqsave(&cq_host->lock, flags);
 647
 648	if (cq_host->recovery_halt) {
 649		err = -EBUSY;
 650		goto out_unlock;
 651	}
 652
 653	cq_host->slot[tag].mrq = mrq;
 654	cq_host->slot[tag].flags = 0;
 655
 656	cq_host->qcnt += 1;
 657	/* Make sure descriptors are ready before ringing the doorbell */
 658	wmb();
 659	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
 660	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
 661		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
 662			 mmc_hostname(mmc), tag);
 663out_unlock:
 664	spin_unlock_irqrestore(&cq_host->lock, flags);
 665
 666	if (err)
 667		cqhci_post_req(mmc, mrq);
 668
 669	return err;
 670}
 671
 672static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
 673				  bool notify)
 674{
 675	struct cqhci_host *cq_host = mmc->cqe_private;
 676
 677	if (!cq_host->recovery_halt) {
 678		cq_host->recovery_halt = true;
 679		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
 680		wake_up(&cq_host->wait_queue);
 681		if (notify && mrq->recovery_notifier)
 682			mrq->recovery_notifier(mrq);
 683	}
 684}
 685
 686static unsigned int cqhci_error_flags(int error1, int error2)
 687{
 688	int error = error1 ? error1 : error2;
 689
 690	switch (error) {
 691	case -EILSEQ:
 692		return CQHCI_HOST_CRC;
 693	case -ETIMEDOUT:
 694		return CQHCI_HOST_TIMEOUT;
 695	default:
 696		return CQHCI_HOST_OTHER;
 697	}
 698}
 699
 700static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
 701			    int data_error)
 702{
 703	struct cqhci_host *cq_host = mmc->cqe_private;
 704	struct cqhci_slot *slot;
 705	u32 terri;
 706	u32 tdpe;
 707	int tag;
 708
 709	spin_lock(&cq_host->lock);
 710
 711	terri = cqhci_readl(cq_host, CQHCI_TERRI);
 712
 713	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
 714		 mmc_hostname(mmc), status, cmd_error, data_error, terri);
 715
 716	/* Forget about errors when recovery has already been triggered */
 717	if (cq_host->recovery_halt)
 718		goto out_unlock;
 719
 720	if (!cq_host->qcnt) {
 721		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
 722			  mmc_hostname(mmc), status, cmd_error, data_error,
 723			  terri);
 724		goto out_unlock;
 725	}
 726
 727	if (CQHCI_TERRI_C_VALID(terri)) {
 728		tag = CQHCI_TERRI_C_TASK(terri);
 729		slot = &cq_host->slot[tag];
 730		if (slot->mrq) {
 731			slot->flags = cqhci_error_flags(cmd_error, data_error);
 732			cqhci_recovery_needed(mmc, slot->mrq, true);
 733		}
 734	}
 735
 736	if (CQHCI_TERRI_D_VALID(terri)) {
 737		tag = CQHCI_TERRI_D_TASK(terri);
 738		slot = &cq_host->slot[tag];
 739		if (slot->mrq) {
 740			slot->flags = cqhci_error_flags(data_error, cmd_error);
 741			cqhci_recovery_needed(mmc, slot->mrq, true);
 742		}
 743	}
 744
 745	/*
 746	 * Handle ICCE ("Invalid Crypto Configuration Error").  This should
 747	 * never happen, since the block layer ensures that all crypto-enabled
 748	 * I/O requests have a valid keyslot before they reach the driver.
 749	 *
 750	 * Note that GCE ("General Crypto Error") is different; it already got
 751	 * handled above by checking TERRI.
 752	 */
 753	if (status & CQHCI_IS_ICCE) {
 754		tdpe = cqhci_readl(cq_host, CQHCI_TDPE);
 755		WARN_ONCE(1,
 756			  "%s: cqhci: invalid crypto configuration error. IRQ status: 0x%08x TDPE: 0x%08x\n",
 757			  mmc_hostname(mmc), status, tdpe);
 758		while (tdpe != 0) {
 759			tag = __ffs(tdpe);
 760			tdpe &= ~(1 << tag);
 761			slot = &cq_host->slot[tag];
 762			if (!slot->mrq)
 763				continue;
 764			slot->flags = cqhci_error_flags(data_error, cmd_error);
 765			cqhci_recovery_needed(mmc, slot->mrq, true);
 766		}
 767	}
 768
 769	if (!cq_host->recovery_halt) {
 770		/*
 771		 * The only way to guarantee forward progress is to mark at
 772		 * least one task in error, so if none is indicated, pick one.
 773		 */
 774		for (tag = 0; tag < NUM_SLOTS; tag++) {
 775			slot = &cq_host->slot[tag];
 776			if (!slot->mrq)
 777				continue;
 778			slot->flags = cqhci_error_flags(data_error, cmd_error);
 779			cqhci_recovery_needed(mmc, slot->mrq, true);
 780			break;
 781		}
 782	}
 783
 784out_unlock:
 785	spin_unlock(&cq_host->lock);
 786}
 787
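/*
 * Complete the request occupying @tag.  During recovery, completion is
 * deferred by marking the slot CQHCI_COMPLETED; otherwise the slot is
 * released and the request is handed back to the core via
 * mmc_cqe_request_done().
 */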
 788static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
 789{
 790	struct cqhci_host *cq_host = mmc->cqe_private;
 791	struct cqhci_slot *slot = &cq_host->slot[tag];
 792	struct mmc_request *mrq = slot->mrq;
 793	struct mmc_data *data;
 794
 795	if (!mrq) {
 796		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
 797			  mmc_hostname(mmc), tag);
 798		return;
 799	}
 800
 801	/* No completions allowed during recovery */
 802	if (cq_host->recovery_halt) {
 803		slot->flags |= CQHCI_COMPLETED;
 804		return;
 805	}
 806
 807	slot->mrq = NULL;
 808
 809	cq_host->qcnt -= 1;
 810
 811	data = mrq->data;
 812	if (data) {
 813		if (data->error)
 814			data->bytes_xfered = 0;
 815		else
 816			data->bytes_xfered = data->blksz * data->blocks;
 817	}
 818
 819	mmc_cqe_request_done(mmc, mrq);
 820}
 821
 822irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
 823		      int data_error)
 824{
 825	u32 status;
 826	unsigned long tag = 0, comp_status;
 827	struct cqhci_host *cq_host = mmc->cqe_private;
 828
 829	status = cqhci_readl(cq_host, CQHCI_IS);
 830	cqhci_writel(cq_host, status, CQHCI_IS);
 831
 832	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);
 833
 834	if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
 835	    cmd_error || data_error) {
 836		if (status & CQHCI_IS_RED)
 837			mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_RED);
 838		if (status & CQHCI_IS_GCE)
 839			mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_GCE);
 840		if (status & CQHCI_IS_ICCE)
 841			mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_ICCE);
 842		cqhci_error_irq(mmc, status, cmd_error, data_error);
 843	}
 844
 845	if (status & CQHCI_IS_TCC) {
 846		/* read TCN and complete the request */
 847		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
 848		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
 849		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
 850			 mmc_hostname(mmc), comp_status);
 851
 852		spin_lock(&cq_host->lock);
 853
 854		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
 855			/* complete the corresponding mrq */
 856			pr_debug("%s: cqhci: completing tag %lu\n",
 857				 mmc_hostname(mmc), tag);
 858			cqhci_finish_mrq(mmc, tag);
 859		}
 860
 861		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
 862			cq_host->waiting_for_idle = false;
 863			wake_up(&cq_host->wait_queue);
 864		}
 865
 866		spin_unlock(&cq_host->lock);
 867	}
 868
 869	if (status & CQHCI_IS_TCL)
 870		wake_up(&cq_host->wait_queue);
 871
 872	if (status & CQHCI_IS_HAC)
 873		wake_up(&cq_host->wait_queue);
 874
 875	return IRQ_HANDLED;
 876}
 877EXPORT_SYMBOL(cqhci_irq);
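/*
 * Illustrative sketch (hypothetical, not part of this driver) of how a host
 * controller driver typically wires up cqhci_irq(): its own interrupt
 * handler forwards CQE interrupts along with any command/data error it has
 * already decoded.  All my_* names and register bits are placeholders.
 */
#if 0
static irqreturn_t my_host_irq(int irq, void *dev_id)
{
	struct my_host *host = dev_id;
	u32 intmask = readl(host->ioaddr + MY_HOST_INT_STATUS);

	/* Forward CQE interrupts; pass decoded cmd/data errors if any */
	if (intmask & MY_HOST_INT_CQE)
		return cqhci_irq(host->mmc, intmask, 0, 0);

	return IRQ_NONE;
}
#endif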
 878
 879static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
 880{
 881	unsigned long flags;
 882	bool is_idle;
 883
 884	spin_lock_irqsave(&cq_host->lock, flags);
 885	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
 886	*ret = cq_host->recovery_halt ? -EBUSY : 0;
 887	cq_host->waiting_for_idle = !is_idle;
 888	spin_unlock_irqrestore(&cq_host->lock, flags);
 889
 890	return is_idle;
 891}
 892
 893static int cqhci_wait_for_idle(struct mmc_host *mmc)
 894{
 895	struct cqhci_host *cq_host = mmc->cqe_private;
 896	int ret;
 897
 898	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));
 899
 900	return ret;
 901}
 902
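/*
 * cqe_timeout handler.  Returns true if the request is still owned by CQHCI:
 * the slot is then flagged CQHCI_EXTERNAL_TIMEOUT and recovery is requested,
 * with *recovery_needed reporting whether the recovery halt is now in effect.
 * Returns false if the request completed in the meantime, in which case the
 * timeout can be ignored.
 */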
 903static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
 904			  bool *recovery_needed)
 905{
 906	struct cqhci_host *cq_host = mmc->cqe_private;
 907	int tag = cqhci_tag(mrq);
 908	struct cqhci_slot *slot = &cq_host->slot[tag];
 909	unsigned long flags;
 910	bool timed_out;
 911
 912	spin_lock_irqsave(&cq_host->lock, flags);
 913	timed_out = slot->mrq == mrq;
 914	if (timed_out) {
 915		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
 916		cqhci_recovery_needed(mmc, mrq, false);
 917		*recovery_needed = cq_host->recovery_halt;
 918	}
 919	spin_unlock_irqrestore(&cq_host->lock, flags);
 920
 921	if (timed_out) {
 922		pr_err("%s: cqhci: timeout for tag %d, qcnt %d\n",
 923		       mmc_hostname(mmc), tag, cq_host->qcnt);
 924		cqhci_dumpregs(cq_host);
 925	}
 926
 927	return timed_out;
 928}
 929
 930static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
 931{
 932	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
 933}
 934
 935static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
 936{
 937	struct cqhci_host *cq_host = mmc->cqe_private;
 938	bool ret;
 939	u32 ctl;
 940
 941	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);
 942
 943	ctl = cqhci_readl(cq_host, CQHCI_CTL);
 944	ctl |= CQHCI_CLEAR_ALL_TASKS;
 945	cqhci_writel(cq_host, ctl, CQHCI_CTL);
 946
 947	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
 948			   msecs_to_jiffies(timeout) + 1);
 949
 950	cqhci_set_irqs(cq_host, 0);
 951
 952	ret = cqhci_tasks_cleared(cq_host);
 953
 954	if (!ret)
 955		pr_warn("%s: cqhci: Failed to clear tasks\n",
 956			mmc_hostname(mmc));
 957
 958	return ret;
 959}
 960
 961static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
 962{
 963	struct cqhci_host *cq_host = mmc->cqe_private;
 964	bool ret;
 965	u32 ctl;
 966
 967	if (cqhci_halted(cq_host))
 968		return true;
 969
 970	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);
 971
 972	ctl = cqhci_readl(cq_host, CQHCI_CTL);
 973	ctl |= CQHCI_HALT;
 974	cqhci_writel(cq_host, ctl, CQHCI_CTL);
 975
 976	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
 977			   msecs_to_jiffies(timeout) + 1);
 978
 979	cqhci_set_irqs(cq_host, 0);
 980
 981	ret = cqhci_halted(cq_host);
 982
 983	if (!ret)
 984		pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
 985
 986	return ret;
 987}
 988
 989/*
 990 * After halting we expect to be able to use the command line. We interpret a
 991 * failure to halt to mean that the data lines might still be in use (and the
 992 * upper layers will need to send a STOP command). However, failing to halt
 993 * complicates recovery, so set a timeout that reasonably allows I/O to complete.
 994 */
 995#define CQHCI_START_HALT_TIMEOUT	500
 996
 997static void cqhci_recovery_start(struct mmc_host *mmc)
 998{
 999	struct cqhci_host *cq_host = mmc->cqe_private;
1000
1001	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
1002
1003	WARN_ON(!cq_host->recovery_halt);
1004
1005	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);
1006
1007	if (cq_host->ops->disable)
1008		cq_host->ops->disable(mmc, true);
1009
1010	mmc->cqe_on = false;
1011}
1012
1013static int cqhci_error_from_flags(unsigned int flags)
1014{
1015	if (!flags)
1016		return 0;
1017
1018	/* CRC errors might indicate re-tuning so prefer to report that */
1019	if (flags & CQHCI_HOST_CRC)
1020		return -EILSEQ;
1021
1022	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
1023		return -ETIMEDOUT;
1024
1025	return -EIO;
1026}
1027
1028static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
1029{
1030	struct cqhci_slot *slot = &cq_host->slot[tag];
1031	struct mmc_request *mrq = slot->mrq;
1032	struct mmc_data *data;
1033
1034	if (!mrq)
1035		return;
1036
1037	slot->mrq = NULL;
1038
1039	cq_host->qcnt -= 1;
1040
1041	data = mrq->data;
1042	if (data) {
1043		data->bytes_xfered = 0;
1044		data->error = cqhci_error_from_flags(slot->flags);
1045	} else {
1046		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
1047	}
1048
1049	mmc_cqe_request_done(cq_host->mmc, mrq);
1050}
1051
1052static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
1053{
1054	int i;
1055
1056	for (i = 0; i < cq_host->num_slots; i++)
1057		cqhci_recover_mrq(cq_host, i);
1058}
1059
1060/*
1061 * By now the command and data lines should be unused so there is no reason for
1062 * CQHCI to take a long time to halt, but if it doesn't halt there could be
1063 * problems clearing tasks, so be generous.
1064 */
1065#define CQHCI_FINISH_HALT_TIMEOUT	20
1066
1067/* CQHCI could be expected to clear its internal state pretty quickly */
1068#define CQHCI_CLEAR_TIMEOUT		20
1069
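/*
 * Finish recovery: halt, clear all tasks, and toggle CQHCI_ENABLE to make
 * sure the controller state really is cleared, retrying the halt/clear if
 * the first attempt failed.  Every outstanding request is then completed
 * with an error derived from its slot flags, after which the queue state is
 * reset and interrupts are re-enabled.
 */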
1070static void cqhci_recovery_finish(struct mmc_host *mmc)
1071{
1072	struct cqhci_host *cq_host = mmc->cqe_private;
1073	unsigned long flags;
1074	u32 cqcfg;
1075	bool ok;
1076
1077	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
1078
1079	WARN_ON(!cq_host->recovery_halt);
1080
1081	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
1082
1083	/*
1084	 * The specification contradicts itself: it says tasks cannot be cleared
1085	 * if CQHCI does not halt, yet also that CQHCI should be disabled and
1086	 * re-enabled if it does not halt, while warning not to disable it before
1087	 * clearing tasks. Have a go anyway.
1088	 */
1089	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
1090		ok = false;
1091
1092	/* Disable to make sure tasks really are cleared */
1093	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
1094	cqcfg &= ~CQHCI_ENABLE;
1095	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
1096
1097	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
1098	cqcfg |= CQHCI_ENABLE;
1099	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
1100
1101	cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
1102
1103	if (!ok)
1104		cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);
1105
1106	cqhci_recover_mrqs(cq_host);
1107
1108	WARN_ON(cq_host->qcnt);
1109
1110	spin_lock_irqsave(&cq_host->lock, flags);
1111	cq_host->qcnt = 0;
1112	cq_host->recovery_halt = false;
1113	mmc->cqe_on = false;
1114	spin_unlock_irqrestore(&cq_host->lock, flags);
1115
1116	/* Ensure all writes are done before interrupts are re-enabled */
1117	wmb();
1118
1119	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);
1120
1121	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
1122
1123	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
1124}
1125
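/*
 * CQE callbacks handed to the mmc core.  Requests arrive via cqe_request()
 * and are unmapped via cqe_post_req().  On an error or timeout the core is
 * expected to invoke cqe_recovery_start(), perform its own cleanup, and then
 * cqe_recovery_finish(), which completes every outstanding request with an
 * appropriate error.
 */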
1126static const struct mmc_cqe_ops cqhci_cqe_ops = {
1127	.cqe_enable = cqhci_enable,
1128	.cqe_disable = cqhci_disable,
1129	.cqe_request = cqhci_request,
1130	.cqe_post_req = cqhci_post_req,
1131	.cqe_off = cqhci_off,
1132	.cqe_wait_for_idle = cqhci_wait_for_idle,
1133	.cqe_timeout = cqhci_timeout,
1134	.cqe_recovery_start = cqhci_recovery_start,
1135	.cqe_recovery_finish = cqhci_recovery_finish,
1136};
1137
1138struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
1139{
1140	struct cqhci_host *cq_host;
1141	struct resource *cqhci_memres = NULL;
1142
1143	/* check and set up the CMDQ interface */
1144	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1145						   "cqhci");
1146	if (!cqhci_memres) {
1147		dev_dbg(&pdev->dev, "CMDQ not supported\n");
1148		return ERR_PTR(-EINVAL);
1149	}
1150
1151	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
1152	if (!cq_host)
1153		return ERR_PTR(-ENOMEM);
1154	cq_host->mmio = devm_ioremap(&pdev->dev,
1155				     cqhci_memres->start,
1156				     resource_size(cqhci_memres));
1157	if (!cq_host->mmio) {
1158		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
1159		return ERR_PTR(-EBUSY);
1160	}
1161	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");
1162
1163	return cq_host;
1164}
1165EXPORT_SYMBOL(cqhci_pltfm_init);
1166
1167static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
1168{
1169	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
1170}
1171
1172static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
1173{
1174	u32 ver = cqhci_readl(cq_host, CQHCI_VER);
1175
1176	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
1177}
1178
1179int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
1180	      bool dma64)
1181{
1182	int err;
1183
1184	cq_host->dma64 = dma64;
1185	cq_host->mmc = mmc;
1186	cq_host->mmc->cqe_private = cq_host;
1187
1188	cq_host->num_slots = NUM_SLOTS;
1189	cq_host->dcmd_slot = DCMD_SLOT;
1190
1191	mmc->cqe_ops = &cqhci_cqe_ops;
1192
1193	mmc->cqe_qdepth = NUM_SLOTS;
1194	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
1195		mmc->cqe_qdepth -= 1;
1196
1197	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
1198				     sizeof(*cq_host->slot), GFP_KERNEL);
1199	if (!cq_host->slot) {
1200		err = -ENOMEM;
1201		goto out_err;
1202	}
1203
1204	err = cqhci_crypto_init(cq_host);
1205	if (err) {
1206		pr_err("%s: CQHCI crypto initialization failed\n",
1207		       mmc_hostname(mmc));
1208		goto out_err;
1209	}
1210
1211	spin_lock_init(&cq_host->lock);
1212
1213	init_completion(&cq_host->halt_comp);
1214	init_waitqueue_head(&cq_host->wait_queue);
1215
1216	pr_info("%s: CQHCI version %u.%02u\n",
1217		mmc_hostname(mmc), cqhci_ver_major(cq_host),
1218		cqhci_ver_minor(cq_host));
1219
1220	return 0;
1221
1222out_err:
1223	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
1224	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
1225	       cqhci_ver_minor(cq_host), err);
1226	return err;
1227}
1228EXPORT_SYMBOL(cqhci_init);
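/*
 * Illustrative sketch (hypothetical, not part of this driver) of how a
 * platform host driver typically ties this together: map the "cqhci"
 * register region, hook up host ops, advertise CQE support and initialise
 * the queue.  All my_* names are placeholders.
 */
#if 0
static const struct cqhci_host_ops my_cqhci_host_ops = {
	.enable		= my_cqhci_enable,	/* host-specific CQE enable */
	.disable	= my_cqhci_disable,	/* host-specific CQE disable */
};

static int my_add_cqe(struct platform_device *pdev, struct mmc_host *mmc,
		      bool dma64)
{
	struct cqhci_host *cq_host = cqhci_pltfm_init(pdev);

	if (IS_ERR(cq_host))
		return PTR_ERR(cq_host);

	cq_host->ops = &my_cqhci_host_ops;
	mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;

	return cqhci_init(cq_host, mmc, dma64);
}
#endif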
1229
1230MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
1231MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
1232MODULE_LICENSE("GPL v2");