// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};

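/*
 * Descriptor list layout helpers: each of the 32 slots holds a task
 * descriptor immediately followed by a link descriptor, and the link
 * descriptor points at that slot's chain of transfer descriptors
 * (up to mmc->max_segs entries).
 */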
static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_dma_base +
		(cq_host->mmc->max_segs * tag *
		 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_base +
		(cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}

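/*
 * Initialize the link descriptor for @tag so that it chains to the slot's
 * transfer descriptor list.  The DCMD slot carries no data transfer, so its
 * link descriptor is simply marked invalid and terminated.
 */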
static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
		return;
	}

	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le32(trans_temp);
	}
}

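/* Program the same mask into the interrupt status enable and signal enable registers */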
static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;

	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

	CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CAP),
		   cqhci_readl(cq_host, CQHCI_VER));
	CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CFG),
		   cqhci_readl(cq_host, CQHCI_CTL));
	CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_IS),
		   cqhci_readl(cq_host, CQHCI_ISTE));
	CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_ISGE),
		   cqhci_readl(cq_host, CQHCI_IC));
	CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDLBA),
		   cqhci_readl(cq_host, CQHCI_TDLBAU));
	CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDBR),
		   cqhci_readl(cq_host, CQHCI_TCN));
	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_DQS),
		   cqhci_readl(cq_host, CQHCI_DPT));
	CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TCLR),
		   cqhci_readl(cq_host, CQHCI_SSC1));
	CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_SSC2),
		   cqhci_readl(cq_host, CQHCI_CRDCT));
	CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_RMEM),
		   cqhci_readl(cq_host, CQHCI_TERRI));
	CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CRI),
		   cqhci_readl(cq_host, CQHCI_CRA));

	if (cq_host->ops->dumpregs)
		cq_host->ops->dumpregs(mmc);
	else
		CQHCI_DUMP(": ===========================================\n");
}

/**
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
			       CQHCI_TASK_DESC_SZ, CQHCI_CFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * 96 bits length of transfer desc instead of 128 bits which means
	 * ADMA would expect next valid descriptor at the 96th bit
	 * or 128th bit
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
		cq_host->mmc->cqe_qdepth;

	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
		 cq_host->slot_sz);

	/*
	 * allocate a dma-mapped chunk of memory for the descriptors
	 * allocate a dma-mapped chunk of memory for link descriptors
	 * setup each link-desc memory offset per slot-number to
	 * the descriptor table.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	if (!cq_host->desc_base)
		return -ENOMEM;

	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
					      cq_host->data_size,
					      &cq_host->trans_desc_dma_base,
					      GFP_KERNEL);
	if (!cq_host->trans_desc_base) {
		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
				   cq_host->desc_base,
				   cq_host->desc_dma_base);
		cq_host->desc_base = NULL;
		cq_host->desc_dma_base = 0;
		return -ENOMEM;
	}

	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
		(unsigned long long)cq_host->desc_dma_base,
		(unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < (cq_host->num_slots); i++)
		setup_trans_desc(cq_host, i);

	return 0;
}

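/*
 * Program CQCFG (DCMD support, task descriptor size), the task descriptor
 * list base address and SSC2 (card RCA), then set the enable bit and unmask
 * all CQHCI interrupts.
 */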
static void __cqhci_enable(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

	/* Configuration must not be changed while enabled */
	if (cqcfg & CQHCI_ENABLE) {
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	}

	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		cqcfg |= CQHCI_DCMD;

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
		cqcfg |= CQHCI_TASK_DESC_SZ;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

	cqhci_set_irqs(cq_host, 0);

	cqcfg |= CQHCI_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	mmc->cqe_on = true;

	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);

	/* Ensure all writes are done before interrupts are enabled */
	wmb();

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cq_host->mmc->cqe_on = false;

	cq_host->activated = false;
}

int cqhci_suspend(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (cq_host->enabled)
		__cqhci_disable(cq_host);

	return 0;
}
EXPORT_SYMBOL(cqhci_suspend);

int cqhci_resume(struct mmc_host *mmc)
{
	/* Re-enable is done upon first request */
	return 0;
}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int err;

	if (cq_host->enabled)
		return 0;

	cq_host->rca = card->rca;

	err = cqhci_host_alloc_tdl(cq_host);
	if (err)
		return err;

	__cqhci_enable(cq_host);

	cq_host->enabled = true;

#ifdef DEBUG
	cqhci_dumpregs(cq_host);
#endif
	return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

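/*
 * Halt the CQ engine so that legacy (non-CQE) commands can be issued,
 * polling briefly for the HALT bit to take effect.
 */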
static void cqhci_off(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	ktime_t timeout;
	bool timed_out;
	u32 reg;

	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
		return;

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, false);

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
	while (1) {
		timed_out = ktime_compare(ktime_get(), timeout) > 0;
		reg = cqhci_readl(cq_host, CQHCI_CTL);
		if ((reg & CQHCI_HALT) || timed_out)
			break;
	}

	if (timed_out)
		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
	else
		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

	mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->enabled)
		return;

	cqhci_off(mmc);

	__cqhci_disable(cq_host);

	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
			   cq_host->trans_desc_base,
			   cq_host->trans_desc_dma_base);

	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
			   cq_host->desc_base,
			   cq_host->desc_dma_base);

	cq_host->trans_desc_base = NULL;
	cq_host->desc_base = NULL;

	cq_host->enabled = false;
}

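/*
 * Build the 64-bit task descriptor for a data request from the mmc_data
 * flags, block count and block address.
 */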
static void cqhci_prep_task_desc(struct mmc_request *mrq,
					u64 *data, bool intr)
{
	u32 req_flags = mrq->data->flags;

	*data = CQHCI_VALID(1) |
		CQHCI_END(1) |
		CQHCI_INT(intr) |
		CQHCI_ACT(0x5) |
		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
		CQHCI_BLK_COUNT(mrq->data->blocks) |
		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

	pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
		 mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
}

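/* DMA-map the request's scatterlist; returns the mapped segment count or a negative error */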
static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
	int sg_count;
	struct mmc_data *data = mrq->data;

	if (!data)
		return -EINVAL;

	sg_count = dma_map_sg(mmc_dev(host), data->sg,
			      data->sg_len,
			      (data->flags & MMC_DATA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_count) {
		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
		return -ENOMEM;
	}

	return sg_count;
}

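/* Fill in one ADMA-style transfer descriptor: attributes, data length and buffer address */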
static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
				bool dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = (CQHCI_VALID(1) |
		 CQHCI_END(end ? 1 : 0) |
		 CQHCI_INT(0) |
		 CQHCI_ACT(0x4) |
		 CQHCI_DAT_LENGTH(len));

	if (dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}

static int cqhci_prep_tran_desc(struct mmc_request *mrq,
			       struct cqhci_host *cq_host, int tag)
{
	struct mmc_data *data = mrq->data;
	int i, sg_count, len;
	bool end = false;
	bool dma64 = cq_host->dma64;
	dma_addr_t addr;
	u8 *desc;
	struct scatterlist *sg;

	sg_count = cqhci_dma_map(mrq->host, mrq);
	if (sg_count < 0) {
		pr_err("%s: %s: unable to map sg lists, %d\n",
				mmc_hostname(mrq->host), __func__, sg_count);
		return sg_count;
	}

	desc = get_trans_desc(cq_host, tag);

	for_each_sg(data->sg, sg, sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((i+1) == sg_count)
			end = true;
		cqhci_set_tran_desc(desc, addr, len, end, dma64);
		desc += cq_host->trans_desc_len;
	}

	return 0;
}

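/*
 * Build the direct command (DCMD) task descriptor in slot 31, encoding the
 * command opcode, argument, response type and command timing.
 */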
static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	u64 *task_desc = NULL;
	u64 data = 0;
	u8 resp_type;
	u8 *desc;
	__le64 *dataddr;
	struct cqhci_host *cq_host = mmc->cqe_private;
	u8 timing;

	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
		resp_type = 0x0;
		timing = 0x1;
	} else {
		if (mrq->cmd->flags & MMC_RSP_R1B) {
			resp_type = 0x3;
			timing = 0x0;
		} else {
			resp_type = 0x2;
			timing = 0x1;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
	memset(task_desc, 0, cq_host->task_desc_len);
	data |= (CQHCI_VALID(1) |
		 CQHCI_END(1) |
		 CQHCI_INT(1) |
		 CQHCI_QBAR(1) |
		 CQHCI_ACT(0x5) |
		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
	if (cq_host->ops->update_dcmd_desc)
		cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
	*task_desc |= data;
	desc = (u8 *)task_desc;
	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
	dataddr = (__le64 __force *)(desc + 4);
	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (data) {
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_READ) ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static inline int cqhci_tag(struct mmc_request *mrq)
{
	return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

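/*
 * Issue a request: prepare the task and transfer descriptors (or a DCMD
 * descriptor), then ring the doorbell for the chosen tag under the host lock.
 */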
static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	u64 data = 0;
	u64 *task_desc = NULL;
	int tag = cqhci_tag(mrq);
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;

	if (!cq_host->enabled) {
		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
		return -EINVAL;
	}

	/* First request after resume has to re-enable */
	if (!cq_host->activated)
		__cqhci_enable(cq_host);

	if (!mmc->cqe_on) {
		cqhci_writel(cq_host, 0, CQHCI_CTL);
		mmc->cqe_on = true;
		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
			pr_err("%s: cqhci: CQE failed to exit halt state\n",
			       mmc_hostname(mmc));
		}
		if (cq_host->ops->enable)
			cq_host->ops->enable(mmc);
	}

	if (mrq->data) {
		task_desc = (__le64 __force *)get_desc(cq_host, tag);
		cqhci_prep_task_desc(mrq, &data, 1);
		*task_desc = cpu_to_le64(data);
		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
		if (err) {
			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
			       mmc_hostname(mmc), err);
			return err;
		}
	} else {
		cqhci_prep_dcmd_desc(mmc, mrq);
	}

	spin_lock_irqsave(&cq_host->lock, flags);

	if (cq_host->recovery_halt) {
		err = -EBUSY;
		goto out_unlock;
	}

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;

	cq_host->qcnt += 1;
	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
			 mmc_hostname(mmc), tag);
out_unlock:
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (err)
		cqhci_post_req(mmc, mrq);

	return err;
}

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
				  bool notify)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->recovery_halt) {
		cq_host->recovery_halt = true;
		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
		wake_up(&cq_host->wait_queue);
		if (notify && mrq->recovery_notifier)
			mrq->recovery_notifier(mrq);
	}
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
	int error = error1 ? error1 : error2;

	switch (error) {
	case -EILSEQ:
		return CQHCI_HOST_CRC;
	case -ETIMEDOUT:
		return CQHCI_HOST_TIMEOUT;
	default:
		return CQHCI_HOST_OTHER;
	}
}

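/*
 * Handle an error interrupt: use TERRI to identify the failing task(s),
 * record the error flags in the corresponding slot(s) and trigger recovery.
 */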
static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
			    int data_error)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot;
	u32 terri;
	int tag;

	spin_lock(&cq_host->lock);

	terri = cqhci_readl(cq_host, CQHCI_TERRI);

	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
		 mmc_hostname(mmc), status, cmd_error, data_error, terri);

	/* Forget about errors when recovery has already been triggered */
	if (cq_host->recovery_halt)
		goto out_unlock;

	if (!cq_host->qcnt) {
		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
			  mmc_hostname(mmc), status, cmd_error, data_error,
			  terri);
		goto out_unlock;
	}

	if (CQHCI_TERRI_C_VALID(terri)) {
		tag = CQHCI_TERRI_C_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(cmd_error, data_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (CQHCI_TERRI_D_VALID(terri)) {
		tag = CQHCI_TERRI_D_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (!cq_host->recovery_halt) {
		/*
		 * The only way to guarantee forward progress is to mark at
		 * least one task in error, so if none is indicated, pick one.
		 */
		for (tag = 0; tag < NUM_SLOTS; tag++) {
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
			break;
		}
	}

out_unlock:
	spin_unlock(&cq_host->lock);
}

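/* Complete the request occupying @tag unless error recovery is in progress */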
static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq) {
		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
			  mmc_hostname(mmc), tag);
		return;
	}

	/* No completions allowed during recovery */
	if (cq_host->recovery_halt) {
		slot->flags |= CQHCI_COMPLETED;
		return;
	}

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		if (data->error)
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = data->blksz * data->blocks;
	}

	mmc_cqe_request_done(mmc, mrq);
}

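/*
 * Main interrupt handler, called by the host driver: dispatches error
 * interrupts, completes the tasks reported in TCN, and wakes waiters on
 * task-clearing and halt events.
 */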
irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cqhci_host *cq_host = mmc->cqe_private;

	status = cqhci_readl(cq_host, CQHCI_IS);
	cqhci_writel(cq_host, status, CQHCI_IS);

	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

	if ((status & CQHCI_IS_RED) || cmd_error || data_error)
		cqhci_error_irq(mmc, status, cmd_error, data_error);

	if (status & CQHCI_IS_TCC) {
		/* read TCN and complete the request */
		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
			 mmc_hostname(mmc), comp_status);

		spin_lock(&cq_host->lock);

		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			/* complete the corresponding mrq */
			pr_debug("%s: cqhci: completing tag %lu\n",
				 mmc_hostname(mmc), tag);
			cqhci_finish_mrq(mmc, tag);
		}

		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
			cq_host->waiting_for_idle = false;
			wake_up(&cq_host->wait_queue);
		}

		spin_unlock(&cq_host->lock);
	}

	if (status & CQHCI_IS_TCL)
		wake_up(&cq_host->wait_queue);

	if (status & CQHCI_IS_HAC)
		wake_up(&cq_host->wait_queue);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);

static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
	unsigned long flags;
	bool is_idle;

	spin_lock_irqsave(&cq_host->lock, flags);
	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
	*ret = cq_host->recovery_halt ? -EBUSY : 0;
	cq_host->waiting_for_idle = !is_idle;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int ret;

	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

	return ret;
}

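/*
 * Called via the ->cqe_timeout op when a request times out.  If the request
 * is still queued, mark it and ask for recovery; returns false if it has
 * already completed.
 */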
static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool *recovery_needed)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int tag = cqhci_tag(mrq);
	struct cqhci_slot *slot = &cq_host->slot[tag];
	unsigned long flags;
	bool timed_out;

	spin_lock_irqsave(&cq_host->lock, flags);
	timed_out = slot->mrq == mrq;
	if (timed_out) {
		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
		cqhci_recovery_needed(mmc, mrq, false);
		*recovery_needed = cq_host->recovery_halt;
	}
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (timed_out) {
		pr_err("%s: cqhci: timeout for tag %d\n",
		       mmc_hostname(mmc), tag);
		cqhci_dumpregs(cq_host);
	}

	return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

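/* Set CLEAR_ALL_TASKS and wait for the controller to clear it; returns true on success */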
static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_CLEAR_ALL_TASKS;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_tasks_cleared(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to clear tasks\n",
			 mmc_hostname(mmc));

	return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

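/* Request a halt and wait up to @timeout ms for the HALT bit to be set; returns true if halted */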
static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	if (cqhci_halted(cq_host))
		return true;

	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_HALT;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_halted(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

	return ret;
}

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT	5

static void cqhci_recovery_start(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, true);

	mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
	if (!flags)
		return 0;

	/* CRC errors might indicate re-tuning so prefer to report that */
	if (flags & CQHCI_HOST_CRC)
		return -EILSEQ;

	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
		return -ETIMEDOUT;

	return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq)
		return;

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		data->bytes_xfered = 0;
		data->error = cqhci_error_from_flags(slot->flags);
	} else {
		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
	}

	mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
	int i;

	for (i = 0; i < cq_host->num_slots; i++)
		cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT	20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT		20

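/*
 * Second phase of recovery: halt, clear all tasks (disabling and re-enabling
 * the controller if necessary), fail back all outstanding requests and
 * re-enable interrupts.
 */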
static void cqhci_recovery_finish(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;
	u32 cqcfg;
	bool ok;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
		ok = false;

	/*
	 * The specification contradicts itself: it says tasks cannot be
	 * cleared unless CQHCI halts, that a CQHCI which will not halt should
	 * be disabled and re-enabled, and yet that CQHCI must not be disabled
	 * before its tasks are cleared. Have a go anyway.
	 */
	if (!ok) {
		pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
		cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		cqcfg |= CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		/* Be sure that there are no tasks */
		ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
		if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
			ok = false;
		WARN_ON(!ok);
	}

	cqhci_recover_mrqs(cq_host);

	WARN_ON(cq_host->qcnt);

	spin_lock_irqsave(&cq_host->lock, flags);
	cq_host->qcnt = 0;
	cq_host->recovery_halt = false;
	mmc->cqe_on = false;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	/* Ensure all writes are done before interrupts are re-enabled */
	wmb();

	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
	.cqe_enable = cqhci_enable,
	.cqe_disable = cqhci_disable,
	.cqe_request = cqhci_request,
	.cqe_post_req = cqhci_post_req,
	.cqe_off = cqhci_off,
	.cqe_wait_for_idle = cqhci_wait_for_idle,
	.cqe_timeout = cqhci_timeout,
	.cqe_recovery_start = cqhci_recovery_start,
	.cqe_recovery_finish = cqhci_recovery_finish,
};

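/* Look up the "cqhci_mem" resource and ioremap the CQHCI register block */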
struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
	struct cqhci_host *cq_host;
	struct resource *cqhci_memres = NULL;

	/* check and setup CMDQ interface */
	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "cqhci_mem");
	if (!cqhci_memres) {
		dev_dbg(&pdev->dev, "CMDQ not supported\n");
		return ERR_PTR(-EINVAL);
	}

	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host)
		return ERR_PTR(-ENOMEM);
	cq_host->mmio = devm_ioremap(&pdev->dev,
				     cqhci_memres->start,
				     resource_size(cqhci_memres));
	if (!cq_host->mmio) {
		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
		return ERR_PTR(-EBUSY);
	}
	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

	return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);

static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
	u32 ver = cqhci_readl(cq_host, CQHCI_VER);

	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}

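/*
 * Initialize the cqhci_host, wire up the cqe_ops and allocate the slot array.
 * The queue depth is 32 slots, minus one when a slot is reserved for DCMD.
 */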
int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
	      bool dma64)
{
	int err;

	cq_host->dma64 = dma64;
	cq_host->mmc = mmc;
	cq_host->mmc->cqe_private = cq_host;

	cq_host->num_slots = NUM_SLOTS;
	cq_host->dcmd_slot = DCMD_SLOT;

	mmc->cqe_ops = &cqhci_cqe_ops;

	mmc->cqe_qdepth = NUM_SLOTS;
	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		mmc->cqe_qdepth -= 1;

	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
				     sizeof(*cq_host->slot), GFP_KERNEL);
	if (!cq_host->slot) {
		err = -ENOMEM;
		goto out_err;
	}

	spin_lock_init(&cq_host->lock);

	init_completion(&cq_host->halt_comp);
	init_waitqueue_head(&cq_host->wait_queue);

	pr_info("%s: CQHCI version %u.%02u\n",
		mmc_hostname(mmc), cqhci_ver_major(cq_host),
		cqhci_ver_minor(cq_host));

	return 0;

out_err:
	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
	       cqhci_ver_minor(cq_host), err);
	return err;
}
EXPORT_SYMBOL(cqhci_init);

MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");