v4.17
   1/*
   2 * Shared part of driver for MMC/SDHC controller on Cavium OCTEON and
   3 * ThunderX SOCs.
   4 *
   5 * This file is subject to the terms and conditions of the GNU General Public
   6 * License.  See the file "COPYING" in the main directory of this archive
   7 * for more details.
   8 *
   9 * Copyright (C) 2012-2017 Cavium Inc.
  10 * Authors:
  11 *   David Daney <david.daney@cavium.com>
  12 *   Peter Swain <pswain@cavium.com>
  13 *   Steven J. Hill <steven.hill@cavium.com>
  14 *   Jan Glauber <jglauber@cavium.com>
  15 */
  16#include <linux/bitfield.h>
  17#include <linux/delay.h>
  18#include <linux/dma-direction.h>
  19#include <linux/dma-mapping.h>
  20#include <linux/gpio/consumer.h>
  21#include <linux/interrupt.h>
  22#include <linux/mmc/mmc.h>
  23#include <linux/mmc/slot-gpio.h>
  24#include <linux/module.h>
  25#include <linux/regulator/consumer.h>
  26#include <linux/scatterlist.h>
  27#include <linux/time.h>
  28
  29#include "cavium.h"
  30
  31const char *cvm_mmc_irq_names[] = {
  32	"MMC Buffer",
  33	"MMC Command",
  34	"MMC DMA",
  35	"MMC Command Error",
  36	"MMC DMA Error",
  37	"MMC Switch",
  38	"MMC Switch Error",
  39	"MMC DMA int Fifo",
  40	"MMC DMA int",
  41};
  42
  43/*
  44 * The Cavium MMC host hardware assumes that all commands have fixed
  45 * command and response types.  These are correct if MMC devices are
  46 * being used.  However, non-MMC devices like SD use command and
  47 * response types that are unexpected by the host hardware.
  48 *
  49 * The command and response types can be overridden by supplying an
  50 * XOR value that is applied to the type.  We calculate the XOR value
  51 * from the values in this table and the flags passed from the MMC
  52 * core.
  53 */
  54static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
  55	{0, 0},		/* CMD0 */
  56	{0, 3},		/* CMD1 */
  57	{0, 2},		/* CMD2 */
  58	{0, 1},		/* CMD3 */
  59	{0, 0},		/* CMD4 */
  60	{0, 1},		/* CMD5 */
  61	{0, 1},		/* CMD6 */
  62	{0, 1},		/* CMD7 */
  63	{1, 1},		/* CMD8 */
  64	{0, 2},		/* CMD9 */
  65	{0, 2},		/* CMD10 */
  66	{1, 1},		/* CMD11 */
  67	{0, 1},		/* CMD12 */
  68	{0, 1},		/* CMD13 */
  69	{1, 1},		/* CMD14 */
  70	{0, 0},		/* CMD15 */
  71	{0, 1},		/* CMD16 */
  72	{1, 1},		/* CMD17 */
  73	{1, 1},		/* CMD18 */
  74	{3, 1},		/* CMD19 */
  75	{2, 1},		/* CMD20 */
  76	{0, 0},		/* CMD21 */
  77	{0, 0},		/* CMD22 */
  78	{0, 1},		/* CMD23 */
  79	{2, 1},		/* CMD24 */
  80	{2, 1},		/* CMD25 */
  81	{2, 1},		/* CMD26 */
  82	{2, 1},		/* CMD27 */
  83	{0, 1},		/* CMD28 */
  84	{0, 1},		/* CMD29 */
  85	{1, 1},		/* CMD30 */
  86	{1, 1},		/* CMD31 */
  87	{0, 0},		/* CMD32 */
  88	{0, 0},		/* CMD33 */
  89	{0, 0},		/* CMD34 */
  90	{0, 1},		/* CMD35 */
  91	{0, 1},		/* CMD36 */
  92	{0, 0},		/* CMD37 */
  93	{0, 1},		/* CMD38 */
  94	{0, 4},		/* CMD39 */
  95	{0, 5},		/* CMD40 */
  96	{0, 0},		/* CMD41 */
  97	{2, 1},		/* CMD42 */
  98	{0, 0},		/* CMD43 */
  99	{0, 0},		/* CMD44 */
 100	{0, 0},		/* CMD45 */
 101	{0, 0},		/* CMD46 */
 102	{0, 0},		/* CMD47 */
 103	{0, 0},		/* CMD48 */
 104	{0, 0},		/* CMD49 */
 105	{0, 0},		/* CMD50 */
 106	{0, 0},		/* CMD51 */
 107	{0, 0},		/* CMD52 */
 108	{0, 0},		/* CMD53 */
 109	{0, 0},		/* CMD54 */
 110	{0, 1},		/* CMD55 */
 111	{0xff, 0xff},	/* CMD56 */
 112	{0, 0},		/* CMD57 */
 113	{0, 0},		/* CMD58 */
 114	{0, 0},		/* CMD59 */
 115	{0, 0},		/* CMD60 */
 116	{0, 0},		/* CMD61 */
 117	{0, 0},		/* CMD62 */
 118	{0, 0}		/* CMD63 */
 119};
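/*
 * Worked example of the XOR override (illustrative): the CMD8 entry
 * above is {1, 1} because the hardware assumes the eMMC meaning of
 * CMD8 (SEND_EXT_CSD, a data-read command with an R1 response).  For
 * an SD card, CMD8 is SEND_IF_COND, a broadcast command with an R7
 * response, so cvm_mmc_get_cr_mods() below computes
 * ctype_xor = 0 ^ 1 = 1 and rtype_xor = 1 ^ 1 = 0 (R7 is handled like
 * R1), flipping the command type to "no data" while leaving the
 * response decoding unchanged.
 */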
 120
 121static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
 122{
 123	struct cvm_mmc_cr_type *cr;
 124	u8 hardware_ctype, hardware_rtype;
 125	u8 desired_ctype = 0, desired_rtype = 0;
 126	struct cvm_mmc_cr_mods r;
 127
 128	cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
 129	hardware_ctype = cr->ctype;
 130	hardware_rtype = cr->rtype;
 131	if (cmd->opcode == MMC_GEN_CMD)
 132		hardware_ctype = (cmd->arg & 1) ? 1 : 2;
 133
 134	switch (mmc_cmd_type(cmd)) {
 135	case MMC_CMD_ADTC:
 136		desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
 137		break;
 138	case MMC_CMD_AC:
 139	case MMC_CMD_BC:
 140	case MMC_CMD_BCR:
 141		desired_ctype = 0;
 142		break;
 143	}
 144
 145	switch (mmc_resp_type(cmd)) {
 146	case MMC_RSP_NONE:
 147		desired_rtype = 0;
 148		break;
 149	case MMC_RSP_R1:/* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
 150	case MMC_RSP_R1B:
 151		desired_rtype = 1;
 152		break;
 153	case MMC_RSP_R2:
 154		desired_rtype = 2;
 155		break;
 156	case MMC_RSP_R3: /* MMC_RSP_R4 */
 157		desired_rtype = 3;
 158		break;
 159	}
 160	r.ctype_xor = desired_ctype ^ hardware_ctype;
 161	r.rtype_xor = desired_rtype ^ hardware_rtype;
 162	return r;
 163}
 164
 165static void check_switch_errors(struct cvm_mmc_host *host)
 166{
 167	u64 emm_switch;
 168
 169	emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
 170	if (emm_switch & MIO_EMM_SWITCH_ERR0)
 171		dev_err(host->dev, "Switch power class error\n");
 172	if (emm_switch & MIO_EMM_SWITCH_ERR1)
 173		dev_err(host->dev, "Switch hs timing error\n");
 174	if (emm_switch & MIO_EMM_SWITCH_ERR2)
 175		dev_err(host->dev, "Switch bus width error\n");
 176}
 177
 178static void clear_bus_id(u64 *reg)
 179{
 180	u64 bus_id_mask = GENMASK_ULL(61, 60);
 181
 182	*reg &= ~bus_id_mask;
 183}
 184
 185static void set_bus_id(u64 *reg, int bus_id)
 186{
 187	clear_bus_id(reg);
  188	*reg |= FIELD_PREP(GENMASK_ULL(61, 60), bus_id);
 189}
 190
 191static int get_bus_id(u64 reg)
 192{
 193	return FIELD_GET(GENMASK_ULL(61, 60), reg);
 194}
 195
 196/*
 197 * We never set the switch_exe bit since that would interfere
  198 * with the commands sent by the MMC core.
 199 */
 200static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
 201{
 202	int retries = 100;
 203	u64 rsp_sts;
 204	int bus_id;
 205
 206	/*
  207	 * Mode settings are only taken from slot 0. Work around that hardware
 208	 * issue by first switching to slot 0.
 209	 */
 210	bus_id = get_bus_id(emm_switch);
 211	clear_bus_id(&emm_switch);
 212	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
 213
 214	set_bus_id(&emm_switch, bus_id);
 215	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
 216
 217	/* wait for the switch to finish */
 218	do {
 219		rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
 220		if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
 221			break;
 222		udelay(10);
 223	} while (--retries);
 224
 225	check_switch_errors(host);
 226}
 227
 228static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
 229{
 230	/* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
 231	u64 match = 0x3001070fffffffffull;
 232
 233	return (slot->cached_switch & match) != (new_val & match);
 234}
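/*
 * For reference, the match mask above corresponds to the following
 * MIO_EMM_SWITCH fields (assuming the layout used by the register
 * definitions in cavium.h): bits <61:60> BUS_ID, bit <48> HS_TIMING,
 * bits <42:40> BUS_WIDTH, bits <35:32> POWER_CLASS, bits <31:16>
 * CLK_HI and bits <15:0> CLK_LO.
 */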
 235
 236static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
 237{
 238	u64 timeout;
 239
 240	if (!slot->clock)
 241		return;
 242
 243	if (ns)
 244		timeout = (slot->clock * ns) / NSEC_PER_SEC;
 245	else
 246		timeout = (slot->clock * 850ull) / 1000ull;
 247	writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
 248}
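/*
 * Example of the watchdog arithmetic above: with a 52 MHz card clock
 * and no explicit timeout, the 850 ms default programs roughly
 * 52000000 * 850 / 1000 = 44,200,000 clock cycles into MIO_EMM_WDOG.
 */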
 249
 250static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
 251{
 252	struct cvm_mmc_host *host = slot->host;
 253	u64 emm_switch, wdog;
 254
 255	emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
 256	emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
 257			MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
 258	set_bus_id(&emm_switch, slot->bus_id);
 259
 260	wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
 261	do_switch(slot->host, emm_switch);
 262
 263	slot->cached_switch = emm_switch;
 264
 265	msleep(20);
 266
 267	writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
 268}
 269
 270/* Switch to another slot if needed */
 271static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
 272{
 273	struct cvm_mmc_host *host = slot->host;
 274	struct cvm_mmc_slot *old_slot;
 275	u64 emm_sample, emm_switch;
 276
 277	if (slot->bus_id == host->last_slot)
 278		return;
 279
 280	if (host->last_slot >= 0 && host->slot[host->last_slot]) {
 281		old_slot = host->slot[host->last_slot];
 282		old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
 283		old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
 284	}
 285
 286	writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
 287	emm_switch = slot->cached_switch;
 288	set_bus_id(&emm_switch, slot->bus_id);
 289	do_switch(host, emm_switch);
 290
 291	emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
 292		     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
 293	writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));
 294
 295	host->last_slot = slot->bus_id;
 296}
 297
 298static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
 299		    u64 dbuf)
 300{
 301	struct sg_mapping_iter *smi = &host->smi;
 302	int data_len = req->data->blocks * req->data->blksz;
 303	int bytes_xfered, shift = -1;
 304	u64 dat = 0;
 305
 306	/* Auto inc from offset zero */
 307	writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));
 308
 309	for (bytes_xfered = 0; bytes_xfered < data_len;) {
 310		if (smi->consumed >= smi->length) {
 311			if (!sg_miter_next(smi))
 312				break;
 313			smi->consumed = 0;
 314		}
 315
 316		if (shift < 0) {
 317			dat = readq(host->base + MIO_EMM_BUF_DAT(host));
 318			shift = 56;
 319		}
 320
 321		while (smi->consumed < smi->length && shift >= 0) {
 322			((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
 323			bytes_xfered++;
 324			smi->consumed++;
 325			shift -= 8;
 326		}
 327	}
 328
 329	sg_miter_stop(smi);
 330	req->data->bytes_xfered = bytes_xfered;
 331	req->data->error = 0;
 332}
 333
 334static void do_write(struct mmc_request *req)
 335{
 336	req->data->bytes_xfered = req->data->blocks * req->data->blksz;
 337	req->data->error = 0;
 338}
 339
 340static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
 341			     u64 rsp_sts)
 342{
 343	u64 rsp_hi, rsp_lo;
 344
 345	if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
 346		return;
 347
 348	rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));
 349
 350	switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
 351	case 1:
 352	case 3:
 353		req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
 354		req->cmd->resp[1] = 0;
 355		req->cmd->resp[2] = 0;
 356		req->cmd->resp[3] = 0;
 357		break;
 358	case 2:
 359		req->cmd->resp[3] = rsp_lo & 0xffffffff;
 360		req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
 361		rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
 362		req->cmd->resp[1] = rsp_hi & 0xffffffff;
 363		req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
 364		break;
 365	}
 366}
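/*
 * As an example of the decoding above: for 48-bit (R1/R3-style)
 * responses the 32-bit payload that the MMC core expects in resp[0]
 * sits at bits <39:8> of MIO_EMM_RSP_LO, with the low byte presumably
 * holding the trailing CRC7 and end bit of the response token.
 */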
 367
 368static int get_dma_dir(struct mmc_data *data)
 369{
 370	return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 371}
 372
 373static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
 374{
 375	data->bytes_xfered = data->blocks * data->blksz;
  376	data->error = 0;
  377	return 1;
 378}
 379
 380static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
 381{
 382	u64 fifo_cfg;
 383	int count;
 384
 385	/* Check if there are any pending requests left */
 386	fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
 387	count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
 388	if (count)
 389		dev_err(host->dev, "%u requests still pending\n", count);
 390
 391	data->bytes_xfered = data->blocks * data->blksz;
 392	data->error = 0;
 393
 394	/* Clear and disable FIFO */
 395	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
 396	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
 397	return 1;
 398}
 399
 400static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
 401{
 402	if (host->use_sg && data->sg_len > 1)
 403		return finish_dma_sg(host, data);
 404	else
 405		return finish_dma_single(host, data);
 406}
 407
 408static int check_status(u64 rsp_sts)
 409{
 410	if (rsp_sts & MIO_EMM_RSP_STS_RSP_BAD_STS ||
 411	    rsp_sts & MIO_EMM_RSP_STS_RSP_CRC_ERR ||
 412	    rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
 413		return -EILSEQ;
 414	if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
 415	    rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
 416		return -ETIMEDOUT;
 417	if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
 418		return -EIO;
 419	return 0;
 420}
 421
 422/* Try to clean up failed DMA. */
 423static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
 424{
 425	u64 emm_dma;
 426
 427	emm_dma = readq(host->base + MIO_EMM_DMA(host));
 428	emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
 429		   FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
 430	set_bus_id(&emm_dma, get_bus_id(rsp_sts));
 431	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
 432}
 433
 434irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
 435{
 436	struct cvm_mmc_host *host = dev_id;
 437	struct mmc_request *req;
 438	unsigned long flags = 0;
 439	u64 emm_int, rsp_sts;
 440	bool host_done;
 441
 442	if (host->need_irq_handler_lock)
 443		spin_lock_irqsave(&host->irq_handler_lock, flags);
 444	else
 445		__acquire(&host->irq_handler_lock);
 446
  447	/* Clear interrupt bits (write 1 clears). */
 448	emm_int = readq(host->base + MIO_EMM_INT(host));
 449	writeq(emm_int, host->base + MIO_EMM_INT(host));
 450
 451	if (emm_int & MIO_EMM_INT_SWITCH_ERR)
 452		check_switch_errors(host);
 453
 454	req = host->current_req;
 455	if (!req)
 456		goto out;
 457
 458	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
 459	/*
 460	 * dma_val set means DMA is still in progress. Don't touch
 461	 * the request and wait for the interrupt indicating that
 462	 * the DMA is finished.
 463	 */
 464	if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
 465		goto out;
 466
 467	if (!host->dma_active && req->data &&
 468	    (emm_int & MIO_EMM_INT_BUF_DONE)) {
 469		unsigned int type = (rsp_sts >> 7) & 3;
 470
 471		if (type == 1)
 472			do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
 473		else if (type == 2)
 474			do_write(req);
 475	}
 476
 477	host_done = emm_int & MIO_EMM_INT_CMD_DONE ||
 478		    emm_int & MIO_EMM_INT_DMA_DONE ||
 479		    emm_int & MIO_EMM_INT_CMD_ERR  ||
 480		    emm_int & MIO_EMM_INT_DMA_ERR;
 481
 482	if (!(host_done && req->done))
 483		goto no_req_done;
 484
 485	req->cmd->error = check_status(rsp_sts);
 486
 487	if (host->dma_active && req->data)
 488		if (!finish_dma(host, req->data))
 489			goto no_req_done;
 490
 491	set_cmd_response(host, req, rsp_sts);
 492	if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
 493	    (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
 494		cleanup_dma(host, rsp_sts);
 495
 496	host->current_req = NULL;
 497	req->done(req);
 498
 499no_req_done:
 500	if (host->dmar_fixup_done)
 501		host->dmar_fixup_done(host);
 502	if (host_done)
 503		host->release_bus(host);
 504out:
 505	if (host->need_irq_handler_lock)
 506		spin_unlock_irqrestore(&host->irq_handler_lock, flags);
 507	else
 508		__release(&host->irq_handler_lock);
 509	return IRQ_RETVAL(emm_int != 0);
 510}
 511
 512/*
 513 * Program DMA_CFG and if needed DMA_ADR.
 514 * Returns 0 on error, DMA address otherwise.
 515 */
 516static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
 517{
 518	u64 dma_cfg, addr;
 519	int count, rw;
 520
 521	count = dma_map_sg(host->dev, data->sg, data->sg_len,
 522			   get_dma_dir(data));
 523	if (!count)
 524		return 0;
 525
 526	rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
 527	dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
 528		  FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
 529#ifdef __LITTLE_ENDIAN
 530	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1);
 531#endif
 532	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
 533			      (sg_dma_len(&data->sg[0]) / 8) - 1);
 534
 535	addr = sg_dma_address(&data->sg[0]);
 536	if (!host->big_dma_addr)
 537		dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
 538	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
 539
 540	pr_debug("[%s] sg_dma_len: %u  total sg_elem: %d\n",
 541		 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);
 542
 543	if (host->big_dma_addr)
 544		writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
 545	return addr;
 546}
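/*
 * Note on the SIZE field above: it is expressed in 64-bit words minus
 * one, so a single 512-byte segment is programmed as 512 / 8 - 1 = 63.
 */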
 547
 548/*
 549 * Queue complete sg list into the FIFO.
 550 * Returns 0 on error, 1 otherwise.
 551 */
 552static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
 553{
 554	struct scatterlist *sg;
 555	u64 fifo_cmd, addr;
 556	int count, i, rw;
 557
 558	count = dma_map_sg(host->dev, data->sg, data->sg_len,
 559			   get_dma_dir(data));
 560	if (!count)
 561		return 0;
 562	if (count > 16)
 563		goto error;
 564
 565	/* Enable FIFO by removing CLR bit */
 566	writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
 567
 568	for_each_sg(data->sg, sg, count, i) {
 569		/* Program DMA address */
 570		addr = sg_dma_address(sg);
 571		if (addr & 7)
 572			goto error;
 573		writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));
 574
 575		/*
 576		 * If we have scatter-gather support we also have an extra
 577		 * register for the DMA addr, so no need to check
 578		 * host->big_dma_addr here.
 579		 */
 580		rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
 581		fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw);
 582
 583		/* enable interrupts on the last element */
 584		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS,
 585				       (i + 1 == count) ? 0 : 1);
 586
 587#ifdef __LITTLE_ENDIAN
 588		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1);
 589#endif
 590		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
 591				       sg_dma_len(sg) / 8 - 1);
 592		/*
 593		 * The write copies the address and the command to the FIFO
 594		 * and increments the FIFO's COUNT field.
 595		 */
 596		writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
 597		pr_debug("[%s] sg_dma_len: %u  sg_elem: %d/%d\n",
 598			 (rw) ? "W" : "R", sg_dma_len(sg), i, count);
 599	}
 600
 601	/*
  602	 * Unlike prepare_dma_single, we don't return the
 603	 * address here, as it would not make sense for scatter-gather.
 604	 * The dma fixup is only required on models that don't support
 605	 * scatter-gather, so that is not a problem.
 606	 */
 607	return 1;
 608
 609error:
 610	WARN_ON_ONCE(1);
 611	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
 612	/* Disable FIFO */
 613	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
 614	return 0;
 615}
 616
 617static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
 618{
 619	if (host->use_sg && data->sg_len > 1)
 620		return prepare_dma_sg(host, data);
 621	else
 622		return prepare_dma_single(host, data);
 623}
 624
 625static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
 626{
 627	struct cvm_mmc_slot *slot = mmc_priv(mmc);
 628	u64 emm_dma;
 629
 630	emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
 631		  FIELD_PREP(MIO_EMM_DMA_SECTOR,
 632			     mmc_card_is_blockaddr(mmc->card) ? 1 : 0) |
 633		  FIELD_PREP(MIO_EMM_DMA_RW,
 634			     (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
 635		  FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
 636		  FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
 637	set_bus_id(&emm_dma, slot->bus_id);
 638
 639	if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
 640	    (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
 641		emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);
 642
 643	pr_debug("[%s] blocks: %u  multi: %d\n",
 644		(emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
 645		 mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
 646	return emm_dma;
 647}
 648
 649static void cvm_mmc_dma_request(struct mmc_host *mmc,
 650				struct mmc_request *mrq)
 651{
 652	struct cvm_mmc_slot *slot = mmc_priv(mmc);
 653	struct cvm_mmc_host *host = slot->host;
 654	struct mmc_data *data;
 655	u64 emm_dma, addr;
 656
 657	if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
 658	    !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
 659		dev_err(&mmc->card->dev,
  660			"Error: cvm_mmc_dma_request no data\n");
 661		goto error;
 662	}
 663
 664	cvm_mmc_switch_to(slot);
 665
 666	data = mrq->data;
 667	pr_debug("DMA request  blocks: %d  block_size: %d  total_size: %d\n",
 668		 data->blocks, data->blksz, data->blocks * data->blksz);
 669	if (data->timeout_ns)
 670		set_wdog(slot, data->timeout_ns);
 671
 672	WARN_ON(host->current_req);
 673	host->current_req = mrq;
 674
 675	emm_dma = prepare_ext_dma(mmc, mrq);
 676	addr = prepare_dma(host, data);
 677	if (!addr) {
 678		dev_err(host->dev, "prepare_dma failed\n");
 679		goto error;
 680	}
 681
 682	host->dma_active = true;
 683	host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
 684			 MIO_EMM_INT_DMA_ERR);
 685
 686	if (host->dmar_fixup)
 687		host->dmar_fixup(host, mrq->cmd, data, addr);
 688
 689	/*
 690	 * If we have a valid SD card in the slot, we set the response
 691	 * bit mask to check for CRC errors and timeouts only.
 692	 * Otherwise, use the default power reset value.
 693	 */
 694	if (mmc_card_sd(mmc->card))
 695		writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
 696	else
 697		writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
 698	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
 699	return;
 700
 701error:
 702	mrq->cmd->error = -EINVAL;
 703	if (mrq->done)
 704		mrq->done(mrq);
 705	host->release_bus(host);
 706}
 707
 708static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
 709{
 710	sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
 711		       SG_MITER_ATOMIC | SG_MITER_TO_SG);
 712}
 713
 714static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
 715{
 716	unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
 717	struct sg_mapping_iter *smi = &host->smi;
 718	unsigned int bytes_xfered;
 719	int shift = 56;
 720	u64 dat = 0;
 721
 722	/* Copy data to the xmit buffer before issuing the command. */
 723	sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);
 724
 725	/* Auto inc from offset zero, dbuf zero */
 726	writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));
 727
 728	for (bytes_xfered = 0; bytes_xfered < data_len;) {
 729		if (smi->consumed >= smi->length) {
 730			if (!sg_miter_next(smi))
 731				break;
 732			smi->consumed = 0;
 733		}
 734
 735		while (smi->consumed < smi->length && shift >= 0) {
 736			dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
 737			bytes_xfered++;
 738			smi->consumed++;
 739			shift -= 8;
 740		}
 741
 742		if (shift < 0) {
 743			writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
 744			shift = 56;
 745			dat = 0;
 746		}
 747	}
 748	sg_miter_stop(smi);
 749}
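/*
 * The packing loop above is effectively big-endian within each 64-bit
 * word: the first byte from the scatterlist lands in bits <63:56> of
 * MIO_EMM_BUF_DAT and the eighth byte in bits <7:0>, mirroring how
 * do_read() unpacks received data starting at shift 56.
 */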
 750
 751static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 752{
 753	struct cvm_mmc_slot *slot = mmc_priv(mmc);
 754	struct cvm_mmc_host *host = slot->host;
 755	struct mmc_command *cmd = mrq->cmd;
 756	struct cvm_mmc_cr_mods mods;
 757	u64 emm_cmd, rsp_sts;
 758	int retries = 100;
 759
 760	/*
 761	 * Note about locking:
 762	 * All MMC devices share the same bus and controller. Allow only a
 763	 * single user of the bootbus/MMC bus at a time. The lock is acquired
 764	 * on all entry points from the MMC layer.
 765	 *
 766	 * For requests the lock is only released after the completion
 767	 * interrupt!
 768	 */
 769	host->acquire_bus(host);
 770
 771	if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
 772	    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
 773		return cvm_mmc_dma_request(mmc, mrq);
 774
 775	cvm_mmc_switch_to(slot);
 776
 777	mods = cvm_mmc_get_cr_mods(cmd);
 778
 779	WARN_ON(host->current_req);
 780	host->current_req = mrq;
 781
 782	if (cmd->data) {
 783		if (cmd->data->flags & MMC_DATA_READ)
 784			do_read_request(host, mrq);
 785		else
 786			do_write_request(host, mrq);
 787
 788		if (cmd->data->timeout_ns)
 789			set_wdog(slot, cmd->data->timeout_ns);
 790	} else
 791		set_wdog(slot, 0);
 792
 793	host->dma_active = false;
 794	host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);
 795
 796	emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
 797		  FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
 798		  FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
 799		  FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
 800		  FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
 801	set_bus_id(&emm_cmd, slot->bus_id);
 802	if (cmd->data && mmc_cmd_type(cmd) == MMC_CMD_ADTC)
 803		emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET,
 804				64 - ((cmd->data->blocks * cmd->data->blksz) / 8));
 805
 806	writeq(0, host->base + MIO_EMM_STS_MASK(host));
 807
 808retry:
 809	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
 810	if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
 811	    rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
 812	    rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
 813	    rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
 814		udelay(10);
 815		if (--retries)
 816			goto retry;
 817	}
 818	if (!retries)
 819		dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
 820	writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
 821}
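/*
 * Example of the CMD_OFFSET arithmetic above: an 8-byte transfer such
 * as an SD SCR read programs 64 - 8/8 = 63, while a full 512-byte
 * block programs 64 - 512/8 = 0, i.e. the transfer occupies the whole
 * 512-byte hardware data buffer.
 */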
 822
 823static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 824{
 825	struct cvm_mmc_slot *slot = mmc_priv(mmc);
 826	struct cvm_mmc_host *host = slot->host;
 827	int clk_period = 0, power_class = 10, bus_width = 0;
 828	u64 clock, emm_switch;
 829
 830	host->acquire_bus(host);
 831	cvm_mmc_switch_to(slot);
 832
 833	/* Set the power state */
 834	switch (ios->power_mode) {
 835	case MMC_POWER_ON:
 836		break;
 837
 838	case MMC_POWER_OFF:
 839		cvm_mmc_reset_bus(slot);
 840		if (host->global_pwr_gpiod)
 841			host->set_shared_power(host, 0);
 842		else if (!IS_ERR(mmc->supply.vmmc))
 843			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
 844		break;
 845
 846	case MMC_POWER_UP:
 847		if (host->global_pwr_gpiod)
 848			host->set_shared_power(host, 1);
 849		else if (!IS_ERR(mmc->supply.vmmc))
 850			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 851		break;
 852	}
 853
 854	/* Convert bus width to HW definition */
 855	switch (ios->bus_width) {
 856	case MMC_BUS_WIDTH_8:
 857		bus_width = 2;
 858		break;
 859	case MMC_BUS_WIDTH_4:
 860		bus_width = 1;
 861		break;
 862	case MMC_BUS_WIDTH_1:
 863		bus_width = 0;
 864		break;
 865	}
 866
 867	/* DDR is available for 4/8 bit bus width */
 868	if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
 869		bus_width |= 4;
 870
 871	/* Change the clock frequency. */
 872	clock = ios->clock;
 873	if (clock > 52000000)
 874		clock = 52000000;
 875	slot->clock = clock;
 876
 877	if (clock)
 878		clk_period = (host->sys_freq + clock - 1) / (2 * clock);
 879
 880	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
 881				(ios->timing == MMC_TIMING_MMC_HS)) |
 882		     FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
 883		     FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
 884		     FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
 885		     FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
 886	set_bus_id(&emm_switch, slot->bus_id);
 887
 888	if (!switch_val_changed(slot, emm_switch))
 889		goto out;
 890
 891	set_wdog(slot, 0);
 892	do_switch(host, emm_switch);
 893	slot->cached_switch = emm_switch;
 894out:
 895	host->release_bus(host);
 896}
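/*
 * Worked example of the clk_period calculation above: assuming a
 * hypothetical 800 MHz sys_freq and a requested 52 MHz card clock,
 * (800000000 + 52000000 - 1) / (2 * 52000000) = 8, so CLK_HI and
 * CLK_LO are both set to 8 and the resulting bus clock is
 * 800 MHz / 16 = 50 MHz.
 */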
 897
 898static const struct mmc_host_ops cvm_mmc_ops = {
 899	.request        = cvm_mmc_request,
 900	.set_ios        = cvm_mmc_set_ios,
 901	.get_ro		= mmc_gpio_get_ro,
 902	.get_cd		= mmc_gpio_get_cd,
 903};
 904
 905static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
 906{
 907	struct mmc_host *mmc = slot->mmc;
 908
 909	clock = min(clock, mmc->f_max);
 910	clock = max(clock, mmc->f_min);
 911	slot->clock = clock;
 912}
 913
 914static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
 915{
 916	struct cvm_mmc_host *host = slot->host;
 917	u64 emm_switch;
 918
 919	/* Enable this bus slot. */
 920	host->emm_cfg |= (1ull << slot->bus_id);
 921	writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
 922	udelay(10);
 923
 924	/* Program initial clock speed and power. */
 925	cvm_mmc_set_clock(slot, slot->mmc->f_min);
 926	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
 927	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
 928				 (host->sys_freq / slot->clock) / 2);
 929	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
 930				 (host->sys_freq / slot->clock) / 2);
 931
 932	/* Make the changes take effect on this bus slot. */
 933	set_bus_id(&emm_switch, slot->bus_id);
 934	do_switch(host, emm_switch);
 935
 936	slot->cached_switch = emm_switch;
 937
 938	/*
 939	 * Set watchdog timeout value and default reset value
 940	 * for the mask register. Finally, set the CARD_RCA
 941	 * bit so that we can get the card address relative
 942	 * to the CMD register for CMD7 transactions.
 943	 */
 944	set_wdog(slot, 0);
 945	writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
 946	writeq(1, host->base + MIO_EMM_RCA(host));
 947	return 0;
 948}
 949
 950static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
 951{
 952	u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
 953	struct device_node *node = dev->of_node;
 954	struct mmc_host *mmc = slot->mmc;
 955	u64 clock_period;
 956	int ret;
 957
 958	ret = of_property_read_u32(node, "reg", &id);
 959	if (ret) {
 960		dev_err(dev, "Missing or invalid reg property on %pOF\n", node);
 961		return ret;
 962	}
 963
 964	if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
 965		dev_err(dev, "Invalid reg property on %pOF\n", node);
 966		return -EINVAL;
 967	}
 968
 969	ret = mmc_regulator_get_supply(mmc);
 970	if (ret)
 971		return ret;
 972	/*
  973	 * Legacy Octeon firmware has no regulator entry; fall back to
 974	 * a hard-coded voltage to get a sane OCR.
 975	 */
 976	if (IS_ERR(mmc->supply.vmmc))
 977		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 978
 979	/* Common MMC bindings */
 980	ret = mmc_of_parse(mmc);
 981	if (ret)
 982		return ret;
 983
 984	/* Set bus width */
 985	if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
 986		of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
 987		if (bus_width == 8)
 988			mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
 989		else if (bus_width == 4)
 990			mmc->caps |= MMC_CAP_4_BIT_DATA;
 991	}
 992
 993	/* Set maximum and minimum frequency */
 994	if (!mmc->f_max)
 995		of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
 996	if (!mmc->f_max || mmc->f_max > 52000000)
 997		mmc->f_max = 52000000;
 998	mmc->f_min = 400000;
 999
1000	/* Sampling register settings, period in picoseconds */
1001	clock_period = 1000000000000ull / slot->host->sys_freq;
1002	of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
1003	of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
1004	slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
1005	slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;
1006
1007	return id;
1008}
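/*
 * Worked example of the skew conversion above: with a hypothetical
 * 800 MHz sys_freq the clock period is 1250 ps, so a
 * "cavium,cmd-clk-skew" of 2500 ps yields
 * slot->cmd_cnt = (2500 + 625) / 1250 = 2, which cvm_mmc_switch_to()
 * later programs into MIO_EMM_SAMPLE_CMD_CNT.
 */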
1009
1010int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
1011{
1012	struct cvm_mmc_slot *slot;
1013	struct mmc_host *mmc;
1014	int ret, id;
1015
1016	mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
1017	if (!mmc)
1018		return -ENOMEM;
1019
1020	slot = mmc_priv(mmc);
1021	slot->mmc = mmc;
1022	slot->host = host;
1023
1024	ret = cvm_mmc_of_parse(dev, slot);
1025	if (ret < 0)
1026		goto error;
1027	id = ret;
1028
1029	/* Set up host parameters */
1030	mmc->ops = &cvm_mmc_ops;
1031
1032	/*
 1033	 * We only have a 3.3v supply, so we cannot support any
1034	 * of the UHS modes. We do support the high speed DDR
1035	 * modes up to 52MHz.
1036	 *
1037	 * Disable bounce buffers for max_segs = 1
1038	 */
1039	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
1040		     MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
1041		     MMC_CAP_3_3V_DDR;
1042
1043	if (host->use_sg)
1044		mmc->max_segs = 16;
1045	else
1046		mmc->max_segs = 1;
1047
1048	/* DMA size field can address up to 8 MB */
 1049	mmc->max_seg_size = 8 * 1024 * 1024;
 1050	mmc->max_req_size = mmc->max_seg_size;
1051	/* External DMA is in 512 byte blocks */
1052	mmc->max_blk_size = 512;
1053	/* DMA block count field is 15 bits */
1054	mmc->max_blk_count = 32767;
1055
1056	slot->clock = mmc->f_min;
1057	slot->bus_id = id;
1058	slot->cached_rca = 1;
1059
1060	host->acquire_bus(host);
1061	host->slot[id] = slot;
1062	cvm_mmc_switch_to(slot);
1063	cvm_mmc_init_lowlevel(slot);
1064	host->release_bus(host);
1065
1066	ret = mmc_add_host(mmc);
1067	if (ret) {
1068		dev_err(dev, "mmc_add_host() returned %d\n", ret);
1069		slot->host->slot[id] = NULL;
1070		goto error;
1071	}
1072	return 0;
1073
1074error:
1075	mmc_free_host(slot->mmc);
1076	return ret;
1077}
1078
1079int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot)
1080{
1081	mmc_remove_host(slot->mmc);
1082	slot->host->slot[slot->bus_id] = NULL;
1083	mmc_free_host(slot->mmc);
1084	return 0;
1085}
v5.4
   1/*
   2 * Shared part of driver for MMC/SDHC controller on Cavium OCTEON and
   3 * ThunderX SOCs.
   4 *
   5 * This file is subject to the terms and conditions of the GNU General Public
   6 * License.  See the file "COPYING" in the main directory of this archive
   7 * for more details.
   8 *
   9 * Copyright (C) 2012-2017 Cavium Inc.
  10 * Authors:
  11 *   David Daney <david.daney@cavium.com>
  12 *   Peter Swain <pswain@cavium.com>
  13 *   Steven J. Hill <steven.hill@cavium.com>
  14 *   Jan Glauber <jglauber@cavium.com>
  15 */
  16#include <linux/bitfield.h>
  17#include <linux/delay.h>
  18#include <linux/dma-direction.h>
  19#include <linux/dma-mapping.h>
  20#include <linux/gpio/consumer.h>
  21#include <linux/interrupt.h>
  22#include <linux/mmc/mmc.h>
  23#include <linux/mmc/slot-gpio.h>
  24#include <linux/module.h>
  25#include <linux/regulator/consumer.h>
  26#include <linux/scatterlist.h>
  27#include <linux/time.h>
  28
  29#include "cavium.h"
  30
  31const char *cvm_mmc_irq_names[] = {
  32	"MMC Buffer",
  33	"MMC Command",
  34	"MMC DMA",
  35	"MMC Command Error",
  36	"MMC DMA Error",
  37	"MMC Switch",
  38	"MMC Switch Error",
  39	"MMC DMA int Fifo",
  40	"MMC DMA int",
  41};
  42
  43/*
  44 * The Cavium MMC host hardware assumes that all commands have fixed
  45 * command and response types.  These are correct if MMC devices are
  46 * being used.  However, non-MMC devices like SD use command and
  47 * response types that are unexpected by the host hardware.
  48 *
  49 * The command and response types can be overridden by supplying an
  50 * XOR value that is applied to the type.  We calculate the XOR value
  51 * from the values in this table and the flags passed from the MMC
  52 * core.
  53 */
  54static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
  55	{0, 0},		/* CMD0 */
  56	{0, 3},		/* CMD1 */
  57	{0, 2},		/* CMD2 */
  58	{0, 1},		/* CMD3 */
  59	{0, 0},		/* CMD4 */
  60	{0, 1},		/* CMD5 */
  61	{0, 1},		/* CMD6 */
  62	{0, 1},		/* CMD7 */
  63	{1, 1},		/* CMD8 */
  64	{0, 2},		/* CMD9 */
  65	{0, 2},		/* CMD10 */
  66	{1, 1},		/* CMD11 */
  67	{0, 1},		/* CMD12 */
  68	{0, 1},		/* CMD13 */
  69	{1, 1},		/* CMD14 */
  70	{0, 0},		/* CMD15 */
  71	{0, 1},		/* CMD16 */
  72	{1, 1},		/* CMD17 */
  73	{1, 1},		/* CMD18 */
  74	{3, 1},		/* CMD19 */
  75	{2, 1},		/* CMD20 */
  76	{0, 0},		/* CMD21 */
  77	{0, 0},		/* CMD22 */
  78	{0, 1},		/* CMD23 */
  79	{2, 1},		/* CMD24 */
  80	{2, 1},		/* CMD25 */
  81	{2, 1},		/* CMD26 */
  82	{2, 1},		/* CMD27 */
  83	{0, 1},		/* CMD28 */
  84	{0, 1},		/* CMD29 */
  85	{1, 1},		/* CMD30 */
  86	{1, 1},		/* CMD31 */
  87	{0, 0},		/* CMD32 */
  88	{0, 0},		/* CMD33 */
  89	{0, 0},		/* CMD34 */
  90	{0, 1},		/* CMD35 */
  91	{0, 1},		/* CMD36 */
  92	{0, 0},		/* CMD37 */
  93	{0, 1},		/* CMD38 */
  94	{0, 4},		/* CMD39 */
  95	{0, 5},		/* CMD40 */
  96	{0, 0},		/* CMD41 */
  97	{2, 1},		/* CMD42 */
  98	{0, 0},		/* CMD43 */
  99	{0, 0},		/* CMD44 */
 100	{0, 0},		/* CMD45 */
 101	{0, 0},		/* CMD46 */
 102	{0, 0},		/* CMD47 */
 103	{0, 0},		/* CMD48 */
 104	{0, 0},		/* CMD49 */
 105	{0, 0},		/* CMD50 */
 106	{0, 0},		/* CMD51 */
 107	{0, 0},		/* CMD52 */
 108	{0, 0},		/* CMD53 */
 109	{0, 0},		/* CMD54 */
 110	{0, 1},		/* CMD55 */
 111	{0xff, 0xff},	/* CMD56 */
 112	{0, 0},		/* CMD57 */
 113	{0, 0},		/* CMD58 */
 114	{0, 0},		/* CMD59 */
 115	{0, 0},		/* CMD60 */
 116	{0, 0},		/* CMD61 */
 117	{0, 0},		/* CMD62 */
 118	{0, 0}		/* CMD63 */
 119};
 120
 121static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
 122{
 123	struct cvm_mmc_cr_type *cr;
 124	u8 hardware_ctype, hardware_rtype;
 125	u8 desired_ctype = 0, desired_rtype = 0;
 126	struct cvm_mmc_cr_mods r;
 127
 128	cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
 129	hardware_ctype = cr->ctype;
 130	hardware_rtype = cr->rtype;
 131	if (cmd->opcode == MMC_GEN_CMD)
 132		hardware_ctype = (cmd->arg & 1) ? 1 : 2;
 133
 134	switch (mmc_cmd_type(cmd)) {
 135	case MMC_CMD_ADTC:
 136		desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
 137		break;
 138	case MMC_CMD_AC:
 139	case MMC_CMD_BC:
 140	case MMC_CMD_BCR:
 141		desired_ctype = 0;
 142		break;
 143	}
 144
 145	switch (mmc_resp_type(cmd)) {
 146	case MMC_RSP_NONE:
 147		desired_rtype = 0;
 148		break;
 149	case MMC_RSP_R1:/* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
 150	case MMC_RSP_R1B:
 151		desired_rtype = 1;
 152		break;
 153	case MMC_RSP_R2:
 154		desired_rtype = 2;
 155		break;
 156	case MMC_RSP_R3: /* MMC_RSP_R4 */
 157		desired_rtype = 3;
 158		break;
 159	}
 160	r.ctype_xor = desired_ctype ^ hardware_ctype;
 161	r.rtype_xor = desired_rtype ^ hardware_rtype;
 162	return r;
 163}
 164
 165static void check_switch_errors(struct cvm_mmc_host *host)
 166{
 167	u64 emm_switch;
 168
 169	emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
 170	if (emm_switch & MIO_EMM_SWITCH_ERR0)
 171		dev_err(host->dev, "Switch power class error\n");
 172	if (emm_switch & MIO_EMM_SWITCH_ERR1)
 173		dev_err(host->dev, "Switch hs timing error\n");
 174	if (emm_switch & MIO_EMM_SWITCH_ERR2)
 175		dev_err(host->dev, "Switch bus width error\n");
 176}
 177
 178static void clear_bus_id(u64 *reg)
 179{
 180	u64 bus_id_mask = GENMASK_ULL(61, 60);
 181
 182	*reg &= ~bus_id_mask;
 183}
 184
 185static void set_bus_id(u64 *reg, int bus_id)
 186{
 187	clear_bus_id(reg);
 188	*reg |= FIELD_PREP(GENMASK(61, 60), bus_id);
 189}
 190
 191static int get_bus_id(u64 reg)
 192{
 193	return FIELD_GET(GENMASK_ULL(61, 60), reg);
 194}
 195
 196/*
 197 * We never set the switch_exe bit since that would interfere
 198 * with the commands send by the MMC core.
 199 */
 200static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
 201{
 202	int retries = 100;
 203	u64 rsp_sts;
 204	int bus_id;
 205
 206	/*
 207	 * Modes setting only taken from slot 0. Work around that hardware
 208	 * issue by first switching to slot 0.
 209	 */
 210	bus_id = get_bus_id(emm_switch);
 211	clear_bus_id(&emm_switch);
 212	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
 213
 214	set_bus_id(&emm_switch, bus_id);
 215	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
 216
 217	/* wait for the switch to finish */
 218	do {
 219		rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
 220		if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
 221			break;
 222		udelay(10);
 223	} while (--retries);
 224
 225	check_switch_errors(host);
 226}
 227
 228static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
 229{
 230	/* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
 231	u64 match = 0x3001070fffffffffull;
 232
 233	return (slot->cached_switch & match) != (new_val & match);
 234}
 235
 236static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
 237{
 238	u64 timeout;
 239
 240	if (!slot->clock)
 241		return;
 242
 243	if (ns)
 244		timeout = (slot->clock * ns) / NSEC_PER_SEC;
 245	else
 246		timeout = (slot->clock * 850ull) / 1000ull;
 247	writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
 248}
 249
 250static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
 251{
 252	struct cvm_mmc_host *host = slot->host;
 253	u64 emm_switch, wdog;
 254
 255	emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
 256	emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
 257			MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
 258	set_bus_id(&emm_switch, slot->bus_id);
 259
 260	wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
 261	do_switch(slot->host, emm_switch);
 262
 263	slot->cached_switch = emm_switch;
 264
 265	msleep(20);
 266
 267	writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
 268}
 269
 270/* Switch to another slot if needed */
 271static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
 272{
 273	struct cvm_mmc_host *host = slot->host;
 274	struct cvm_mmc_slot *old_slot;
 275	u64 emm_sample, emm_switch;
 276
 277	if (slot->bus_id == host->last_slot)
 278		return;
 279
 280	if (host->last_slot >= 0 && host->slot[host->last_slot]) {
 281		old_slot = host->slot[host->last_slot];
 282		old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
 283		old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
 284	}
 285
 286	writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
 287	emm_switch = slot->cached_switch;
 288	set_bus_id(&emm_switch, slot->bus_id);
 289	do_switch(host, emm_switch);
 290
 291	emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
 292		     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
 293	writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));
 294
 295	host->last_slot = slot->bus_id;
 296}
 297
 298static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
 299		    u64 dbuf)
 300{
 301	struct sg_mapping_iter *smi = &host->smi;
 302	int data_len = req->data->blocks * req->data->blksz;
 303	int bytes_xfered, shift = -1;
 304	u64 dat = 0;
 305
 306	/* Auto inc from offset zero */
 307	writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));
 308
 309	for (bytes_xfered = 0; bytes_xfered < data_len;) {
 310		if (smi->consumed >= smi->length) {
 311			if (!sg_miter_next(smi))
 312				break;
 313			smi->consumed = 0;
 314		}
 315
 316		if (shift < 0) {
 317			dat = readq(host->base + MIO_EMM_BUF_DAT(host));
 318			shift = 56;
 319		}
 320
 321		while (smi->consumed < smi->length && shift >= 0) {
 322			((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
 323			bytes_xfered++;
 324			smi->consumed++;
 325			shift -= 8;
 326		}
 327	}
 328
 329	sg_miter_stop(smi);
 330	req->data->bytes_xfered = bytes_xfered;
 331	req->data->error = 0;
 332}
 333
 334static void do_write(struct mmc_request *req)
 335{
 336	req->data->bytes_xfered = req->data->blocks * req->data->blksz;
 337	req->data->error = 0;
 338}
 339
 340static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
 341			     u64 rsp_sts)
 342{
 343	u64 rsp_hi, rsp_lo;
 344
 345	if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
 346		return;
 347
 348	rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));
 349
 350	switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
 351	case 1:
 352	case 3:
 353		req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
 354		req->cmd->resp[1] = 0;
 355		req->cmd->resp[2] = 0;
 356		req->cmd->resp[3] = 0;
 357		break;
 358	case 2:
 359		req->cmd->resp[3] = rsp_lo & 0xffffffff;
 360		req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
 361		rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
 362		req->cmd->resp[1] = rsp_hi & 0xffffffff;
 363		req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
 364		break;
 365	}
 366}
 367
 368static int get_dma_dir(struct mmc_data *data)
 369{
 370	return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 371}
 372
 373static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
 374{
 375	data->bytes_xfered = data->blocks * data->blksz;
 376	data->error = 0;
 377	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
 378	return 1;
 379}
 380
 381static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
 382{
 383	u64 fifo_cfg;
 384	int count;
 385
 386	/* Check if there are any pending requests left */
 387	fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
 388	count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
 389	if (count)
 390		dev_err(host->dev, "%u requests still pending\n", count);
 391
 392	data->bytes_xfered = data->blocks * data->blksz;
 393	data->error = 0;
 394
 395	/* Clear and disable FIFO */
 396	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
 397	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
 398	return 1;
 399}
 400
 401static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
 402{
 403	if (host->use_sg && data->sg_len > 1)
 404		return finish_dma_sg(host, data);
 405	else
 406		return finish_dma_single(host, data);
 407}
 408
 409static int check_status(u64 rsp_sts)
 410{
 411	if (rsp_sts & MIO_EMM_RSP_STS_RSP_BAD_STS ||
 412	    rsp_sts & MIO_EMM_RSP_STS_RSP_CRC_ERR ||
 413	    rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
 414		return -EILSEQ;
 415	if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
 416	    rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
 417		return -ETIMEDOUT;
 418	if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
 419		return -EIO;
 420	return 0;
 421}
 422
 423/* Try to clean up failed DMA. */
 424static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
 425{
 426	u64 emm_dma;
 427
 428	emm_dma = readq(host->base + MIO_EMM_DMA(host));
 429	emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
 430		   FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
 431	set_bus_id(&emm_dma, get_bus_id(rsp_sts));
 432	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
 433}
 434
 435irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
 436{
 437	struct cvm_mmc_host *host = dev_id;
 438	struct mmc_request *req;
 439	unsigned long flags = 0;
 440	u64 emm_int, rsp_sts;
 441	bool host_done;
 442
 443	if (host->need_irq_handler_lock)
 444		spin_lock_irqsave(&host->irq_handler_lock, flags);
 445	else
 446		__acquire(&host->irq_handler_lock);
 447
 448	/* Clear interrupt bits (write 1 clears ). */
 449	emm_int = readq(host->base + MIO_EMM_INT(host));
 450	writeq(emm_int, host->base + MIO_EMM_INT(host));
 451
 452	if (emm_int & MIO_EMM_INT_SWITCH_ERR)
 453		check_switch_errors(host);
 454
 455	req = host->current_req;
 456	if (!req)
 457		goto out;
 458
 459	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
 460	/*
 461	 * dma_val set means DMA is still in progress. Don't touch
 462	 * the request and wait for the interrupt indicating that
 463	 * the DMA is finished.
 464	 */
 465	if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
 466		goto out;
 467
 468	if (!host->dma_active && req->data &&
 469	    (emm_int & MIO_EMM_INT_BUF_DONE)) {
 470		unsigned int type = (rsp_sts >> 7) & 3;
 471
 472		if (type == 1)
 473			do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
 474		else if (type == 2)
 475			do_write(req);
 476	}
 477
 478	host_done = emm_int & MIO_EMM_INT_CMD_DONE ||
 479		    emm_int & MIO_EMM_INT_DMA_DONE ||
 480		    emm_int & MIO_EMM_INT_CMD_ERR  ||
 481		    emm_int & MIO_EMM_INT_DMA_ERR;
 482
 483	if (!(host_done && req->done))
 484		goto no_req_done;
 485
 486	req->cmd->error = check_status(rsp_sts);
 487
 488	if (host->dma_active && req->data)
 489		if (!finish_dma(host, req->data))
 490			goto no_req_done;
 491
 492	set_cmd_response(host, req, rsp_sts);
 493	if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
 494	    (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
 495		cleanup_dma(host, rsp_sts);
 496
 497	host->current_req = NULL;
 498	req->done(req);
 499
 500no_req_done:
 501	if (host->dmar_fixup_done)
 502		host->dmar_fixup_done(host);
 503	if (host_done)
 504		host->release_bus(host);
 505out:
 506	if (host->need_irq_handler_lock)
 507		spin_unlock_irqrestore(&host->irq_handler_lock, flags);
 508	else
 509		__release(&host->irq_handler_lock);
 510	return IRQ_RETVAL(emm_int != 0);
 511}
 512
 513/*
 514 * Program DMA_CFG and if needed DMA_ADR.
 515 * Returns 0 on error, DMA address otherwise.
 516 */
 517static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
 518{
 519	u64 dma_cfg, addr;
 520	int count, rw;
 521
 522	count = dma_map_sg(host->dev, data->sg, data->sg_len,
 523			   get_dma_dir(data));
 524	if (!count)
 525		return 0;
 526
 527	rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
 528	dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
 529		  FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
 530#ifdef __LITTLE_ENDIAN
 531	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1);
 532#endif
 533	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
 534			      (sg_dma_len(&data->sg[0]) / 8) - 1);
 535
 536	addr = sg_dma_address(&data->sg[0]);
 537	if (!host->big_dma_addr)
 538		dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
 539	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
 540
 541	pr_debug("[%s] sg_dma_len: %u  total sg_elem: %d\n",
 542		 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);
 543
 544	if (host->big_dma_addr)
 545		writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
 546	return addr;
 547}
 548
 549/*
 550 * Queue complete sg list into the FIFO.
 551 * Returns 0 on error, 1 otherwise.
 552 */
 553static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
 554{
 555	struct scatterlist *sg;
 556	u64 fifo_cmd, addr;
 557	int count, i, rw;
 558
 559	count = dma_map_sg(host->dev, data->sg, data->sg_len,
 560			   get_dma_dir(data));
 561	if (!count)
 562		return 0;
 563	if (count > 16)
 564		goto error;
 565
 566	/* Enable FIFO by removing CLR bit */
 567	writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
 568
 569	for_each_sg(data->sg, sg, count, i) {
 570		/* Program DMA address */
 571		addr = sg_dma_address(sg);
 572		if (addr & 7)
 573			goto error;
 574		writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));
 575
 576		/*
 577		 * If we have scatter-gather support we also have an extra
 578		 * register for the DMA addr, so no need to check
 579		 * host->big_dma_addr here.
 580		 */
 581		rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
 582		fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw);
 583
 584		/* enable interrupts on the last element */
 585		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS,
 586				       (i + 1 == count) ? 0 : 1);
 587
 588#ifdef __LITTLE_ENDIAN
 589		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1);
 590#endif
 591		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
 592				       sg_dma_len(sg) / 8 - 1);
 593		/*
 594		 * The write copies the address and the command to the FIFO
 595		 * and increments the FIFO's COUNT field.
 596		 */
 597		writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
 598		pr_debug("[%s] sg_dma_len: %u  sg_elem: %d/%d\n",
 599			 (rw) ? "W" : "R", sg_dma_len(sg), i, count);
 600	}
 601
 602	/*
 603	 * In difference to prepare_dma_single we don't return the
 604	 * address here, as it would not make sense for scatter-gather.
 605	 * The dma fixup is only required on models that don't support
 606	 * scatter-gather, so that is not a problem.
 607	 */
 608	return 1;
 609
 610error:
 611	WARN_ON_ONCE(1);
 612	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
 613	/* Disable FIFO */
 614	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
 615	return 0;
 616}
 617
 618static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
 619{
 620	if (host->use_sg && data->sg_len > 1)
 621		return prepare_dma_sg(host, data);
 622	else
 623		return prepare_dma_single(host, data);
 624}
 625
 626static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
 627{
 628	struct cvm_mmc_slot *slot = mmc_priv(mmc);
 629	u64 emm_dma;
 630
 631	emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
 632		  FIELD_PREP(MIO_EMM_DMA_SECTOR,
 633			     mmc_card_is_blockaddr(mmc->card) ? 1 : 0) |
 634		  FIELD_PREP(MIO_EMM_DMA_RW,
 635			     (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
 636		  FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
 637		  FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
 638	set_bus_id(&emm_dma, slot->bus_id);
 639
 640	if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
 641	    (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
 642		emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);
 643
 644	pr_debug("[%s] blocks: %u  multi: %d\n",
 645		(emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
 646		 mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
 647	return emm_dma;
 648}
 649
 650static void cvm_mmc_dma_request(struct mmc_host *mmc,
 651				struct mmc_request *mrq)
 652{
 653	struct cvm_mmc_slot *slot = mmc_priv(mmc);
 654	struct cvm_mmc_host *host = slot->host;
 655	struct mmc_data *data;
 656	u64 emm_dma, addr;
 657
 658	if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
 659	    !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
 660		dev_err(&mmc->card->dev,
 661			"Error: cmv_mmc_dma_request no data\n");
 662		goto error;
 663	}
 664
 665	cvm_mmc_switch_to(slot);
 666
 667	data = mrq->data;
 668	pr_debug("DMA request  blocks: %d  block_size: %d  total_size: %d\n",
 669		 data->blocks, data->blksz, data->blocks * data->blksz);
 670	if (data->timeout_ns)
 671		set_wdog(slot, data->timeout_ns);
 672
 673	WARN_ON(host->current_req);
 674	host->current_req = mrq;
 675
 676	emm_dma = prepare_ext_dma(mmc, mrq);
 677	addr = prepare_dma(host, data);
 678	if (!addr) {
 679		dev_err(host->dev, "prepare_dma failed\n");
 680		goto error;
 681	}
 682
 683	host->dma_active = true;
 684	host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
 685			 MIO_EMM_INT_DMA_ERR);
 686
 687	if (host->dmar_fixup)
 688		host->dmar_fixup(host, mrq->cmd, data, addr);
 689
 690	/*
 691	 * If we have a valid SD card in the slot, we set the response
 692	 * bit mask to check for CRC errors and timeouts only.
 693	 * Otherwise, use the default power reset value.
 694	 */
 695	if (mmc_card_sd(mmc->card))
 696		writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
 697	else
 698		writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
 699	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
 700	return;
 701
 702error:
 703	mrq->cmd->error = -EINVAL;
 704	if (mrq->done)
 705		mrq->done(mrq);
 706	host->release_bus(host);
 707}
 708
 709static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
 710{
 711	sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
 712		       SG_MITER_ATOMIC | SG_MITER_TO_SG);
 713}
 714
 715static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
 716{
 717	unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
 718	struct sg_mapping_iter *smi = &host->smi;
 719	unsigned int bytes_xfered;
 720	int shift = 56;
 721	u64 dat = 0;
 722
 723	/* Copy data to the xmit buffer before issuing the command. */
 724	sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);
 725
 726	/* Auto inc from offset zero, dbuf zero */
 727	writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));
 728
 729	for (bytes_xfered = 0; bytes_xfered < data_len;) {
 730		if (smi->consumed >= smi->length) {
 731			if (!sg_miter_next(smi))
 732				break;
 733			smi->consumed = 0;
 734		}
 735
 736		while (smi->consumed < smi->length && shift >= 0) {
 737			dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
 738			bytes_xfered++;
 739			smi->consumed++;
 740			shift -= 8;
 741		}
 742
 743		if (shift < 0) {
 744			writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
 745			shift = 56;
 746			dat = 0;
 747		}
 748	}
 749	sg_miter_stop(smi);
 750}
 751
 752static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 753{
 754	struct cvm_mmc_slot *slot = mmc_priv(mmc);
 755	struct cvm_mmc_host *host = slot->host;
 756	struct mmc_command *cmd = mrq->cmd;
 757	struct cvm_mmc_cr_mods mods;
 758	u64 emm_cmd, rsp_sts;
 759	int retries = 100;
 760
 761	/*
 762	 * Note about locking:
 763	 * All MMC devices share the same bus and controller. Allow only a
 764	 * single user of the bootbus/MMC bus at a time. The lock is acquired
 765	 * on all entry points from the MMC layer.
 766	 *
 767	 * For requests the lock is only released after the completion
 768	 * interrupt!
 769	 */
 770	host->acquire_bus(host);
 771
 772	if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
 773	    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
 774		return cvm_mmc_dma_request(mmc, mrq);
 775
 776	cvm_mmc_switch_to(slot);
 777
 778	mods = cvm_mmc_get_cr_mods(cmd);
 779
 780	WARN_ON(host->current_req);
 781	host->current_req = mrq;
 782
 783	if (cmd->data) {
 784		if (cmd->data->flags & MMC_DATA_READ)
 785			do_read_request(host, mrq);
 786		else
 787			do_write_request(host, mrq);
 788
 789		if (cmd->data->timeout_ns)
 790			set_wdog(slot, cmd->data->timeout_ns);
 791	} else
 792		set_wdog(slot, 0);
 793
 794	host->dma_active = false;
 795	host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);
 796
 797	emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
 798		  FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
 799		  FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
 800		  FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
 801		  FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
 802	set_bus_id(&emm_cmd, slot->bus_id);
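	/*
	 * For data commands routed through the internal buffer, the OFFSET
	 * field is expressed in 8-byte words; 64 - len/8 presumably places
	 * the data so that it ends at the top of the 512-byte buffer.
	 */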
 803	if (cmd->data && mmc_cmd_type(cmd) == MMC_CMD_ADTC)
 804		emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET,
 805				64 - ((cmd->data->blocks * cmd->data->blksz) / 8));
 806
 807	writeq(0, host->base + MIO_EMM_STS_MASK(host));
 808
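	/*
	 * Wait (up to 100 * 10 us) for any previously issued command,
	 * switch or DMA operation to finish before writing the new command.
	 */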
 809retry:
 810	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
 811	if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
 812	    rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
 813	    rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
 814	    rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
 815		udelay(10);
 816		if (--retries)
 817			goto retry;
 818	}
 819	if (!retries)
 820		dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
 821	writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
 822}
 823
 824static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 825{
 826	struct cvm_mmc_slot *slot = mmc_priv(mmc);
 827	struct cvm_mmc_host *host = slot->host;
 828	int clk_period = 0, power_class = 10, bus_width = 0;
 829	u64 clock, emm_switch;
 830
 831	host->acquire_bus(host);
 832	cvm_mmc_switch_to(slot);
 833
 834	/* Set the power state */
 835	switch (ios->power_mode) {
 836	case MMC_POWER_ON:
 837		break;
 838
 839	case MMC_POWER_OFF:
 840		cvm_mmc_reset_bus(slot);
 841		if (host->global_pwr_gpiod)
 842			host->set_shared_power(host, 0);
 843		else if (!IS_ERR(mmc->supply.vmmc))
 844			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
 845		break;
 846
 847	case MMC_POWER_UP:
 848		if (host->global_pwr_gpiod)
 849			host->set_shared_power(host, 1);
 850		else if (!IS_ERR(mmc->supply.vmmc))
 851			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 852		break;
 853	}
 854
 855	/* Convert bus width to HW definition */
 856	switch (ios->bus_width) {
 857	case MMC_BUS_WIDTH_8:
 858		bus_width = 2;
 859		break;
 860	case MMC_BUS_WIDTH_4:
 861		bus_width = 1;
 862		break;
 863	case MMC_BUS_WIDTH_1:
 864		bus_width = 0;
 865		break;
 866	}
 867
 868	/* DDR is available for 4/8 bit bus width */
 869	if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
 870		bus_width |= 4;
 871
 872	/* Change the clock frequency. */
 873	clock = ios->clock;
 874	if (clock > 52000000)
 875		clock = 52000000;
 876	slot->clock = clock;
 877
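	/*
	 * CLK_HI and CLK_LO each hold half of the MMC clock period in
	 * controller input-clock cycles, hence the division by 2 * clock,
	 * rounded up.
	 */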
 878	if (clock)
 879		clk_period = (host->sys_freq + clock - 1) / (2 * clock);
 880
 881	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
 882				(ios->timing == MMC_TIMING_MMC_HS)) |
 883		     FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
 884		     FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
 885		     FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
 886		     FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
 887	set_bus_id(&emm_switch, slot->bus_id);
 888
 889	if (!switch_val_changed(slot, emm_switch))
 890		goto out;
 891
 892	set_wdog(slot, 0);
 893	do_switch(host, emm_switch);
 894	slot->cached_switch = emm_switch;
 895out:
 896	host->release_bus(host);
 897}
 898
 899static const struct mmc_host_ops cvm_mmc_ops = {
 900	.request        = cvm_mmc_request,
 901	.set_ios        = cvm_mmc_set_ios,
 902	.get_ro		= mmc_gpio_get_ro,
 903	.get_cd		= mmc_gpio_get_cd,
 904};
 905
 906static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
 907{
 908	struct mmc_host *mmc = slot->mmc;
 909
 910	clock = min(clock, mmc->f_max);
 911	clock = max(clock, mmc->f_min);
 912	slot->clock = clock;
 913}
 914
 915static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
 916{
 917	struct cvm_mmc_host *host = slot->host;
 918	u64 emm_switch;
 919
 920	/* Enable this bus slot. */
 921	host->emm_cfg |= (1ull << slot->bus_id);
 922	writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
 923	udelay(10);
 924
 925	/* Program initial clock speed and power. */
 926	cvm_mmc_set_clock(slot, slot->mmc->f_min);
 927	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
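	/*
	 * As in cvm_mmc_set_ios(), CLK_HI and CLK_LO each carry half of
	 * the MMC clock period in input-clock cycles.
	 */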
 928	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
 929				 (host->sys_freq / slot->clock) / 2);
 930	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
 931				 (host->sys_freq / slot->clock) / 2);
 932
 933	/* Make the changes take effect on this bus slot. */
 934	set_bus_id(&emm_switch, slot->bus_id);
 935	do_switch(host, emm_switch);
 936
 937	slot->cached_switch = emm_switch;
 938
  939	/*
  940	 * Set the watchdog timeout value and the default reset value
  941	 * for the status mask register. Finally, program the relative
  942	 * card address (CARD_RCA) register to 1 so that CMD7
  943	 * (SELECT_CARD) transactions address the card correctly.
  944	 */
 945	set_wdog(slot, 0);
 946	writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
 947	writeq(1, host->base + MIO_EMM_RCA(host));
 948	return 0;
 949}
 950
 951static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
 952{
 953	u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
 954	struct device_node *node = dev->of_node;
 955	struct mmc_host *mmc = slot->mmc;
 956	u64 clock_period;
 957	int ret;
 958
 959	ret = of_property_read_u32(node, "reg", &id);
 960	if (ret) {
 961		dev_err(dev, "Missing or invalid reg property on %pOF\n", node);
 962		return ret;
 963	}
 964
 965	if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
 966		dev_err(dev, "Invalid reg property on %pOF\n", node);
 967		return -EINVAL;
 968	}
 969
 970	ret = mmc_regulator_get_supply(mmc);
 971	if (ret)
 972		return ret;
  973	/*
  974	 * Legacy Octeon firmware has no regulator entry; fall back to
  975	 * a hard-coded voltage to get a sane OCR.
  976	 */
 977	if (IS_ERR(mmc->supply.vmmc))
 978		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 979
 980	/* Common MMC bindings */
 981	ret = mmc_of_parse(mmc);
 982	if (ret)
 983		return ret;
 984
 985	/* Set bus width */
 986	if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
 987		of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
 988		if (bus_width == 8)
 989			mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
 990		else if (bus_width == 4)
 991			mmc->caps |= MMC_CAP_4_BIT_DATA;
 992	}
 993
 994	/* Set maximum and minimum frequency */
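	/* Legacy OCTEON device trees give the slot clock limit via "spi-max-frequency". */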
 995	if (!mmc->f_max)
 996		of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
 997	if (!mmc->f_max || mmc->f_max > 52000000)
 998		mmc->f_max = 52000000;
 999	mmc->f_min = 400000;
1000
1001	/* Sampling register settings, period in picoseconds */
1002	clock_period = 1000000000000ull / slot->host->sys_freq;
1003	of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
1004	of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
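	/*
	 * Convert the skews from picoseconds to input-clock cycles,
	 * rounding to the nearest cycle.
	 */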
1005	slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
1006	slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;
1007
1008	return id;
1009}
1010
1011int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
1012{
1013	struct cvm_mmc_slot *slot;
1014	struct mmc_host *mmc;
1015	int ret, id;
1016
1017	mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
1018	if (!mmc)
1019		return -ENOMEM;
1020
1021	slot = mmc_priv(mmc);
1022	slot->mmc = mmc;
1023	slot->host = host;
1024
1025	ret = cvm_mmc_of_parse(dev, slot);
1026	if (ret < 0)
1027		goto error;
1028	id = ret;
1029
1030	/* Set up host parameters */
1031	mmc->ops = &cvm_mmc_ops;
1032
 1033	/*
 1034	 * Since we only have a 3.3V supply, we cannot support any
 1035	 * of the UHS modes. We do support the high-speed DDR
 1036	 * modes up to 52 MHz.
 1037	 *
 1038	 * Disable bounce buffers for max_segs = 1.
 1039	 */
1040	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
1041		     MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
1042		     MMC_CAP_3_3V_DDR;
1043
1044	if (host->use_sg)
1045		mmc->max_segs = 16;
1046	else
1047		mmc->max_segs = 1;
1048
1049	/* DMA size field can address up to 8 MB */
1050	mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
1051				  dma_get_max_seg_size(host->dev));
1052	mmc->max_req_size = mmc->max_seg_size;
1053	/* External DMA is in 512 byte blocks */
1054	mmc->max_blk_size = 512;
1055	/* DMA block count field is 15 bits */
1056	mmc->max_blk_count = 32767;
1057
1058	slot->clock = mmc->f_min;
1059	slot->bus_id = id;
1060	slot->cached_rca = 1;
1061
1062	host->acquire_bus(host);
1063	host->slot[id] = slot;
1064	cvm_mmc_switch_to(slot);
1065	cvm_mmc_init_lowlevel(slot);
1066	host->release_bus(host);
1067
1068	ret = mmc_add_host(mmc);
1069	if (ret) {
1070		dev_err(dev, "mmc_add_host() returned %d\n", ret);
1071		slot->host->slot[id] = NULL;
1072		goto error;
1073	}
1074	return 0;
1075
1076error:
1077	mmc_free_host(slot->mmc);
1078	return ret;
1079}
1080
1081int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot)
1082{
1083	mmc_remove_host(slot->mmc);
1084	slot->host->slot[slot->bus_id] = NULL;
1085	mmc_free_host(slot->mmc);
1086	return 0;
1087}
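
/*
 * Illustrative sketch only, not part of this driver: roughly how a bus
 * glue driver could register the slots described by the controller's
 * device-tree children using the helpers above.  The function name
 * example_glue_probe() and the exact loop layout are assumptions; the
 * real OCTEON/ThunderX glue code differs.
 */
#if 0
static int example_glue_probe(struct platform_device *pdev,
			      struct cvm_mmc_host *host)
{
	struct device_node *child;
	int ret;

	for_each_child_of_node(pdev->dev.of_node, child) {
		struct platform_device *slot_pdev;

		/* Give each slot node its own struct device. */
		slot_pdev = of_platform_device_create(child, NULL, &pdev->dev);
		if (!slot_pdev)
			continue;

		/* Parse the slot and register it with the MMC core. */
		ret = cvm_mmc_of_slot_probe(&slot_pdev->dev, host);
		if (ret) {
			of_node_put(child);
			return ret;
		}
	}
	return 0;
}
#endif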