v3.1: drivers/mmc/host/mxcmmc.c
   1/*
   2 *  linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver
   3 *
   4 *  This is a driver for the SDHC controller found in Freescale MX2/MX3
   5 *  SoCs. It is basically the same hardware as found on MX1 (imxmmc.c).
   6 *  Unlike the hardware found on MX1, this hardware just works and does
   7 *  not need all the quirks found in imxmmc.c, hence the separate driver.
   8 *
   9 *  Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
  10 *  Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
  11 *
  12 *  derived from pxamci.c by Russell King
  13 *
  14 * This program is free software; you can redistribute it and/or modify
  15 * it under the terms of the GNU General Public License version 2 as
  16 * published by the Free Software Foundation.
  17 *
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/init.h>
  22#include <linux/ioport.h>
  23#include <linux/platform_device.h>
  24#include <linux/interrupt.h>
  25#include <linux/irq.h>
  26#include <linux/blkdev.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/mmc/host.h>
  29#include <linux/mmc/card.h>
  30#include <linux/delay.h>
  31#include <linux/clk.h>
  32#include <linux/io.h>
  33#include <linux/gpio.h>
  34#include <linux/regulator/consumer.h>
  35#include <linux/dmaengine.h>
  36
  37#include <asm/dma.h>
  38#include <asm/irq.h>
  39#include <asm/sizes.h>
  40#include <mach/mmc.h>
  41
  42#include <mach/dma.h>
  43
  44#define DRIVER_NAME "mxc-mmc"
  45
  46#define MMC_REG_STR_STP_CLK		0x00
  47#define MMC_REG_STATUS			0x04
  48#define MMC_REG_CLK_RATE		0x08
  49#define MMC_REG_CMD_DAT_CONT		0x0C
  50#define MMC_REG_RES_TO			0x10
  51#define MMC_REG_READ_TO			0x14
  52#define MMC_REG_BLK_LEN			0x18
  53#define MMC_REG_NOB			0x1C
  54#define MMC_REG_REV_NO			0x20
  55#define MMC_REG_INT_CNTR		0x24
  56#define MMC_REG_CMD			0x28
  57#define MMC_REG_ARG			0x2C
  58#define MMC_REG_RES_FIFO		0x34
  59#define MMC_REG_BUFFER_ACCESS		0x38
  60
  61#define STR_STP_CLK_RESET               (1 << 3)
  62#define STR_STP_CLK_START_CLK           (1 << 1)
  63#define STR_STP_CLK_STOP_CLK            (1 << 0)
  64
  65#define STATUS_CARD_INSERTION		(1 << 31)
  66#define STATUS_CARD_REMOVAL		(1 << 30)
  67#define STATUS_YBUF_EMPTY		(1 << 29)
  68#define STATUS_XBUF_EMPTY		(1 << 28)
  69#define STATUS_YBUF_FULL		(1 << 27)
  70#define STATUS_XBUF_FULL		(1 << 26)
  71#define STATUS_BUF_UND_RUN		(1 << 25)
  72#define STATUS_BUF_OVFL			(1 << 24)
  73#define STATUS_SDIO_INT_ACTIVE		(1 << 14)
  74#define STATUS_END_CMD_RESP		(1 << 13)
  75#define STATUS_WRITE_OP_DONE		(1 << 12)
  76#define STATUS_DATA_TRANS_DONE		(1 << 11)
  77#define STATUS_READ_OP_DONE		(1 << 11)
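/* Note: STATUS_DATA_TRANS_DONE and STATUS_READ_OP_DONE share bit 11 on this controller. */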
  78#define STATUS_WR_CRC_ERROR_CODE_MASK	(3 << 10)
  79#define STATUS_CARD_BUS_CLK_RUN		(1 << 8)
  80#define STATUS_BUF_READ_RDY		(1 << 7)
  81#define STATUS_BUF_WRITE_RDY		(1 << 6)
  82#define STATUS_RESP_CRC_ERR		(1 << 5)
  83#define STATUS_CRC_READ_ERR		(1 << 3)
  84#define STATUS_CRC_WRITE_ERR		(1 << 2)
  85#define STATUS_TIME_OUT_RESP		(1 << 1)
  86#define STATUS_TIME_OUT_READ		(1 << 0)
  87#define STATUS_ERR_MASK			0x2f
  88
  89#define CMD_DAT_CONT_CMD_RESP_LONG_OFF	(1 << 12)
  90#define CMD_DAT_CONT_STOP_READWAIT	(1 << 11)
  91#define CMD_DAT_CONT_START_READWAIT	(1 << 10)
  92#define CMD_DAT_CONT_BUS_WIDTH_4	(2 << 8)
  93#define CMD_DAT_CONT_INIT		(1 << 7)
  94#define CMD_DAT_CONT_WRITE		(1 << 4)
  95#define CMD_DAT_CONT_DATA_ENABLE	(1 << 3)
  96#define CMD_DAT_CONT_RESPONSE_48BIT_CRC	(1 << 0)
  97#define CMD_DAT_CONT_RESPONSE_136BIT	(2 << 0)
  98#define CMD_DAT_CONT_RESPONSE_48BIT	(3 << 0)
  99
 100#define INT_SDIO_INT_WKP_EN		(1 << 18)
 101#define INT_CARD_INSERTION_WKP_EN	(1 << 17)
 102#define INT_CARD_REMOVAL_WKP_EN		(1 << 16)
 103#define INT_CARD_INSERTION_EN		(1 << 15)
 104#define INT_CARD_REMOVAL_EN		(1 << 14)
 105#define INT_SDIO_IRQ_EN			(1 << 13)
 106#define INT_DAT0_EN			(1 << 12)
 107#define INT_BUF_READ_EN			(1 << 4)
 108#define INT_BUF_WRITE_EN		(1 << 3)
 109#define INT_END_CMD_RES_EN		(1 << 2)
 110#define INT_WRITE_OP_DONE_EN		(1 << 1)
 111#define INT_READ_OP_EN			(1 << 0)
 112
 113struct mxcmci_host {
 114	struct mmc_host		*mmc;
 115	struct resource		*res;
 116	void __iomem		*base;
 117	int			irq;
 118	int			detect_irq;
 119	struct dma_chan		*dma;
 120	struct dma_async_tx_descriptor *desc;
 121	int			do_dma;
 122	int			default_irq_mask;
 123	int			use_sdio;
 124	unsigned int		power_mode;
 125	struct imxmmc_platform_data *pdata;
 126
 127	struct mmc_request	*req;
 128	struct mmc_command	*cmd;
 129	struct mmc_data		*data;
 130
 131	unsigned int		datasize;
 132	unsigned int		dma_dir;
 133
 134	u16			rev_no;
 135	unsigned int		cmdat;
 136
 137	struct clk		*clk;
 138
 139	int			clock;
 140
 141	struct work_struct	datawork;
 142	spinlock_t		lock;
 143
 144	struct regulator	*vcc;
 145
 146	int			burstlen;
 147	int			dmareq;
 148	struct dma_slave_config dma_slave_config;
 149	struct imx_dma_data	dma_data;
 150};
 151
 152static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
 153
 154static inline void mxcmci_init_ocr(struct mxcmci_host *host)
 155{
 156	host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
 157
 158	if (IS_ERR(host->vcc)) {
 159		host->vcc = NULL;
 160	} else {
 161		host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
 162		if (host->pdata && host->pdata->ocr_avail)
 163			dev_warn(mmc_dev(host->mmc),
 164				"pdata->ocr_avail will not be used\n");
 165	}
 166
 167	if (host->vcc == NULL) {
 168		/* fall-back to platform data */
 169		if (host->pdata && host->pdata->ocr_avail)
 170			host->mmc->ocr_avail = host->pdata->ocr_avail;
 171		else
 172			host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 173	}
 174}
 175
 176static inline void mxcmci_set_power(struct mxcmci_host *host,
 177				    unsigned char power_mode,
 178				    unsigned int vdd)
 179{
 180	if (host->vcc) {
 181		if (power_mode == MMC_POWER_UP)
 182			mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
 183		else if (power_mode == MMC_POWER_OFF)
 184			mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
 185	}
 186
 187	if (host->pdata && host->pdata->setpower)
 188		host->pdata->setpower(mmc_dev(host->mmc), vdd);
 189}
 190
 191static inline int mxcmci_use_dma(struct mxcmci_host *host)
 192{
 193	return host->do_dma;
 194}
 195
 196static void mxcmci_softreset(struct mxcmci_host *host)
 197{
 198	int i;
 199
 200	dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");
 201
 202	/* reset sequence */
 203	writew(STR_STP_CLK_RESET, host->base + MMC_REG_STR_STP_CLK);
 204	writew(STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
 205			host->base + MMC_REG_STR_STP_CLK);
 206
 207	for (i = 0; i < 8; i++)
 208		writew(STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
 209
 210	writew(0xff, host->base + MMC_REG_RES_TO);
 211}
 212static int mxcmci_setup_dma(struct mmc_host *mmc);
 213
 214static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 215{
 216	unsigned int nob = data->blocks;
 217	unsigned int blksz = data->blksz;
 218	unsigned int datasize = nob * blksz;
 219	struct scatterlist *sg;
 220	int i, nents;
 221
 222	if (data->flags & MMC_DATA_STREAM)
 223		nob = 0xffff;
 224
 225	host->data = data;
 226	data->bytes_xfered = 0;
 227
 228	writew(nob, host->base + MMC_REG_NOB);
 229	writew(blksz, host->base + MMC_REG_BLK_LEN);
 230	host->datasize = datasize;
 231
 232	if (!mxcmci_use_dma(host))
 233		return 0;
 234
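	/* DMA is only used when every scatterlist segment has a 32-bit aligned
	 * offset and length; otherwise the transfer falls back to PIO. */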
 235	for_each_sg(data->sg, sg, data->sg_len, i) {
 236		if (sg->offset & 3 || sg->length & 3) {
 237			host->do_dma = 0;
 238			return 0;
 239		}
 240	}
 241
 242	if (data->flags & MMC_DATA_READ)
 243		host->dma_dir = DMA_FROM_DEVICE;
 244	else
 245		host->dma_dir = DMA_TO_DEVICE;
 246
 247	nents = dma_map_sg(host->dma->device->dev, data->sg,
 248				     data->sg_len,  host->dma_dir);
 249	if (nents != data->sg_len)
 250		return -EINVAL;
 251
 252	host->desc = host->dma->device->device_prep_slave_sg(host->dma,
 253		data->sg, data->sg_len, host->dma_dir,
 254		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 255
 256	if (!host->desc) {
 257		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 258				host->dma_dir);
 259		host->do_dma = 0;
 260		return 0; /* Fall back to PIO */
 261	}
 262	wmb();
 263
 264	dmaengine_submit(host->desc);
 265
 266	return 0;
 267}
 268
 269static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
 270		unsigned int cmdat)
 271{
 272	u32 int_cntr = host->default_irq_mask;
 273	unsigned long flags;
 274
 275	WARN_ON(host->cmd != NULL);
 276	host->cmd = cmd;
 277
 278	switch (mmc_resp_type(cmd)) {
 279	case MMC_RSP_R1: /* short CRC, OPCODE */
 280	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
 281		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT_CRC;
 282		break;
 283	case MMC_RSP_R2: /* long 136 bit + CRC */
 284		cmdat |= CMD_DAT_CONT_RESPONSE_136BIT;
 285		break;
 286	case MMC_RSP_R3: /* short */
 287		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT;
 288		break;
 289	case MMC_RSP_NONE:
 290		break;
 291	default:
 292		dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n",
 293				mmc_resp_type(cmd));
 294		cmd->error = -EINVAL;
 295		return -EINVAL;
 296	}
 297
 298	int_cntr = INT_END_CMD_RES_EN;
 299
 300	if (mxcmci_use_dma(host))
 301		int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN;
 302
 303	spin_lock_irqsave(&host->lock, flags);
 304	if (host->use_sdio)
 305		int_cntr |= INT_SDIO_IRQ_EN;
 306	writel(int_cntr, host->base + MMC_REG_INT_CNTR);
 307	spin_unlock_irqrestore(&host->lock, flags);
 308
 309	writew(cmd->opcode, host->base + MMC_REG_CMD);
 310	writel(cmd->arg, host->base + MMC_REG_ARG);
 311	writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT);
 312
 313	return 0;
 314}
 315
 316static void mxcmci_finish_request(struct mxcmci_host *host,
 317		struct mmc_request *req)
 318{
 319	u32 int_cntr = host->default_irq_mask;
 320	unsigned long flags;
 321
 322	spin_lock_irqsave(&host->lock, flags);
 323	if (host->use_sdio)
 324		int_cntr |= INT_SDIO_IRQ_EN;
 325	writel(int_cntr, host->base + MMC_REG_INT_CNTR);
 326	spin_unlock_irqrestore(&host->lock, flags);
 327
 328	host->req = NULL;
 329	host->cmd = NULL;
 330	host->data = NULL;
 331
 332	mmc_request_done(host->mmc, req);
 333}
 334
 335static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
 336{
 337	struct mmc_data *data = host->data;
 338	int data_error;
 339
 340	if (mxcmci_use_dma(host)) {
 341		dmaengine_terminate_all(host->dma);
 342		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 343				host->dma_dir);
 344	}
 345
 346	if (stat & STATUS_ERR_MASK) {
 347		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
 348				stat);
 349		if (stat & STATUS_CRC_READ_ERR) {
 350			dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
 351			data->error = -EILSEQ;
 352		} else if (stat & STATUS_CRC_WRITE_ERR) {
 353			u32 err_code = (stat >> 9) & 0x3;
 354			if (err_code == 2) { /* No CRC response */
 355				dev_err(mmc_dev(host->mmc),
 356					"%s: No CRC -ETIMEDOUT\n", __func__);
 357				data->error = -ETIMEDOUT;
 358			} else {
 359				dev_err(mmc_dev(host->mmc),
 360					"%s: -EILSEQ\n", __func__);
 361				data->error = -EILSEQ;
 362			}
 363		} else if (stat & STATUS_TIME_OUT_READ) {
 364			dev_err(mmc_dev(host->mmc),
 365				"%s: read -ETIMEDOUT\n", __func__);
 366			data->error = -ETIMEDOUT;
 367		} else {
 368			dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
 369			data->error = -EIO;
 370		}
 371	} else {
 372		data->bytes_xfered = host->datasize;
 373	}
 374
 375	data_error = data->error;
 376
 377	host->data = NULL;
 378
 379	return data_error;
 380}
 381
 382static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)
 383{
 384	struct mmc_command *cmd = host->cmd;
 385	int i;
 386	u32 a, b, c;
 387
 388	if (!cmd)
 389		return;
 390
 391	if (stat & STATUS_TIME_OUT_RESP) {
 392		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
 393		cmd->error = -ETIMEDOUT;
 394	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
 395		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
 396		cmd->error = -EILSEQ;
 397	}
 398
 399	if (cmd->flags & MMC_RSP_PRESENT) {
 400		if (cmd->flags & MMC_RSP_136) {
 401			for (i = 0; i < 4; i++) {
 402				a = readw(host->base + MMC_REG_RES_FIFO);
 403				b = readw(host->base + MMC_REG_RES_FIFO);
 404				cmd->resp[i] = a << 16 | b;
 405			}
 406		} else {
 407			a = readw(host->base + MMC_REG_RES_FIFO);
 408			b = readw(host->base + MMC_REG_RES_FIFO);
 409			c = readw(host->base + MMC_REG_RES_FIFO);
 410			cmd->resp[0] = a << 24 | b << 8 | c >> 8;
 411		}
 412	}
 413}
 414
 415static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
 416{
 417	u32 stat;
 418	unsigned long timeout = jiffies + HZ;
 419
 420	do {
 421		stat = readl(host->base + MMC_REG_STATUS);
 422		if (stat & STATUS_ERR_MASK)
 423			return stat;
 424		if (time_after(jiffies, timeout)) {
 425			mxcmci_softreset(host);
 426			mxcmci_set_clk_rate(host, host->clock);
 427			return STATUS_TIME_OUT_READ;
 428		}
 429		if (stat & mask)
 430			return 0;
 431		cpu_relax();
 432	} while (1);
 433}
 434
 435static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
 436{
 437	unsigned int stat;
 438	u32 *buf = _buf;
 439
 440	while (bytes > 3) {
 441		stat = mxcmci_poll_status(host,
 442				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
 443		if (stat)
 444			return stat;
 445		*buf++ = readl(host->base + MMC_REG_BUFFER_ACCESS);
 446		bytes -= 4;
 447	}
 448
 449	if (bytes) {
 450		u8 *b = (u8 *)buf;
 451		u32 tmp;
 452
 453		stat = mxcmci_poll_status(host,
 454				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
 455		if (stat)
 456			return stat;
 457		tmp = readl(host->base + MMC_REG_BUFFER_ACCESS);
 458		memcpy(b, &tmp, bytes);
 459	}
 460
 461	return 0;
 462}
 463
 464static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
 465{
 466	unsigned int stat;
 467	u32 *buf = _buf;
 468
 469	while (bytes > 3) {
 470		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 471		if (stat)
 472			return stat;
 473		writel(*buf++, host->base + MMC_REG_BUFFER_ACCESS);
 474		bytes -= 4;
 475	}
 476
 477	if (bytes) {
 478		u8 *b = (u8 *)buf;
 479		u32 tmp;
 480
 481		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 482		if (stat)
 483			return stat;
 484
 485		memcpy(&tmp, b, bytes);
 486		writel(tmp, host->base + MMC_REG_BUFFER_ACCESS);
 487	}
 488
 489	stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 490	if (stat)
 491		return stat;
 492
 493	return 0;
 494}
 495
 496static int mxcmci_transfer_data(struct mxcmci_host *host)
 497{
 498	struct mmc_data *data = host->req->data;
 499	struct scatterlist *sg;
 500	int stat, i;
 501
 502	host->data = data;
 503	host->datasize = 0;
 504
 505	if (data->flags & MMC_DATA_READ) {
 506		for_each_sg(data->sg, sg, data->sg_len, i) {
 507			stat = mxcmci_pull(host, sg_virt(sg), sg->length);
 508			if (stat)
 509				return stat;
 510			host->datasize += sg->length;
 511		}
 512	} else {
 513		for_each_sg(data->sg, sg, data->sg_len, i) {
 514			stat = mxcmci_push(host, sg_virt(sg), sg->length);
 515			if (stat)
 516				return stat;
 517			host->datasize += sg->length;
 518		}
 519		stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
 520		if (stat)
 521			return stat;
 522	}
 523	return 0;
 524}
 525
 526static void mxcmci_datawork(struct work_struct *work)
 527{
 528	struct mxcmci_host *host = container_of(work, struct mxcmci_host,
 529						  datawork);
 530	int datastat = mxcmci_transfer_data(host);
 531
 532	writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
 533		host->base + MMC_REG_STATUS);
 534	mxcmci_finish_data(host, datastat);
 535
 536	if (host->req->stop) {
 537		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
 538			mxcmci_finish_request(host, host->req);
 539			return;
 540		}
 541	} else {
 542		mxcmci_finish_request(host, host->req);
 543	}
 544}
 545
 546static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
 547{
 548	struct mmc_data *data = host->data;
 549	int data_error;
 550
 551	if (!data)
 552		return;
 553
 554	data_error = mxcmci_finish_data(host, stat);
 555
 556	mxcmci_read_response(host, stat);
 557	host->cmd = NULL;
 558
 559	if (host->req->stop) {
 560		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
 561			mxcmci_finish_request(host, host->req);
 562			return;
 563		}
 564	} else {
 565		mxcmci_finish_request(host, host->req);
 566	}
 567}
 568
 569static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
 570{
 571	mxcmci_read_response(host, stat);
 572	host->cmd = NULL;
 573
 574	if (!host->data && host->req) {
 575		mxcmci_finish_request(host, host->req);
 576		return;
 577	}
 578
 579	/* For the DMA case the DMA engine handles the data transfer
 580	 * automatically. For non DMA we have to do it ourselves.
 581	 * Don't do it in interrupt context though.
 582	 */
 583	if (!mxcmci_use_dma(host) && host->data)
 584		schedule_work(&host->datawork);
 585
 586}
 587
 588static irqreturn_t mxcmci_irq(int irq, void *devid)
 589{
 590	struct mxcmci_host *host = devid;
 591	unsigned long flags;
 592	bool sdio_irq;
 593	u32 stat;
 594
 595	stat = readl(host->base + MMC_REG_STATUS);
 596	writel(stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
 597			STATUS_WRITE_OP_DONE), host->base + MMC_REG_STATUS);
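	/* Ack everything except the SDIO and data-done bits, which are
	 * acknowledged separately below once they have been handled. */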
 598
 599	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
 600
 601	spin_lock_irqsave(&host->lock, flags);
 602	sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
 603	spin_unlock_irqrestore(&host->lock, flags);
 604
 605	if (mxcmci_use_dma(host) &&
 606	    (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
 607		writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
 608			host->base + MMC_REG_STATUS);
 609
 610	if (sdio_irq) {
 611		writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
 612		mmc_signal_sdio_irq(host->mmc);
 613	}
 614
 615	if (stat & STATUS_END_CMD_RESP)
 616		mxcmci_cmd_done(host, stat);
 617
 618	if (mxcmci_use_dma(host) &&
 619		  (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
 620		mxcmci_data_done(host, stat);
 621
 622	if (host->default_irq_mask &&
 623		  (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
 624		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
 625
 626	return IRQ_HANDLED;
 627}
 628
 629static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
 630{
 631	struct mxcmci_host *host = mmc_priv(mmc);
 632	unsigned int cmdat = host->cmdat;
 633	int error;
 634
 635	WARN_ON(host->req != NULL);
 636
 637	host->req = req;
 638	host->cmdat &= ~CMD_DAT_CONT_INIT;
 639
 640	if (host->dma)
 641		host->do_dma = 1;
 642
 643	if (req->data) {
 644		error = mxcmci_setup_data(host, req->data);
 645		if (error) {
 646			req->cmd->error = error;
 647			goto out;
 648		}
 649
 650
 651		cmdat |= CMD_DAT_CONT_DATA_ENABLE;
 652
 653		if (req->data->flags & MMC_DATA_WRITE)
 654			cmdat |= CMD_DAT_CONT_WRITE;
 655	}
 656
 657	error = mxcmci_start_cmd(host, req->cmd, cmdat);
 658
 659out:
 660	if (error)
 661		mxcmci_finish_request(host, req);
 662}
 663
 664static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
 665{
 666	unsigned int divider;
 667	int prescaler = 0;
 668	unsigned int clk_in = clk_get_rate(host->clk);
 669
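	/* Walk the prescaler/divider combinations until the resulting card
	 * clock drops to or below the requested rate (clk_ios). */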
 670	while (prescaler <= 0x800) {
 671		for (divider = 1; divider <= 0xF; divider++) {
 672			int x;
 673
 674			x = (clk_in / (divider + 1));
 675
 676			if (prescaler)
 677				x /= (prescaler * 2);
 678
 679			if (x <= clk_ios)
 680				break;
 681		}
 682		if (divider < 0x10)
 683			break;
 684
 685		if (prescaler == 0)
 686			prescaler = 1;
 687		else
 688			prescaler <<= 1;
 689	}
 690
 691	writew((prescaler << 4) | divider, host->base + MMC_REG_CLK_RATE);
 692
 693	dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",
 694			prescaler, divider, clk_in, clk_ios);
 695}
 696
 697static int mxcmci_setup_dma(struct mmc_host *mmc)
 698{
 699	struct mxcmci_host *host = mmc_priv(mmc);
 700	struct dma_slave_config *config = &host->dma_slave_config;
 701
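	/* Both directions target the same data FIFO register; only the burst
	 * length changes with the bus width (see mxcmci_set_ios). */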
 702	config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
 703	config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
 704	config->dst_addr_width = 4;
 705	config->src_addr_width = 4;
 706	config->dst_maxburst = host->burstlen;
 707	config->src_maxburst = host->burstlen;
 708
 709	return dmaengine_slave_config(host->dma, config);
 710}
 711
 712static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 713{
 714	struct mxcmci_host *host = mmc_priv(mmc);
 715	int burstlen, ret;
 716
 717	/*
 718	 * use burstlen of 64 (16 words) in 4 bit mode (--> reg value  0)
 719	 * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16)
 720	 */
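	/* burstlen below is counted in 32-bit words, matching the 4-byte DMA
	 * address width configured in mxcmci_setup_dma(). */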
 721	if (ios->bus_width == MMC_BUS_WIDTH_4)
 722		burstlen = 16;
 723	else
 724		burstlen = 4;
 725
 726	if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
 727		host->burstlen = burstlen;
 728		ret = mxcmci_setup_dma(mmc);
 729		if (ret) {
 730			dev_err(mmc_dev(host->mmc),
 731				"failed to config DMA channel. Falling back to PIO\n");
 732			dma_release_channel(host->dma);
 733			host->do_dma = 0;
 734		}
 735	}
 736
 737	if (ios->bus_width == MMC_BUS_WIDTH_4)
 738		host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
 739	else
 740		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
 741
 742	if (host->power_mode != ios->power_mode) {
 743		mxcmci_set_power(host, ios->power_mode, ios->vdd);
 744		host->power_mode = ios->power_mode;
 745
 746		if (ios->power_mode == MMC_POWER_ON)
 747			host->cmdat |= CMD_DAT_CONT_INIT;
 748	}
 749
 750	if (ios->clock) {
 751		mxcmci_set_clk_rate(host, ios->clock);
 752		writew(STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
 753	} else {
 754		writew(STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK);
 755	}
 756
 757	host->clock = ios->clock;
 758}
 759
 760static irqreturn_t mxcmci_detect_irq(int irq, void *data)
 761{
 762	struct mmc_host *mmc = data;
 763
 764	dev_dbg(mmc_dev(mmc), "%s\n", __func__);
 765
 766	mmc_detect_change(mmc, msecs_to_jiffies(250));
 767	return IRQ_HANDLED;
 768}
 769
 770static int mxcmci_get_ro(struct mmc_host *mmc)
 771{
 772	struct mxcmci_host *host = mmc_priv(mmc);
 773
 774	if (host->pdata && host->pdata->get_ro)
 775		return !!host->pdata->get_ro(mmc_dev(mmc));
 776	/*
 777	 * Board doesn't support read only detection; let the mmc core
 778	 * decide what to do.
 779	 */
 780	return -ENOSYS;
 781}
 782
 783static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
 784{
 785	struct mxcmci_host *host = mmc_priv(mmc);
 786	unsigned long flags;
 787	u32 int_cntr;
 788
 789	spin_lock_irqsave(&host->lock, flags);
 790	host->use_sdio = enable;
 791	int_cntr = readl(host->base + MMC_REG_INT_CNTR);
 792
 793	if (enable)
 794		int_cntr |= INT_SDIO_IRQ_EN;
 795	else
 796		int_cntr &= ~INT_SDIO_IRQ_EN;
 797
 798	writel(int_cntr, host->base + MMC_REG_INT_CNTR);
 799	spin_unlock_irqrestore(&host->lock, flags);
 800}
 801
 802static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
 803{
 804	/*
 805	 * MX3 SoCs have a silicon bug which corrupts CRC calculation of
 806	 * multi-block transfers when connected SDIO peripheral doesn't
 807	 * drive the BUSY line as required by the specs.
 808	 * One way to prevent this is to only allow 1-bit transfers.
 809	 */
 810
 811	if (cpu_is_mx3() && card->type == MMC_TYPE_SDIO)
 812		host->caps &= ~MMC_CAP_4_BIT_DATA;
 813	else
 814		host->caps |= MMC_CAP_4_BIT_DATA;
 815}
 816
 817static bool filter(struct dma_chan *chan, void *param)
 818{
 819	struct mxcmci_host *host = param;
 820
 821	if (!imx_dma_is_general_purpose(chan))
 822		return false;
 823
 824	chan->private = &host->dma_data;
 825
 826	return true;
 827}
 828
 829static const struct mmc_host_ops mxcmci_ops = {
 830	.request		= mxcmci_request,
 831	.set_ios		= mxcmci_set_ios,
 832	.get_ro			= mxcmci_get_ro,
 833	.enable_sdio_irq	= mxcmci_enable_sdio_irq,
 834	.init_card		= mxcmci_init_card,
 835};
 836
 837static int mxcmci_probe(struct platform_device *pdev)
 838{
 839	struct mmc_host *mmc;
 840	struct mxcmci_host *host = NULL;
 841	struct resource *iores, *r;
 842	int ret = 0, irq;
 843	dma_cap_mask_t mask;
 844
 845	printk(KERN_INFO "i.MX SDHC driver\n");
 846
 847	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 848	irq = platform_get_irq(pdev, 0);
 849	if (!iores || irq < 0)
 850		return -EINVAL;
 851
 852	r = request_mem_region(iores->start, resource_size(iores), pdev->name);
 853	if (!r)
 854		return -EBUSY;
 855
 856	mmc = mmc_alloc_host(sizeof(struct mxcmci_host), &pdev->dev);
 857	if (!mmc) {
 858		ret = -ENOMEM;
 859		goto out_release_mem;
 860	}
 861
 862	mmc->ops = &mxcmci_ops;
 863	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
 864
 865	/* MMC core transfer sizes tunable parameters */
 866	mmc->max_segs = 64;
 867	mmc->max_blk_size = 2048;
 868	mmc->max_blk_count = 65535;
 869	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
 870	mmc->max_seg_size = mmc->max_req_size;
 871
 872	host = mmc_priv(mmc);
 873	host->base = ioremap(r->start, resource_size(r));
 874	if (!host->base) {
 875		ret = -ENOMEM;
 876		goto out_free;
 877	}
 878
 879	host->mmc = mmc;
 880	host->pdata = pdev->dev.platform_data;
 881	spin_lock_init(&host->lock);
 882
 883	mxcmci_init_ocr(host);
 884
 885	if (host->pdata && host->pdata->dat3_card_detect)
 886		host->default_irq_mask =
 887			INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN;
 888	else
 889		host->default_irq_mask = 0;
 890
 891	host->res = r;
 892	host->irq = irq;
 893
 894	host->clk = clk_get(&pdev->dev, NULL);
 895	if (IS_ERR(host->clk)) {
 896		ret = PTR_ERR(host->clk);
 897		goto out_iounmap;
 898	}
 899	clk_enable(host->clk);
 900
 901	mxcmci_softreset(host);
 902
 903	host->rev_no = readw(host->base + MMC_REG_REV_NO);
 904	if (host->rev_no != 0x400) {
 905		ret = -ENODEV;
 906		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
 907			host->rev_no);
 908		goto out_clk_put;
 909	}
 910
 911	mmc->f_min = clk_get_rate(host->clk) >> 16;
 912	mmc->f_max = clk_get_rate(host->clk) >> 1;
 913
 914	/* recommended in data sheet */
 915	writew(0x2db4, host->base + MMC_REG_READ_TO);
 916
 917	writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR);
 918
 919	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
 920	if (r) {
 921		host->dmareq = r->start;
 922		host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
 923		host->dma_data.priority = DMA_PRIO_LOW;
 924		host->dma_data.dma_request = host->dmareq;
 925		dma_cap_zero(mask);
 926		dma_cap_set(DMA_SLAVE, mask);
 927		host->dma = dma_request_channel(mask, filter, host);
 928		if (host->dma)
 929			mmc->max_seg_size = dma_get_max_seg_size(
 930					host->dma->device->dev);
 931	}
 932
 933	if (!host->dma)
 934		dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
 935
 936	INIT_WORK(&host->datawork, mxcmci_datawork);
 937
 938	ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host);
 939	if (ret)
 940		goto out_free_dma;
 941
 942	platform_set_drvdata(pdev, mmc);
 943
 944	if (host->pdata && host->pdata->init) {
 945		ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,
 946				host->mmc);
 947		if (ret)
 948			goto out_free_irq;
 949	}
 950
 951	mmc_add_host(mmc);
 952
 953	return 0;
 954
 955out_free_irq:
 956	free_irq(host->irq, host);
 957out_free_dma:
 958	if (host->dma)
 959		dma_release_channel(host->dma);
 960out_clk_put:
 961	clk_disable(host->clk);
 962	clk_put(host->clk);
 963out_iounmap:
 964	iounmap(host->base);
 965out_free:
 966	mmc_free_host(mmc);
 967out_release_mem:
 968	release_mem_region(iores->start, resource_size(iores));
 969	return ret;
 970}
 971
 972static int mxcmci_remove(struct platform_device *pdev)
 973{
 974	struct mmc_host *mmc = platform_get_drvdata(pdev);
 975	struct mxcmci_host *host = mmc_priv(mmc);
 976
 977	platform_set_drvdata(pdev, NULL);
 978
 979	mmc_remove_host(mmc);
 980
 981	if (host->vcc)
 982		regulator_put(host->vcc);
 983
 984	if (host->pdata && host->pdata->exit)
 985		host->pdata->exit(&pdev->dev, mmc);
 986
 987	free_irq(host->irq, host);
 988	iounmap(host->base);
 989
 990	if (host->dma)
 991		dma_release_channel(host->dma);
 992
 993	clk_disable(host->clk);
 994	clk_put(host->clk);
 995
 996	release_mem_region(host->res->start, resource_size(host->res));
 997
 998	mmc_free_host(mmc);
 999
1000	return 0;
1001}
1002
1003#ifdef CONFIG_PM
1004static int mxcmci_suspend(struct device *dev)
1005{
1006	struct mmc_host *mmc = dev_get_drvdata(dev);
1007	struct mxcmci_host *host = mmc_priv(mmc);
1008	int ret = 0;
1009
1010	if (mmc)
1011		ret = mmc_suspend_host(mmc);
1012	clk_disable(host->clk);
1013
1014	return ret;
1015}
1016
1017static int mxcmci_resume(struct device *dev)
1018{
1019	struct mmc_host *mmc = dev_get_drvdata(dev);
1020	struct mxcmci_host *host = mmc_priv(mmc);
1021	int ret = 0;
1022
1023	clk_enable(host->clk);
1024	if (mmc)
1025		ret = mmc_resume_host(mmc);
1026
1027	return ret;
1028}
1029
1030static const struct dev_pm_ops mxcmci_pm_ops = {
1031	.suspend	= mxcmci_suspend,
1032	.resume		= mxcmci_resume,
1033};
1034#endif
1035
1036static struct platform_driver mxcmci_driver = {
1037	.probe		= mxcmci_probe,
1038	.remove		= mxcmci_remove,
1039	.driver		= {
1040		.name		= DRIVER_NAME,
1041		.owner		= THIS_MODULE,
1042#ifdef CONFIG_PM
1043		.pm	= &mxcmci_pm_ops,
1044#endif
1045	}
1046};
1047
1048static int __init mxcmci_init(void)
1049{
1050	return platform_driver_register(&mxcmci_driver);
1051}
1052
1053static void __exit mxcmci_exit(void)
1054{
1055	platform_driver_unregister(&mxcmci_driver);
1056}
1057
1058module_init(mxcmci_init);
1059module_exit(mxcmci_exit);
1060
1061MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
1062MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1063MODULE_LICENSE("GPL");
1064MODULE_ALIAS("platform:imx-mmc");
v3.15: drivers/mmc/host/mxcmmc.c
   1/*
   2 *  linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver
   3 *
   4 *  This is a driver for the SDHC controller found in Freescale MX2/MX3
   5 *  SoCs. It is basically the same hardware as found on MX1 (imxmmc.c).
   6 *  Unlike the hardware found on MX1, this hardware just works and does
   7 *  not need all the quirks found in imxmmc.c, hence the separate driver.
   8 *
   9 *  Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
  10 *  Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
  11 *
  12 *  derived from pxamci.c by Russell King
  13 *
  14 * This program is free software; you can redistribute it and/or modify
  15 * it under the terms of the GNU General Public License version 2 as
  16 * published by the Free Software Foundation.
  17 *
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/init.h>
  22#include <linux/ioport.h>
  23#include <linux/platform_device.h>
  24#include <linux/interrupt.h>
  25#include <linux/irq.h>
  26#include <linux/blkdev.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/mmc/host.h>
  29#include <linux/mmc/card.h>
  30#include <linux/delay.h>
  31#include <linux/clk.h>
  32#include <linux/io.h>
  33#include <linux/gpio.h>
  34#include <linux/regulator/consumer.h>
  35#include <linux/dmaengine.h>
  36#include <linux/types.h>
  37#include <linux/of.h>
  38#include <linux/of_device.h>
  39#include <linux/of_dma.h>
  40#include <linux/of_gpio.h>
  41#include <linux/mmc/slot-gpio.h>
  42
  43#include <asm/dma.h>
  44#include <asm/irq.h>
  45#include <linux/platform_data/mmc-mxcmmc.h>
  46
  47#include <linux/platform_data/dma-imx.h>
  48
  49#define DRIVER_NAME "mxc-mmc"
  50#define MXCMCI_TIMEOUT_MS 10000
  51
  52#define MMC_REG_STR_STP_CLK		0x00
  53#define MMC_REG_STATUS			0x04
  54#define MMC_REG_CLK_RATE		0x08
  55#define MMC_REG_CMD_DAT_CONT		0x0C
  56#define MMC_REG_RES_TO			0x10
  57#define MMC_REG_READ_TO			0x14
  58#define MMC_REG_BLK_LEN			0x18
  59#define MMC_REG_NOB			0x1C
  60#define MMC_REG_REV_NO			0x20
  61#define MMC_REG_INT_CNTR		0x24
  62#define MMC_REG_CMD			0x28
  63#define MMC_REG_ARG			0x2C
  64#define MMC_REG_RES_FIFO		0x34
  65#define MMC_REG_BUFFER_ACCESS		0x38
  66
  67#define STR_STP_CLK_RESET               (1 << 3)
  68#define STR_STP_CLK_START_CLK           (1 << 1)
  69#define STR_STP_CLK_STOP_CLK            (1 << 0)
  70
  71#define STATUS_CARD_INSERTION		(1 << 31)
  72#define STATUS_CARD_REMOVAL		(1 << 30)
  73#define STATUS_YBUF_EMPTY		(1 << 29)
  74#define STATUS_XBUF_EMPTY		(1 << 28)
  75#define STATUS_YBUF_FULL		(1 << 27)
  76#define STATUS_XBUF_FULL		(1 << 26)
  77#define STATUS_BUF_UND_RUN		(1 << 25)
  78#define STATUS_BUF_OVFL			(1 << 24)
  79#define STATUS_SDIO_INT_ACTIVE		(1 << 14)
  80#define STATUS_END_CMD_RESP		(1 << 13)
  81#define STATUS_WRITE_OP_DONE		(1 << 12)
  82#define STATUS_DATA_TRANS_DONE		(1 << 11)
  83#define STATUS_READ_OP_DONE		(1 << 11)
  84#define STATUS_WR_CRC_ERROR_CODE_MASK	(3 << 10)
  85#define STATUS_CARD_BUS_CLK_RUN		(1 << 8)
  86#define STATUS_BUF_READ_RDY		(1 << 7)
  87#define STATUS_BUF_WRITE_RDY		(1 << 6)
  88#define STATUS_RESP_CRC_ERR		(1 << 5)
  89#define STATUS_CRC_READ_ERR		(1 << 3)
  90#define STATUS_CRC_WRITE_ERR		(1 << 2)
  91#define STATUS_TIME_OUT_RESP		(1 << 1)
  92#define STATUS_TIME_OUT_READ		(1 << 0)
  93#define STATUS_ERR_MASK			0x2f
  94
  95#define CMD_DAT_CONT_CMD_RESP_LONG_OFF	(1 << 12)
  96#define CMD_DAT_CONT_STOP_READWAIT	(1 << 11)
  97#define CMD_DAT_CONT_START_READWAIT	(1 << 10)
  98#define CMD_DAT_CONT_BUS_WIDTH_4	(2 << 8)
  99#define CMD_DAT_CONT_INIT		(1 << 7)
 100#define CMD_DAT_CONT_WRITE		(1 << 4)
 101#define CMD_DAT_CONT_DATA_ENABLE	(1 << 3)
 102#define CMD_DAT_CONT_RESPONSE_48BIT_CRC	(1 << 0)
 103#define CMD_DAT_CONT_RESPONSE_136BIT	(2 << 0)
 104#define CMD_DAT_CONT_RESPONSE_48BIT	(3 << 0)
 105
 106#define INT_SDIO_INT_WKP_EN		(1 << 18)
 107#define INT_CARD_INSERTION_WKP_EN	(1 << 17)
 108#define INT_CARD_REMOVAL_WKP_EN		(1 << 16)
 109#define INT_CARD_INSERTION_EN		(1 << 15)
 110#define INT_CARD_REMOVAL_EN		(1 << 14)
 111#define INT_SDIO_IRQ_EN			(1 << 13)
 112#define INT_DAT0_EN			(1 << 12)
 113#define INT_BUF_READ_EN			(1 << 4)
 114#define INT_BUF_WRITE_EN		(1 << 3)
 115#define INT_END_CMD_RES_EN		(1 << 2)
 116#define INT_WRITE_OP_DONE_EN		(1 << 1)
 117#define INT_READ_OP_EN			(1 << 0)
 118
 119enum mxcmci_type {
 120	IMX21_MMC,
 121	IMX31_MMC,
 122	MPC512X_MMC,
 123};
 124
 125struct mxcmci_host {
 126	struct mmc_host		*mmc;
 127	struct resource		*res;
 128	void __iomem		*base;
 129	int			irq;
 130	int			detect_irq;
 131	struct dma_chan		*dma;
 132	struct dma_async_tx_descriptor *desc;
 133	int			do_dma;
 134	int			default_irq_mask;
 135	int			use_sdio;
 136	unsigned int		power_mode;
 137	struct imxmmc_platform_data *pdata;
 138
 139	struct mmc_request	*req;
 140	struct mmc_command	*cmd;
 141	struct mmc_data		*data;
 142
 143	unsigned int		datasize;
 144	unsigned int		dma_dir;
 145
 146	u16			rev_no;
 147	unsigned int		cmdat;
 148
 149	struct clk		*clk_ipg;
 150	struct clk		*clk_per;
 151
 152	int			clock;
 153
 154	struct work_struct	datawork;
 155	spinlock_t		lock;
 156
 157	struct regulator	*vcc;
 158
 159	int			burstlen;
 160	int			dmareq;
 161	struct dma_slave_config dma_slave_config;
 162	struct imx_dma_data	dma_data;
 163
 164	struct timer_list	watchdog;
 165	enum mxcmci_type	devtype;
 166};
 167
 168static const struct platform_device_id mxcmci_devtype[] = {
 169	{
 170		.name = "imx21-mmc",
 171		.driver_data = IMX21_MMC,
 172	}, {
 173		.name = "imx31-mmc",
 174		.driver_data = IMX31_MMC,
 175	}, {
 176		.name = "mpc512x-sdhc",
 177		.driver_data = MPC512X_MMC,
 178	}, {
 179		/* sentinel */
 180	}
 181};
 182MODULE_DEVICE_TABLE(platform, mxcmci_devtype);
 183
 184static const struct of_device_id mxcmci_of_match[] = {
 185	{
 186		.compatible = "fsl,imx21-mmc",
 187		.data = &mxcmci_devtype[IMX21_MMC],
 188	}, {
 189		.compatible = "fsl,imx31-mmc",
 190		.data = &mxcmci_devtype[IMX31_MMC],
 191	}, {
 192		.compatible = "fsl,mpc5121-sdhc",
 193		.data = &mxcmci_devtype[MPC512X_MMC],
 194	}, {
 195		/* sentinel */
 196	}
 197};
 198MODULE_DEVICE_TABLE(of, mxcmci_of_match);
 199
 200static inline int is_imx31_mmc(struct mxcmci_host *host)
 201{
 202	return host->devtype == IMX31_MMC;
 203}
 204
 205static inline int is_mpc512x_mmc(struct mxcmci_host *host)
 206{
 207	return host->devtype == MPC512X_MMC;
 208}
 209
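/* Register accessors: MPC512x maps the SDHC registers big-endian, so the
 * PPC_MPC512x build uses ioread32be/iowrite32be; i.MX uses plain readl/writel. */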
 210static inline u32 mxcmci_readl(struct mxcmci_host *host, int reg)
 211{
 212	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 213		return ioread32be(host->base + reg);
 214	else
 215		return readl(host->base + reg);
 216}
 217
 218static inline void mxcmci_writel(struct mxcmci_host *host, u32 val, int reg)
 219{
 220	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 221		iowrite32be(val, host->base + reg);
 222	else
 223		writel(val, host->base + reg);
 224}
 225
 226static inline u16 mxcmci_readw(struct mxcmci_host *host, int reg)
 227{
 228	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 229		return ioread32be(host->base + reg);
 230	else
 231		return readw(host->base + reg);
 232}
 233
 234static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg)
 235{
 236	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 237		iowrite32be(val, host->base + reg);
 238	else
 239		writew(val, host->base + reg);
 240}
 241
 242static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
 243
 244static inline void mxcmci_init_ocr(struct mxcmci_host *host)
 245{
 246	host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
 247
 248	if (IS_ERR(host->vcc)) {
 249		host->vcc = NULL;
 250	} else {
 251		host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
 252		if (host->pdata && host->pdata->ocr_avail)
 253			dev_warn(mmc_dev(host->mmc),
 254				"pdata->ocr_avail will not be used\n");
 255	}
 256
 257	if (host->vcc == NULL) {
 258		/* fall-back to platform data */
 259		if (host->pdata && host->pdata->ocr_avail)
 260			host->mmc->ocr_avail = host->pdata->ocr_avail;
 261		else
 262			host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 263	}
 264}
 265
 266static inline void mxcmci_set_power(struct mxcmci_host *host,
 267				    unsigned char power_mode,
 268				    unsigned int vdd)
 269{
 270	if (host->vcc) {
 271		if (power_mode == MMC_POWER_UP)
 272			mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
 273		else if (power_mode == MMC_POWER_OFF)
 274			mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
 275	}
 276
 277	if (host->pdata && host->pdata->setpower)
 278		host->pdata->setpower(mmc_dev(host->mmc), vdd);
 279}
 280
 281static inline int mxcmci_use_dma(struct mxcmci_host *host)
 282{
 283	return host->do_dma;
 284}
 285
 286static void mxcmci_softreset(struct mxcmci_host *host)
 287{
 288	int i;
 289
 290	dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");
 291
 292	/* reset sequence */
 293	mxcmci_writew(host, STR_STP_CLK_RESET, MMC_REG_STR_STP_CLK);
 294	mxcmci_writew(host, STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
 295			MMC_REG_STR_STP_CLK);
 296
 297	for (i = 0; i < 8; i++)
 298		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
 299
 300	mxcmci_writew(host, 0xff, MMC_REG_RES_TO);
 301}
 302static int mxcmci_setup_dma(struct mmc_host *mmc);
 303
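/* MPC512x only: data buffers are byte-swapped in place around DMA transfers
 * (called from mxcmci_setup_data() and mxcmci_finish_data()); on i.MX this is
 * a no-op. */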
 304#if IS_ENABLED(CONFIG_PPC_MPC512x)
 305static inline void buffer_swap32(u32 *buf, int len)
 306{
 307	int i;
 308
 309	for (i = 0; i < ((len + 3) / 4); i++) {
 310		st_le32(buf, *buf);
 311		buf++;
 312	}
 313}
 314
 315static void mxcmci_swap_buffers(struct mmc_data *data)
 316{
 317	struct scatterlist *sg;
 318	int i;
 319
 320	for_each_sg(data->sg, sg, data->sg_len, i)
 321		buffer_swap32(sg_virt(sg), sg->length);
 322}
 323#else
 324static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
 325#endif
 326
 327static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 328{
 329	unsigned int nob = data->blocks;
 330	unsigned int blksz = data->blksz;
 331	unsigned int datasize = nob * blksz;
 332	struct scatterlist *sg;
 333	enum dma_transfer_direction slave_dirn;
 334	int i, nents;
 335
 336	if (data->flags & MMC_DATA_STREAM)
 337		nob = 0xffff;
 338
 339	host->data = data;
 340	data->bytes_xfered = 0;
 341
 342	mxcmci_writew(host, nob, MMC_REG_NOB);
 343	mxcmci_writew(host, blksz, MMC_REG_BLK_LEN);
 344	host->datasize = datasize;
 345
 346	if (!mxcmci_use_dma(host))
 347		return 0;
 348
 349	for_each_sg(data->sg, sg, data->sg_len, i) {
 350		if (sg->offset & 3 || sg->length & 3 || sg->length < 512) {
 351			host->do_dma = 0;
 352			return 0;
 353		}
 354	}
 355
 356	if (data->flags & MMC_DATA_READ) {
 357		host->dma_dir = DMA_FROM_DEVICE;
 358		slave_dirn = DMA_DEV_TO_MEM;
 359	} else {
 360		host->dma_dir = DMA_TO_DEVICE;
 361		slave_dirn = DMA_MEM_TO_DEV;
 362
 363		mxcmci_swap_buffers(data);
 364	}
 365
 366	nents = dma_map_sg(host->dma->device->dev, data->sg,
 367				     data->sg_len,  host->dma_dir);
 368	if (nents != data->sg_len)
 369		return -EINVAL;
 370
 371	host->desc = dmaengine_prep_slave_sg(host->dma,
 372		data->sg, data->sg_len, slave_dirn,
 373		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 374
 375	if (!host->desc) {
 376		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 377				host->dma_dir);
 378		host->do_dma = 0;
 379		return 0; /* Fall back to PIO */
 380	}
 381	wmb();
 382
 383	dmaengine_submit(host->desc);
 384	dma_async_issue_pending(host->dma);
 385
 386	mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS));
 387
 388	return 0;
 389}
 390
 391static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat);
 392static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat);
 393
 394static void mxcmci_dma_callback(void *data)
 395{
 396	struct mxcmci_host *host = data;
 397	u32 stat;
 398
 399	del_timer(&host->watchdog);
 400
 401	stat = mxcmci_readl(host, MMC_REG_STATUS);
 402	mxcmci_writel(host, stat & ~STATUS_DATA_TRANS_DONE, MMC_REG_STATUS);
 403
 404	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
 405
 406	if (stat & STATUS_READ_OP_DONE)
 407		mxcmci_writel(host, STATUS_READ_OP_DONE, MMC_REG_STATUS);
 408
 409	mxcmci_data_done(host, stat);
 410}
 411
 412static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
 413		unsigned int cmdat)
 414{
 415	u32 int_cntr = host->default_irq_mask;
 416	unsigned long flags;
 417
 418	WARN_ON(host->cmd != NULL);
 419	host->cmd = cmd;
 420
 421	switch (mmc_resp_type(cmd)) {
 422	case MMC_RSP_R1: /* short CRC, OPCODE */
 423	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
 424		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT_CRC;
 425		break;
 426	case MMC_RSP_R2: /* long 136 bit + CRC */
 427		cmdat |= CMD_DAT_CONT_RESPONSE_136BIT;
 428		break;
 429	case MMC_RSP_R3: /* short */
 430		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT;
 431		break;
 432	case MMC_RSP_NONE:
 433		break;
 434	default:
 435		dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n",
 436				mmc_resp_type(cmd));
 437		cmd->error = -EINVAL;
 438		return -EINVAL;
 439	}
 440
 441	int_cntr = INT_END_CMD_RES_EN;
 442
 443	if (mxcmci_use_dma(host)) {
 444		if (host->dma_dir == DMA_FROM_DEVICE) {
 445			host->desc->callback = mxcmci_dma_callback;
 446			host->desc->callback_param = host;
 447		} else {
 448			int_cntr |= INT_WRITE_OP_DONE_EN;
 449		}
 450	}
 451
 452	spin_lock_irqsave(&host->lock, flags);
 453	if (host->use_sdio)
 454		int_cntr |= INT_SDIO_IRQ_EN;
 455	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 456	spin_unlock_irqrestore(&host->lock, flags);
 457
 458	mxcmci_writew(host, cmd->opcode, MMC_REG_CMD);
 459	mxcmci_writel(host, cmd->arg, MMC_REG_ARG);
 460	mxcmci_writew(host, cmdat, MMC_REG_CMD_DAT_CONT);
 461
 462	return 0;
 463}
 464
 465static void mxcmci_finish_request(struct mxcmci_host *host,
 466		struct mmc_request *req)
 467{
 468	u32 int_cntr = host->default_irq_mask;
 469	unsigned long flags;
 470
 471	spin_lock_irqsave(&host->lock, flags);
 472	if (host->use_sdio)
 473		int_cntr |= INT_SDIO_IRQ_EN;
 474	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 475	spin_unlock_irqrestore(&host->lock, flags);
 476
 477	host->req = NULL;
 478	host->cmd = NULL;
 479	host->data = NULL;
 480
 481	mmc_request_done(host->mmc, req);
 482}
 483
 484static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
 485{
 486	struct mmc_data *data = host->data;
 487	int data_error;
 488
 489	if (mxcmci_use_dma(host)) {
 490		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 491				host->dma_dir);
 492		mxcmci_swap_buffers(data);
 493	}
 494
 495	if (stat & STATUS_ERR_MASK) {
 496		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
 497				stat);
 498		if (stat & STATUS_CRC_READ_ERR) {
 499			dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
 500			data->error = -EILSEQ;
 501		} else if (stat & STATUS_CRC_WRITE_ERR) {
 502			u32 err_code = (stat >> 9) & 0x3;
 503			if (err_code == 2) { /* No CRC response */
 504				dev_err(mmc_dev(host->mmc),
 505					"%s: No CRC -ETIMEDOUT\n", __func__);
 506				data->error = -ETIMEDOUT;
 507			} else {
 508				dev_err(mmc_dev(host->mmc),
 509					"%s: -EILSEQ\n", __func__);
 510				data->error = -EILSEQ;
 511			}
 512		} else if (stat & STATUS_TIME_OUT_READ) {
 513			dev_err(mmc_dev(host->mmc),
 514				"%s: read -ETIMEDOUT\n", __func__);
 515			data->error = -ETIMEDOUT;
 516		} else {
 517			dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
 518			data->error = -EIO;
 519		}
 520	} else {
 521		data->bytes_xfered = host->datasize;
 522	}
 523
 524	data_error = data->error;
 525
 526	host->data = NULL;
 527
 528	return data_error;
 529}
 530
 531static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)
 532{
 533	struct mmc_command *cmd = host->cmd;
 534	int i;
 535	u32 a, b, c;
 536
 537	if (!cmd)
 538		return;
 539
 540	if (stat & STATUS_TIME_OUT_RESP) {
 541		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
 542		cmd->error = -ETIMEDOUT;
 543	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
 544		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
 545		cmd->error = -EILSEQ;
 546	}
 547
 548	if (cmd->flags & MMC_RSP_PRESENT) {
 549		if (cmd->flags & MMC_RSP_136) {
 550			for (i = 0; i < 4; i++) {
 551				a = mxcmci_readw(host, MMC_REG_RES_FIFO);
 552				b = mxcmci_readw(host, MMC_REG_RES_FIFO);
 553				cmd->resp[i] = a << 16 | b;
 554			}
 555		} else {
 556			a = mxcmci_readw(host, MMC_REG_RES_FIFO);
 557			b = mxcmci_readw(host, MMC_REG_RES_FIFO);
 558			c = mxcmci_readw(host, MMC_REG_RES_FIFO);
 559			cmd->resp[0] = a << 24 | b << 8 | c >> 8;
 560		}
 561	}
 562}
 563
 564static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
 565{
 566	u32 stat;
 567	unsigned long timeout = jiffies + HZ;
 568
 569	do {
 570		stat = mxcmci_readl(host, MMC_REG_STATUS);
 571		if (stat & STATUS_ERR_MASK)
 572			return stat;
 573		if (time_after(jiffies, timeout)) {
 574			mxcmci_softreset(host);
 575			mxcmci_set_clk_rate(host, host->clock);
 576			return STATUS_TIME_OUT_READ;
 577		}
 578		if (stat & mask)
 579			return 0;
 580		cpu_relax();
 581	} while (1);
 582}
 583
 584static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
 585{
 586	unsigned int stat;
 587	u32 *buf = _buf;
 588
 589	while (bytes > 3) {
 590		stat = mxcmci_poll_status(host,
 591				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
 592		if (stat)
 593			return stat;
 594		*buf++ = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
 595		bytes -= 4;
 596	}
 597
 598	if (bytes) {
 599		u8 *b = (u8 *)buf;
 600		u32 tmp;
 601
 602		stat = mxcmci_poll_status(host,
 603				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
 604		if (stat)
 605			return stat;
 606		tmp = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
 607		memcpy(b, &tmp, bytes);
 608	}
 609
 610	return 0;
 611}
 612
 613static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
 614{
 615	unsigned int stat;
 616	u32 *buf = _buf;
 617
 618	while (bytes > 3) {
 619		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 620		if (stat)
 621			return stat;
 622		mxcmci_writel(host, cpu_to_le32(*buf++), MMC_REG_BUFFER_ACCESS);
 623		bytes -= 4;
 624	}
 625
 626	if (bytes) {
 627		u8 *b = (u8 *)buf;
 628		u32 tmp;
 629
 630		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 631		if (stat)
 632			return stat;
 633
 634		memcpy(&tmp, b, bytes);
 635		mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS);
 636	}
 637
 638	stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 639	if (stat)
 640		return stat;
 641
 642	return 0;
 643}
 644
 645static int mxcmci_transfer_data(struct mxcmci_host *host)
 646{
 647	struct mmc_data *data = host->req->data;
 648	struct scatterlist *sg;
 649	int stat, i;
 650
 651	host->data = data;
 652	host->datasize = 0;
 653
 654	if (data->flags & MMC_DATA_READ) {
 655		for_each_sg(data->sg, sg, data->sg_len, i) {
 656			stat = mxcmci_pull(host, sg_virt(sg), sg->length);
 657			if (stat)
 658				return stat;
 659			host->datasize += sg->length;
 660		}
 661	} else {
 662		for_each_sg(data->sg, sg, data->sg_len, i) {
 663			stat = mxcmci_push(host, sg_virt(sg), sg->length);
 664			if (stat)
 665				return stat;
 666			host->datasize += sg->length;
 667		}
 668		stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
 669		if (stat)
 670			return stat;
 671	}
 672	return 0;
 673}
 674
 675static void mxcmci_datawork(struct work_struct *work)
 676{
 677	struct mxcmci_host *host = container_of(work, struct mxcmci_host,
 678						  datawork);
 679	int datastat = mxcmci_transfer_data(host);
 680
 681	mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
 682		MMC_REG_STATUS);
 683	mxcmci_finish_data(host, datastat);
 684
 685	if (host->req->stop) {
 686		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
 687			mxcmci_finish_request(host, host->req);
 688			return;
 689		}
 690	} else {
 691		mxcmci_finish_request(host, host->req);
 692	}
 693}
 694
 695static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
 696{
 697	struct mmc_request *req;
 698	int data_error;
 699	unsigned long flags;
 700
 701	spin_lock_irqsave(&host->lock, flags);
 702
 703	if (!host->data) {
 704		spin_unlock_irqrestore(&host->lock, flags);
 705		return;
 706	}
 707
 708	if (!host->req) {
 709		spin_unlock_irqrestore(&host->lock, flags);
 710		return;
 711	}
 712
 713	req = host->req;
 714	if (!req->stop)
 715		host->req = NULL; /* we will handle finish req below */
 716
 717	data_error = mxcmci_finish_data(host, stat);
 718
 719	spin_unlock_irqrestore(&host->lock, flags);
 720
 721	mxcmci_read_response(host, stat);
 722	host->cmd = NULL;
 723
 724	if (req->stop) {
 725		if (mxcmci_start_cmd(host, req->stop, 0)) {
 726			mxcmci_finish_request(host, req);
 727			return;
 728		}
 729	} else {
 730		mxcmci_finish_request(host, req);
 731	}
 732}
 733
 734static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
 735{
 736	mxcmci_read_response(host, stat);
 737	host->cmd = NULL;
 738
 739	if (!host->data && host->req) {
 740		mxcmci_finish_request(host, host->req);
 741		return;
 742	}
 743
 744	/* For the DMA case the DMA engine handles the data transfer
 745	 * automatically. For non DMA we have to do it ourselves.
 746	 * Don't do it in interrupt context though.
 747	 */
 748	if (!mxcmci_use_dma(host) && host->data)
 749		schedule_work(&host->datawork);
 750
 751}
 752
 753static irqreturn_t mxcmci_irq(int irq, void *devid)
 754{
 755	struct mxcmci_host *host = devid;
 756	unsigned long flags;
 757	bool sdio_irq;
 758	u32 stat;
 759
 760	stat = mxcmci_readl(host, MMC_REG_STATUS);
 761	mxcmci_writel(host,
 762		stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
 763			 STATUS_WRITE_OP_DONE),
 764		MMC_REG_STATUS);
 765
 766	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
 767
 768	spin_lock_irqsave(&host->lock, flags);
 769	sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
 770	spin_unlock_irqrestore(&host->lock, flags);
 771
 772	if (mxcmci_use_dma(host) &&
 773	    (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
 774		mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
 775			MMC_REG_STATUS);
 776
 777	if (sdio_irq) {
 778		mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS);
 779		mmc_signal_sdio_irq(host->mmc);
 780	}
 781
 782	if (stat & STATUS_END_CMD_RESP)
 783		mxcmci_cmd_done(host, stat);
 784
 785	if (mxcmci_use_dma(host) &&
 786		  (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) {
 787		del_timer(&host->watchdog);
 788		mxcmci_data_done(host, stat);
 789	}
 790
 791	if (host->default_irq_mask &&
 792		  (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
 793		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
 794
 795	return IRQ_HANDLED;
 796}
 797
 798static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
 799{
 800	struct mxcmci_host *host = mmc_priv(mmc);
 801	unsigned int cmdat = host->cmdat;
 802	int error;
 803
 804	WARN_ON(host->req != NULL);
 805
 806	host->req = req;
 807	host->cmdat &= ~CMD_DAT_CONT_INIT;
 808
 809	if (host->dma)
 810		host->do_dma = 1;
 811
 812	if (req->data) {
 813		error = mxcmci_setup_data(host, req->data);
 814		if (error) {
 815			req->cmd->error = error;
 816			goto out;
 817		}
 818
 819
 820		cmdat |= CMD_DAT_CONT_DATA_ENABLE;
 821
 822		if (req->data->flags & MMC_DATA_WRITE)
 823			cmdat |= CMD_DAT_CONT_WRITE;
 824	}
 825
 826	error = mxcmci_start_cmd(host, req->cmd, cmdat);
 827
 828out:
 829	if (error)
 830		mxcmci_finish_request(host, req);
 831}
 832
 833static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
 834{
 835	unsigned int divider;
 836	int prescaler = 0;
 837	unsigned int clk_in = clk_get_rate(host->clk_per);
 838
 839	while (prescaler <= 0x800) {
 840		for (divider = 1; divider <= 0xF; divider++) {
 841			int x;
 842
 843			x = (clk_in / (divider + 1));
 844
 845			if (prescaler)
 846				x /= (prescaler * 2);
 847
 848			if (x <= clk_ios)
 849				break;
 850		}
 851		if (divider < 0x10)
 852			break;
 853
 854		if (prescaler == 0)
 855			prescaler = 1;
 856		else
 857			prescaler <<= 1;
 858	}
 859
 860	mxcmci_writew(host, (prescaler << 4) | divider, MMC_REG_CLK_RATE);
 861
 862	dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",
 863			prescaler, divider, clk_in, clk_ios);
 864}
 865
 866static int mxcmci_setup_dma(struct mmc_host *mmc)
 867{
 868	struct mxcmci_host *host = mmc_priv(mmc);
 869	struct dma_slave_config *config = &host->dma_slave_config;
 870
 871	config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
 872	config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
 873	config->dst_addr_width = 4;
 874	config->src_addr_width = 4;
 875	config->dst_maxburst = host->burstlen;
 876	config->src_maxburst = host->burstlen;
 877	config->device_fc = false;
 878
 879	return dmaengine_slave_config(host->dma, config);
 880}
 881
 882static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 883{
 884	struct mxcmci_host *host = mmc_priv(mmc);
 885	int burstlen, ret;
 886
 887	/*
 888	 * use burstlen of 64 (16 words) in 4 bit mode (--> reg value  0)
 889	 * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16)
 890	 */
 891	if (ios->bus_width == MMC_BUS_WIDTH_4)
 892		burstlen = 16;
 893	else
 894		burstlen = 4;
 895
 896	if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
 897		host->burstlen = burstlen;
 898		ret = mxcmci_setup_dma(mmc);
 899		if (ret) {
 900			dev_err(mmc_dev(host->mmc),
 901				"failed to config DMA channel. Falling back to PIO\n");
 902			dma_release_channel(host->dma);
 903			host->do_dma = 0;
 904			host->dma = NULL;
 905		}
 906	}
 907
 908	if (ios->bus_width == MMC_BUS_WIDTH_4)
 909		host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
 910	else
 911		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
 912
 913	if (host->power_mode != ios->power_mode) {
 914		mxcmci_set_power(host, ios->power_mode, ios->vdd);
 915		host->power_mode = ios->power_mode;
 916
 917		if (ios->power_mode == MMC_POWER_ON)
 918			host->cmdat |= CMD_DAT_CONT_INIT;
 919	}
 920
 921	if (ios->clock) {
 922		mxcmci_set_clk_rate(host, ios->clock);
 923		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
 924	} else {
 925		mxcmci_writew(host, STR_STP_CLK_STOP_CLK, MMC_REG_STR_STP_CLK);
 926	}
 927
 928	host->clock = ios->clock;
 929}
 930
 931static irqreturn_t mxcmci_detect_irq(int irq, void *data)
 932{
 933	struct mmc_host *mmc = data;
 934
 935	dev_dbg(mmc_dev(mmc), "%s\n", __func__);
 936
 937	mmc_detect_change(mmc, msecs_to_jiffies(250));
 938	return IRQ_HANDLED;
 939}
 940
 941static int mxcmci_get_ro(struct mmc_host *mmc)
 942{
 943	struct mxcmci_host *host = mmc_priv(mmc);
 944
 945	if (host->pdata && host->pdata->get_ro)
 946		return !!host->pdata->get_ro(mmc_dev(mmc));
 947	/*
 948	 * If the board doesn't support read-only detection (no mmc_gpio
 949	 * context or the GPIO is invalid), let the MMC core decide
 950	 * what to do.
 951	 */
 952	return mmc_gpio_get_ro(mmc);
 953}
 954
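/*
 * Enable or disable the controller's SDIO interrupt by toggling
 * INT_SDIO_IRQ_EN in MMC_REG_INT_CNTR under host->lock; host->use_sdio
 * records the current state for the rest of the driver.
 */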
 955static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
 956{
 957	struct mxcmci_host *host = mmc_priv(mmc);
 958	unsigned long flags;
 959	u32 int_cntr;
 960
 961	spin_lock_irqsave(&host->lock, flags);
 962	host->use_sdio = enable;
 963	int_cntr = mxcmci_readl(host, MMC_REG_INT_CNTR);
 964
 965	if (enable)
 966		int_cntr |= INT_SDIO_IRQ_EN;
 967	else
 968		int_cntr &= ~INT_SDIO_IRQ_EN;
 969
 970	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 971	spin_unlock_irqrestore(&host->lock, flags);
 972}
 973
 974static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
 975{
 976	struct mxcmci_host *mxcmci = mmc_priv(host);
 977
 978	/*
 979	 * MX3 SoCs have a silicon bug which corrupts CRC calculation of
 980	 * multi-block transfers when the connected SDIO peripheral doesn't
 981	 * drive the BUSY line as required by the specification.
 982	 * One way to prevent this is to allow only 1-bit transfers.
 983	 */
 984
 985	if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO)
 986		host->caps &= ~MMC_CAP_4_BIT_DATA;
 987	else
 988		host->caps |= MMC_CAP_4_BIT_DATA;
 989}
 990
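/*
 * Channel filter for the legacy platform-data DMA path: accept only
 * general-purpose i.MX DMA channels and pass the SDHC peripheral
 * parameters to the engine through chan->private.
 */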
 991static bool filter(struct dma_chan *chan, void *param)
 992{
 993	struct mxcmci_host *host = param;
 994
 995	if (!imx_dma_is_general_purpose(chan))
 996		return false;
 997
 998	chan->private = &host->dma_data;
 999
1000	return true;
1001}
1002
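/*
 * Watchdog timer callback for DMA transfers that never complete: terminate
 * the DMA on a read timeout, soft-reset the controller on a write timeout,
 * then fail the request with -ETIMEDOUT (the timer itself is set up in
 * probe and is presumably armed when a DMA transfer is started).
 */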
1003static void mxcmci_watchdog(unsigned long data)
1004{
1005	struct mmc_host *mmc = (struct mmc_host *)data;
1006	struct mxcmci_host *host = mmc_priv(mmc);
1007	struct mmc_request *req = host->req;
1008	unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);
1009
1010	if (host->dma_dir == DMA_FROM_DEVICE) {
1011		dmaengine_terminate_all(host->dma);
1012		dev_err(mmc_dev(host->mmc),
1013			"%s: read time out (status = 0x%08x)\n",
1014			__func__, stat);
1015	} else {
1016		dev_err(mmc_dev(host->mmc),
1017			"%s: write time out (status = 0x%08x)\n",
1018			__func__, stat);
1019		mxcmci_softreset(host);
1020	}
1021
1022	/* Mark the transfer as erroneous and inform the upper layers */
1023
1024	if (host->data)
1025		host->data->error = -ETIMEDOUT;
1026	host->req = NULL;
1027	host->cmd = NULL;
1028	host->data = NULL;
1029	mmc_request_done(host->mmc, req);
1030}
1031
1032static const struct mmc_host_ops mxcmci_ops = {
1033	.request		= mxcmci_request,
1034	.set_ios		= mxcmci_set_ios,
1035	.get_ro			= mxcmci_get_ro,
1036	.enable_sdio_irq	= mxcmci_enable_sdio_irq,
1037	.init_card		= mxcmci_init_card,
1038};
1039
1040static int mxcmci_probe(struct platform_device *pdev)
1041{
1042	struct mmc_host *mmc;
1043	struct mxcmci_host *host = NULL;
1044	struct resource *iores, *r;
1045	int ret = 0, irq;
1046	bool dat3_card_detect = false;
1047	dma_cap_mask_t mask;
1048	const struct of_device_id *of_id;
1049	struct imxmmc_platform_data *pdata = pdev->dev.platform_data;
1050
1051	pr_info("i.MX/MPC512x SDHC driver\n");
1052
1053	of_id = of_match_device(mxcmci_of_match, &pdev->dev);
1054
1055	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1056	irq = platform_get_irq(pdev, 0);
1057	if (!iores || irq < 0)
1058		return -EINVAL;
1059
1060	r = request_mem_region(iores->start, resource_size(iores), pdev->name);
1061	if (!r)
1062		return -EBUSY;
1063
1064	mmc = mmc_alloc_host(sizeof(struct mxcmci_host), &pdev->dev);
1065	if (!mmc) {
1066		ret = -ENOMEM;
1067		goto out_release_mem;
1068	}
1069
1070	ret = mmc_of_parse(mmc);
1071	if (ret)
1072		goto out_free;
1073	mmc->ops = &mxcmci_ops;
1074
1075	/* With devicetree, the bus width has already been set up by mmc_of_parse() */
1076	if (pdata)
1077		mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
1078	else
1079		mmc->caps |= MMC_CAP_SDIO_IRQ;
1080
1081	/* Transfer size limits advertised to the MMC core */
1082	mmc->max_blk_size = 2048;
1083	mmc->max_blk_count = 65535;
1084	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1085	mmc->max_seg_size = mmc->max_req_size;
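	/* 2048 bytes/block * 65535 blocks = 134215680 bytes, just under 128 MiB */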
1086
1087	host = mmc_priv(mmc);
1088	host->base = ioremap(r->start, resource_size(r));
1089	if (!host->base) {
1090		ret = -ENOMEM;
1091		goto out_free;
1092	}
1093
1094	if (of_id) {
1095		const struct platform_device_id *id_entry = of_id->data;
1096		host->devtype = id_entry->driver_data;
1097	} else {
1098		host->devtype = pdev->id_entry->driver_data;
1099	}
1100
1101	/* adjust max_segs after devtype detection */
1102	if (!is_mpc512x_mmc(host))
1103		mmc->max_segs = 64;
1104
1105	host->mmc = mmc;
1106	host->pdata = pdata;
1107	spin_lock_init(&host->lock);
1108
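	/*
	 * Without platform data, fall back to DAT3-based card detection
	 * (the controller's own insertion/removal interrupts) when the slot
	 * is removable and the devicetree provides no cd-gpios property.
	 */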
1109	if (pdata)
1110		dat3_card_detect = pdata->dat3_card_detect;
1111	else if (!(mmc->caps & MMC_CAP_NONREMOVABLE)
1112			&& !of_property_read_bool(pdev->dev.of_node, "cd-gpios"))
1113		dat3_card_detect = true;
1114
1115	mxcmci_init_ocr(host);
1116
1117	if (dat3_card_detect)
1118		host->default_irq_mask =
1119			INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN;
1120	else
1121		host->default_irq_mask = 0;
1122
1123	host->res = r;
1124	host->irq = irq;
1125
1126	host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1127	if (IS_ERR(host->clk_ipg)) {
1128		ret = PTR_ERR(host->clk_ipg);
1129		goto out_iounmap;
1130	}
1131
1132	host->clk_per = devm_clk_get(&pdev->dev, "per");
1133	if (IS_ERR(host->clk_per)) {
1134		ret = PTR_ERR(host->clk_per);
1135		goto out_iounmap;
1136	}
1137
1138	clk_prepare_enable(host->clk_per);
1139	clk_prepare_enable(host->clk_ipg);
1140
1141	mxcmci_softreset(host);
1142
1143	host->rev_no = mxcmci_readw(host, MMC_REG_REV_NO);
1144	if (host->rev_no != 0x400) {
1145		ret = -ENODEV;
1146		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
1147			host->rev_no);
1148		goto out_clk_put;
1149	}
1150
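	/*
	 * Derive the host frequency range from the per clock: the lowest
	 * rate is clk_per / 65536, the highest is clk_per / 2.  With a
	 * (hypothetical) 96 MHz per clock that is roughly 1.46 kHz to 48 MHz.
	 */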
1151	mmc->f_min = clk_get_rate(host->clk_per) >> 16;
1152	mmc->f_max = clk_get_rate(host->clk_per) >> 1;
1153
1154	/* read time-out value recommended in the data sheet */
1155	mxcmci_writew(host, 0x2db4, MMC_REG_READ_TO);
1156
1157	mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);
1158
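	/*
	 * Acquire the DMA channel: with devicetree, request the named
	 * "rx-tx" slave channel; with platform data, take the request line
	 * from IORESOURCE_DMA and use the filter() callback above.  When a
	 * channel is found, raise max_seg_size to whatever the DMA device
	 * supports; otherwise fall back to PIO.
	 */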
1159	if (!host->pdata) {
1160		host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx");
1161	} else {
1162		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1163		if (r) {
1164			host->dmareq = r->start;
1165			host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
1166			host->dma_data.priority = DMA_PRIO_LOW;
1167			host->dma_data.dma_request = host->dmareq;
1168			dma_cap_zero(mask);
1169			dma_cap_set(DMA_SLAVE, mask);
1170			host->dma = dma_request_channel(mask, filter, host);
1171		}
1172	}
1173	if (host->dma)
1174		mmc->max_seg_size = dma_get_max_seg_size(
1175				host->dma->device->dev);
1176	else
1177		dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
1178
1179	INIT_WORK(&host->datawork, mxcmci_datawork);
1180
1181	ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host);
1182	if (ret)
1183		goto out_free_dma;
1184
1185	platform_set_drvdata(pdev, mmc);
1186
1187	if (host->pdata && host->pdata->init) {
1188		ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,
1189				host->mmc);
1190		if (ret)
1191			goto out_free_irq;
1192	}
1193
1194	init_timer(&host->watchdog);
1195	host->watchdog.function = &mxcmci_watchdog;
1196	host->watchdog.data = (unsigned long)mmc;
1197
1198	mmc_add_host(mmc);
1199
1200	return 0;
1201
1202out_free_irq:
1203	free_irq(host->irq, host);
1204out_free_dma:
1205	if (host->dma)
1206		dma_release_channel(host->dma);
1207out_clk_put:
1208	clk_disable_unprepare(host->clk_per);
1209	clk_disable_unprepare(host->clk_ipg);
1210out_iounmap:
1211	iounmap(host->base);
1212out_free:
1213	mmc_free_host(mmc);
1214out_release_mem:
1215	release_mem_region(iores->start, resource_size(iores));
1216	return ret;
1217}
1218
1219static int mxcmci_remove(struct platform_device *pdev)
1220{
1221	struct mmc_host *mmc = platform_get_drvdata(pdev);
1222	struct mxcmci_host *host = mmc_priv(mmc);
1223
1224	mmc_remove_host(mmc);
1225
1226	if (host->vcc)
1227		regulator_put(host->vcc);
1228
1229	if (host->pdata && host->pdata->exit)
1230		host->pdata->exit(&pdev->dev, mmc);
1231
1232	free_irq(host->irq, host);
1233	iounmap(host->base);
1234
1235	if (host->dma)
1236		dma_release_channel(host->dma);
1237
1238	clk_disable_unprepare(host->clk_per);
1239	clk_disable_unprepare(host->clk_ipg);
1240
1241	release_mem_region(host->res->start, resource_size(host->res));
1242
1243	mmc_free_host(mmc);
1244
1245	return 0;
1246}
1247
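/*
 * System PM is minimal: suspend only gates the ipg/per clocks and resume
 * re-enables them; no controller register state is saved or restored here.
 */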
1248#ifdef CONFIG_PM
1249static int mxcmci_suspend(struct device *dev)
1250{
1251	struct mmc_host *mmc = dev_get_drvdata(dev);
1252	struct mxcmci_host *host = mmc_priv(mmc);
1253
1254	clk_disable_unprepare(host->clk_per);
1255	clk_disable_unprepare(host->clk_ipg);
1256	return 0;
1257}
1258
1259static int mxcmci_resume(struct device *dev)
1260{
1261	struct mmc_host *mmc = dev_get_drvdata(dev);
1262	struct mxcmci_host *host = mmc_priv(mmc);
1263
1264	clk_prepare_enable(host->clk_per);
1265	clk_prepare_enable(host->clk_ipg);
1266	return 0;
1267}
1268
1269static const struct dev_pm_ops mxcmci_pm_ops = {
1270	.suspend	= mxcmci_suspend,
1271	.resume		= mxcmci_resume,
1272};
1273#endif
1274
1275static struct platform_driver mxcmci_driver = {
1276	.probe		= mxcmci_probe,
1277	.remove		= mxcmci_remove,
1278	.id_table	= mxcmci_devtype,
1279	.driver		= {
1280		.name		= DRIVER_NAME,
1281		.owner		= THIS_MODULE,
1282#ifdef CONFIG_PM
1283		.pm	= &mxcmci_pm_ops,
1284#endif
1285		.of_match_table	= mxcmci_of_match,
1286	}
1287};
1288
1289module_platform_driver(mxcmci_driver);
1290
1291MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
1292MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1293MODULE_LICENSE("GPL");
1294MODULE_ALIAS("platform:mxc-mmc");