   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver
   4 *
   5 *  This is a driver for the SDHC controller found in Freescale MX2/MX3
   6 *  SoCs. It is basically the same hardware as found on MX1 (imxmmc.c).
   7 *  Unlike the hardware found on MX1, this hardware just works and does
   8 *  not need all the quirks found in imxmmc.c, hence the separate driver.
   9 *
  10 *  Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
  11 *  Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
  12 *
  13 *  derived from pxamci.c by Russell King
  14 */
  15
  16#include <linux/module.h>
  17#include <linux/init.h>
  18#include <linux/ioport.h>
  19#include <linux/platform_device.h>
  20#include <linux/highmem.h>
  21#include <linux/interrupt.h>
  22#include <linux/irq.h>
  23#include <linux/blkdev.h>
  24#include <linux/dma-mapping.h>
  25#include <linux/mmc/host.h>
  26#include <linux/mmc/card.h>
  27#include <linux/delay.h>
  28#include <linux/clk.h>
  29#include <linux/io.h>
  30#include <linux/regulator/consumer.h>
  31#include <linux/dmaengine.h>
  32#include <linux/types.h>
  33#include <linux/of.h>
  34#include <linux/of_dma.h>
  35#include <linux/mmc/slot-gpio.h>
  36
  37#include <asm/dma.h>
  38#include <asm/irq.h>
  39#include <linux/platform_data/mmc-mxcmmc.h>
  40
  41#include <linux/dma/imx-dma.h>
  42
  43#define DRIVER_NAME "mxc-mmc"
  44#define MXCMCI_TIMEOUT_MS 10000
  45
  46#define MMC_REG_STR_STP_CLK		0x00
  47#define MMC_REG_STATUS			0x04
  48#define MMC_REG_CLK_RATE		0x08
  49#define MMC_REG_CMD_DAT_CONT		0x0C
  50#define MMC_REG_RES_TO			0x10
  51#define MMC_REG_READ_TO			0x14
  52#define MMC_REG_BLK_LEN			0x18
  53#define MMC_REG_NOB			0x1C
  54#define MMC_REG_REV_NO			0x20
  55#define MMC_REG_INT_CNTR		0x24
  56#define MMC_REG_CMD			0x28
  57#define MMC_REG_ARG			0x2C
  58#define MMC_REG_RES_FIFO		0x34
  59#define MMC_REG_BUFFER_ACCESS		0x38
  60
  61#define STR_STP_CLK_RESET               (1 << 3)
  62#define STR_STP_CLK_START_CLK           (1 << 1)
  63#define STR_STP_CLK_STOP_CLK            (1 << 0)
  64
  65#define STATUS_CARD_INSERTION		(1 << 31)
  66#define STATUS_CARD_REMOVAL		(1 << 30)
  67#define STATUS_YBUF_EMPTY		(1 << 29)
  68#define STATUS_XBUF_EMPTY		(1 << 28)
  69#define STATUS_YBUF_FULL		(1 << 27)
  70#define STATUS_XBUF_FULL		(1 << 26)
  71#define STATUS_BUF_UND_RUN		(1 << 25)
  72#define STATUS_BUF_OVFL			(1 << 24)
  73#define STATUS_SDIO_INT_ACTIVE		(1 << 14)
  74#define STATUS_END_CMD_RESP		(1 << 13)
  75#define STATUS_WRITE_OP_DONE		(1 << 12)
  76#define STATUS_DATA_TRANS_DONE		(1 << 11)
  77#define STATUS_READ_OP_DONE		(1 << 11)
  78#define STATUS_WR_CRC_ERROR_CODE_MASK	(3 << 10)
  79#define STATUS_CARD_BUS_CLK_RUN		(1 << 8)
  80#define STATUS_BUF_READ_RDY		(1 << 7)
  81#define STATUS_BUF_WRITE_RDY		(1 << 6)
  82#define STATUS_RESP_CRC_ERR		(1 << 5)
  83#define STATUS_CRC_READ_ERR		(1 << 3)
  84#define STATUS_CRC_WRITE_ERR		(1 << 2)
  85#define STATUS_TIME_OUT_RESP		(1 << 1)
  86#define STATUS_TIME_OUT_READ		(1 << 0)
  87#define STATUS_ERR_MASK			0x2f
  88
  89#define CMD_DAT_CONT_CMD_RESP_LONG_OFF	(1 << 12)
  90#define CMD_DAT_CONT_STOP_READWAIT	(1 << 11)
  91#define CMD_DAT_CONT_START_READWAIT	(1 << 10)
  92#define CMD_DAT_CONT_BUS_WIDTH_4	(2 << 8)
  93#define CMD_DAT_CONT_INIT		(1 << 7)
  94#define CMD_DAT_CONT_WRITE		(1 << 4)
  95#define CMD_DAT_CONT_DATA_ENABLE	(1 << 3)
  96#define CMD_DAT_CONT_RESPONSE_48BIT_CRC	(1 << 0)
  97#define CMD_DAT_CONT_RESPONSE_136BIT	(2 << 0)
  98#define CMD_DAT_CONT_RESPONSE_48BIT	(3 << 0)
  99
 100#define INT_SDIO_INT_WKP_EN		(1 << 18)
 101#define INT_CARD_INSERTION_WKP_EN	(1 << 17)
 102#define INT_CARD_REMOVAL_WKP_EN		(1 << 16)
 103#define INT_CARD_INSERTION_EN		(1 << 15)
 104#define INT_CARD_REMOVAL_EN		(1 << 14)
 105#define INT_SDIO_IRQ_EN			(1 << 13)
 106#define INT_DAT0_EN			(1 << 12)
 107#define INT_BUF_READ_EN			(1 << 4)
 108#define INT_BUF_WRITE_EN		(1 << 3)
 109#define INT_END_CMD_RES_EN		(1 << 2)
 110#define INT_WRITE_OP_DONE_EN		(1 << 1)
 111#define INT_READ_OP_EN			(1 << 0)
 112
 113enum mxcmci_type {
 114	IMX21_MMC,
 115	IMX31_MMC,
 116	MPC512X_MMC,
 117};
 118
 119struct mxcmci_host {
 120	struct mmc_host		*mmc;
 121	void __iomem		*base;
 122	dma_addr_t		phys_base;
 123	int			detect_irq;
 124	struct dma_chan		*dma;
 125	struct dma_async_tx_descriptor *desc;
 126	int			do_dma;
 127	int			default_irq_mask;
 128	int			use_sdio;
 129	unsigned int		power_mode;
 130	struct imxmmc_platform_data *pdata;
 131
 132	struct mmc_request	*req;
 133	struct mmc_command	*cmd;
 134	struct mmc_data		*data;
 135
 136	unsigned int		datasize;
 137	unsigned int		dma_dir;
 138
 139	u16			rev_no;
 140	unsigned int		cmdat;
 141
 142	struct clk		*clk_ipg;
 143	struct clk		*clk_per;
 144
 145	int			clock;
 146
 147	struct work_struct	datawork;
 148	spinlock_t		lock;
 149
 150	int			burstlen;
 151	int			dmareq;
 152	struct dma_slave_config dma_slave_config;
 153	struct imx_dma_data	dma_data;
 154
 155	struct timer_list	watchdog;
 156	enum mxcmci_type	devtype;
 157};
 158
 159static const struct of_device_id mxcmci_of_match[] = {
 160	{
 161		.compatible = "fsl,imx21-mmc",
 162		.data = (void *) IMX21_MMC,
 163	}, {
 164		.compatible = "fsl,imx31-mmc",
 165		.data = (void *) IMX31_MMC,
 166	}, {
 167		.compatible = "fsl,mpc5121-sdhc",
 168		.data = (void *) MPC512X_MMC,
 169	}, {
 170		/* sentinel */
 171	}
 172};
 173MODULE_DEVICE_TABLE(of, mxcmci_of_match);
 174
 175static inline int is_imx31_mmc(struct mxcmci_host *host)
 176{
 177	return host->devtype == IMX31_MMC;
 178}
 179
 180static inline int is_mpc512x_mmc(struct mxcmci_host *host)
 181{
 182	return host->devtype == MPC512X_MMC;
 183}
 184
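/*
 * Register accessors (editor's note, derived from the code below): the
 * MPC512x SDHC exposes the same register layout big-endian, so with
 * CONFIG_PPC_MPC512x enabled all accesses go through ioread32be()/
 * iowrite32be(); even the 16-bit registers are then accessed as 32-bit
 * big-endian words. On i.MX the plain readl/writel/readw/writew are used.
 */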
 185static inline u32 mxcmci_readl(struct mxcmci_host *host, int reg)
 186{
 187	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 188		return ioread32be(host->base + reg);
 189	else
 190		return readl(host->base + reg);
 191}
 192
 193static inline void mxcmci_writel(struct mxcmci_host *host, u32 val, int reg)
 194{
 195	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 196		iowrite32be(val, host->base + reg);
 197	else
 198		writel(val, host->base + reg);
 199}
 200
 201static inline u16 mxcmci_readw(struct mxcmci_host *host, int reg)
 202{
 203	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 204		return ioread32be(host->base + reg);
 205	else
 206		return readw(host->base + reg);
 207}
 208
 209static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg)
 210{
 211	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 212		iowrite32be(val, host->base + reg);
 213	else
 214		writew(val, host->base + reg);
 215}
 216
 217static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
 218
 219static void mxcmci_set_power(struct mxcmci_host *host, unsigned int vdd)
 220{
 221	if (!IS_ERR(host->mmc->supply.vmmc)) {
 222		if (host->power_mode == MMC_POWER_UP)
 223			mmc_regulator_set_ocr(host->mmc,
 224					      host->mmc->supply.vmmc, vdd);
 225		else if (host->power_mode == MMC_POWER_OFF)
 226			mmc_regulator_set_ocr(host->mmc,
 227					      host->mmc->supply.vmmc, 0);
 228	}
 229
 230	if (host->pdata && host->pdata->setpower)
 231		host->pdata->setpower(mmc_dev(host->mmc), vdd);
 232}
 233
 234static inline int mxcmci_use_dma(struct mxcmci_host *host)
 235{
 236	return host->do_dma;
 237}
 238
 239static void mxcmci_softreset(struct mxcmci_host *host)
 240{
 241	int i;
 242
 243	dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");
 244
 245	/* reset sequence */
 246	mxcmci_writew(host, STR_STP_CLK_RESET, MMC_REG_STR_STP_CLK);
 247	mxcmci_writew(host, STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
 248			MMC_REG_STR_STP_CLK);
 249
 250	for (i = 0; i < 8; i++)
 251		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
 252
 253	mxcmci_writew(host, 0xff, MMC_REG_RES_TO);
 254}
 255
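/*
 * On MPC512x the data path is big-endian as well: write buffers are
 * byte-swapped before a DMA transfer (mxcmci_setup_data) and swapped back
 * once it completes, while read buffers are swapped once in
 * mxcmci_finish_data, presumably to match the controller's byte ordering.
 * On other SoCs mxcmci_swap_buffers() is a no-op.
 */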
 256#if IS_ENABLED(CONFIG_PPC_MPC512x)
 257static inline void buffer_swap32(u32 *buf, int len)
 258{
 259	int i;
 260
 261	for (i = 0; i < ((len + 3) / 4); i++) {
 262		*buf = swab32(*buf);
 263		buf++;
 264	}
 265}
 266
 267static void mxcmci_swap_buffers(struct mmc_data *data)
 268{
 269	struct scatterlist *sg;
 270	int i;
 271
 272	for_each_sg(data->sg, sg, data->sg_len, i)
 273		buffer_swap32(sg_virt(sg), sg->length);
 274}
 275#else
 276static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
 277#endif
 278
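/*
 * Program the block count and block length, then decide between DMA and PIO:
 * DMA is only used when every scatterlist entry is 4-byte aligned, a
 * multiple of 4 bytes and at least 512 bytes long, otherwise the driver
 * silently falls back to PIO. For DMA the scatterlist is mapped, a slave
 * descriptor is submitted and the watchdog timer is armed to catch transfers
 * that never complete.
 */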
 279static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 280{
 281	unsigned int nob = data->blocks;
 282	unsigned int blksz = data->blksz;
 283	unsigned int datasize = nob * blksz;
 284	struct scatterlist *sg;
 285	enum dma_transfer_direction slave_dirn;
 286	int i, nents;
 287
 288	host->data = data;
 289	data->bytes_xfered = 0;
 290
 291	mxcmci_writew(host, nob, MMC_REG_NOB);
 292	mxcmci_writew(host, blksz, MMC_REG_BLK_LEN);
 293	host->datasize = datasize;
 294
 295	if (!mxcmci_use_dma(host))
 296		return 0;
 297
 298	for_each_sg(data->sg, sg, data->sg_len, i) {
 299		if (sg->offset & 3 || sg->length & 3 || sg->length < 512) {
 300			host->do_dma = 0;
 301			return 0;
 302		}
 303	}
 304
 305	if (data->flags & MMC_DATA_READ) {
 306		host->dma_dir = DMA_FROM_DEVICE;
 307		slave_dirn = DMA_DEV_TO_MEM;
 308	} else {
 309		host->dma_dir = DMA_TO_DEVICE;
 310		slave_dirn = DMA_MEM_TO_DEV;
 311
 312		mxcmci_swap_buffers(data);
 313	}
 314
 315	nents = dma_map_sg(host->dma->device->dev, data->sg,
 316				     data->sg_len,  host->dma_dir);
 317	if (nents != data->sg_len)
 318		return -EINVAL;
 319
 320	host->desc = dmaengine_prep_slave_sg(host->dma,
 321		data->sg, data->sg_len, slave_dirn,
 322		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 323
 324	if (!host->desc) {
 325		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 326				host->dma_dir);
 327		host->do_dma = 0;
 328		return 0; /* Fall back to PIO */
 329	}
 330	wmb();
 331
 332	dmaengine_submit(host->desc);
 333	dma_async_issue_pending(host->dma);
 334
 335	mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS));
 336
 337	return 0;
 338}
 339
 340static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat);
 341static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat);
 342
 343static void mxcmci_dma_callback(void *data)
 344{
 345	struct mxcmci_host *host = data;
 346	u32 stat;
 347
 348	del_timer(&host->watchdog);
 349
 350	stat = mxcmci_readl(host, MMC_REG_STATUS);
 351
 352	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
 353
 354	mxcmci_data_done(host, stat);
 355}
 356
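/*
 * Translate the MMC core response type into CMD_DAT_CONT response bits and
 * unmask the end-of-command interrupt. DMA reads complete through the
 * dmaengine callback, while DMA writes additionally enable the
 * WRITE_OP_DONE interrupt. Finally the opcode, argument and control word
 * are written to start the command.
 */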
 357static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
 358		unsigned int cmdat)
 359{
 360	u32 int_cntr = host->default_irq_mask;
 361	unsigned long flags;
 362
 363	WARN_ON(host->cmd != NULL);
 364	host->cmd = cmd;
 365
 366	switch (mmc_resp_type(cmd)) {
 367	case MMC_RSP_R1: /* short CRC, OPCODE */
 368	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
 369		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT_CRC;
 370		break;
 371	case MMC_RSP_R2: /* long 136 bit + CRC */
 372		cmdat |= CMD_DAT_CONT_RESPONSE_136BIT;
 373		break;
 374	case MMC_RSP_R3: /* short */
 375		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT;
 376		break;
 377	case MMC_RSP_NONE:
 378		break;
 379	default:
 380		dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n",
 381				mmc_resp_type(cmd));
 382		cmd->error = -EINVAL;
 383		return -EINVAL;
 384	}
 385
 386	int_cntr = INT_END_CMD_RES_EN;
 387
 388	if (mxcmci_use_dma(host)) {
 389		if (host->dma_dir == DMA_FROM_DEVICE) {
 390			host->desc->callback = mxcmci_dma_callback;
 391			host->desc->callback_param = host;
 392		} else {
 393			int_cntr |= INT_WRITE_OP_DONE_EN;
 394		}
 395	}
 396
 397	spin_lock_irqsave(&host->lock, flags);
 398	if (host->use_sdio)
 399		int_cntr |= INT_SDIO_IRQ_EN;
 400	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 401	spin_unlock_irqrestore(&host->lock, flags);
 402
 403	mxcmci_writew(host, cmd->opcode, MMC_REG_CMD);
 404	mxcmci_writel(host, cmd->arg, MMC_REG_ARG);
 405	mxcmci_writew(host, cmdat, MMC_REG_CMD_DAT_CONT);
 406
 407	return 0;
 408}
 409
 410static void mxcmci_finish_request(struct mxcmci_host *host,
 411		struct mmc_request *req)
 412{
 413	u32 int_cntr = host->default_irq_mask;
 414	unsigned long flags;
 415
 416	spin_lock_irqsave(&host->lock, flags);
 417	if (host->use_sdio)
 418		int_cntr |= INT_SDIO_IRQ_EN;
 419	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 420	spin_unlock_irqrestore(&host->lock, flags);
 421
 422	host->req = NULL;
 423	host->cmd = NULL;
 424	host->data = NULL;
 425
 426	mmc_request_done(host->mmc, req);
 427}
 428
 429static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
 430{
 431	struct mmc_data *data = host->data;
 432	int data_error;
 433
 434	if (mxcmci_use_dma(host)) {
 435		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 436				host->dma_dir);
 437		mxcmci_swap_buffers(data);
 438	}
 439
 440	if (stat & STATUS_ERR_MASK) {
 441		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
 442				stat);
 443		if (stat & STATUS_CRC_READ_ERR) {
 444			dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
 445			data->error = -EILSEQ;
 446		} else if (stat & STATUS_CRC_WRITE_ERR) {
 447			u32 err_code = (stat >> 9) & 0x3;
 448			if (err_code == 2) { /* No CRC response */
 449				dev_err(mmc_dev(host->mmc),
 450					"%s: No CRC -ETIMEDOUT\n", __func__);
 451				data->error = -ETIMEDOUT;
 452			} else {
 453				dev_err(mmc_dev(host->mmc),
 454					"%s: -EILSEQ\n", __func__);
 455				data->error = -EILSEQ;
 456			}
 457		} else if (stat & STATUS_TIME_OUT_READ) {
 458			dev_err(mmc_dev(host->mmc),
 459				"%s: read -ETIMEDOUT\n", __func__);
 460			data->error = -ETIMEDOUT;
 461		} else {
 462			dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
 463			data->error = -EIO;
 464		}
 465	} else {
 466		data->bytes_xfered = host->datasize;
 467	}
 468
 469	data_error = data->error;
 470
 471	host->data = NULL;
 472
 473	return data_error;
 474}
 475
 476static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)
 477{
 478	struct mmc_command *cmd = host->cmd;
 479	int i;
 480	u32 a, b, c;
 481
 482	if (!cmd)
 483		return;
 484
 485	if (stat & STATUS_TIME_OUT_RESP) {
 486		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
 487		cmd->error = -ETIMEDOUT;
 488	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
 489		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
 490		cmd->error = -EILSEQ;
 491	}
 492
 493	if (cmd->flags & MMC_RSP_PRESENT) {
 494		if (cmd->flags & MMC_RSP_136) {
 495			for (i = 0; i < 4; i++) {
 496				a = mxcmci_readw(host, MMC_REG_RES_FIFO);
 497				b = mxcmci_readw(host, MMC_REG_RES_FIFO);
 498				cmd->resp[i] = a << 16 | b;
 499			}
 500		} else {
 501			a = mxcmci_readw(host, MMC_REG_RES_FIFO);
 502			b = mxcmci_readw(host, MMC_REG_RES_FIFO);
 503			c = mxcmci_readw(host, MMC_REG_RES_FIFO);
 504			cmd->resp[0] = a << 24 | b << 8 | c >> 8;
 505		}
 506	}
 507}
 508
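/*
 * PIO helper: busy-wait on the status register until one of the requested
 * mask bits is set, an error bit is reported, or roughly one second has
 * passed. On timeout the controller is soft-reset, the clock is
 * reprogrammed and STATUS_TIME_OUT_READ is returned so the caller treats it
 * as an error.
 */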
 509static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
 510{
 511	u32 stat;
 512	unsigned long timeout = jiffies + HZ;
 513
 514	do {
 515		stat = mxcmci_readl(host, MMC_REG_STATUS);
 516		if (stat & STATUS_ERR_MASK)
 517			return stat;
 518		if (time_after(jiffies, timeout)) {
 519			mxcmci_softreset(host);
 520			mxcmci_set_clk_rate(host, host->clock);
 521			return STATUS_TIME_OUT_READ;
 522		}
 523		if (stat & mask)
 524			return 0;
 525		cpu_relax();
 526	} while (1);
 527}
 528
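/*
 * PIO data movers: mxcmci_pull() and mxcmci_push() shuttle data through the
 * 32-bit BUFFER_ACCESS FIFO one word at a time, waiting for the buffer-ready
 * status bits between accesses and using a temporary word for a tail that is
 * shorter than four bytes.
 */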
 529static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
 530{
 531	unsigned int stat;
 532	u32 *buf = _buf;
 533
 534	while (bytes > 3) {
 535		stat = mxcmci_poll_status(host,
 536				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
 537		if (stat)
 538			return stat;
 539		*buf++ = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
 540		bytes -= 4;
 541	}
 542
 543	if (bytes) {
 544		u8 *b = (u8 *)buf;
 545		u32 tmp;
 546
 547		stat = mxcmci_poll_status(host,
 548				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
 549		if (stat)
 550			return stat;
 551		tmp = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
 552		memcpy(b, &tmp, bytes);
 553	}
 554
 555	return 0;
 556}
 557
 558static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
 559{
 560	unsigned int stat;
 561	u32 *buf = _buf;
 562
 563	while (bytes > 3) {
 564		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 565		if (stat)
 566			return stat;
 567		mxcmci_writel(host, cpu_to_le32(*buf++), MMC_REG_BUFFER_ACCESS);
 568		bytes -= 4;
 569	}
 570
 571	if (bytes) {
 572		u8 *b = (u8 *)buf;
 573		u32 tmp;
 574
 575		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 576		if (stat)
 577			return stat;
 578
 579		memcpy(&tmp, b, bytes);
 580		mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS);
 581	}
 582
 583	return mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 584}
 585
 586static int mxcmci_transfer_data(struct mxcmci_host *host)
 587{
 588	struct mmc_data *data = host->req->data;
 589	struct scatterlist *sg;
 590	int stat, i;
 591
 592	host->data = data;
 593	host->datasize = 0;
 594
 595	if (data->flags & MMC_DATA_READ) {
 596		for_each_sg(data->sg, sg, data->sg_len, i) {
 597			stat = mxcmci_pull(host, sg_virt(sg), sg->length);
 598			if (stat)
 599				return stat;
 600			host->datasize += sg->length;
 601		}
 602	} else {
 603		for_each_sg(data->sg, sg, data->sg_len, i) {
 604			stat = mxcmci_push(host, sg_virt(sg), sg->length);
 605			if (stat)
 606				return stat;
 607			host->datasize += sg->length;
 608		}
 609		stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
 610		if (stat)
 611			return stat;
 612	}
 613	return 0;
 614}
 615
 616static void mxcmci_datawork(struct work_struct *work)
 617{
 618	struct mxcmci_host *host = container_of(work, struct mxcmci_host,
 619						  datawork);
 620	int datastat = mxcmci_transfer_data(host);
 621
 622	mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
 623		MMC_REG_STATUS);
 624	mxcmci_finish_data(host, datastat);
 625
 626	if (host->req->stop) {
 627		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
 628			mxcmci_finish_request(host, host->req);
 629			return;
 630		}
 631	} else {
 632		mxcmci_finish_request(host, host->req);
 633	}
 634}
 635
 636static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
 637{
 638	struct mmc_request *req;
 639	int data_error;
 640	unsigned long flags;
 641
 642	spin_lock_irqsave(&host->lock, flags);
 643
 644	if (!host->data) {
 645		spin_unlock_irqrestore(&host->lock, flags);
 646		return;
 647	}
 648
 649	if (!host->req) {
 650		spin_unlock_irqrestore(&host->lock, flags);
 651		return;
 652	}
 653
 654	req = host->req;
 655	if (!req->stop)
 656		host->req = NULL; /* we will handle finish req below */
 657
 658	data_error = mxcmci_finish_data(host, stat);
 659
 660	spin_unlock_irqrestore(&host->lock, flags);
 661
 662	if (data_error)
 663		return;
 664
 665	mxcmci_read_response(host, stat);
 666	host->cmd = NULL;
 667
 668	if (req->stop) {
 669		if (mxcmci_start_cmd(host, req->stop, 0)) {
 670			mxcmci_finish_request(host, req);
 671			return;
 672		}
 673	} else {
 674		mxcmci_finish_request(host, req);
 675	}
 676}
 677
 678static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
 679{
 680	mxcmci_read_response(host, stat);
 681	host->cmd = NULL;
 682
 683	if (!host->data && host->req) {
 684		mxcmci_finish_request(host, host->req);
 685		return;
 686	}
 687
  688	/* In the DMA case the DMA engine handles the data transfer
  689	 * automatically. For non-DMA transfers we have to do it ourselves,
  690	 * but not in interrupt context.
 691	 */
 692	if (!mxcmci_use_dma(host) && host->data)
 693		schedule_work(&host->datawork);
 694
 695}
 696
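/*
 * Interrupt handler: acknowledge the status bits, forward SDIO card
 * interrupts, complete the command phase on END_CMD_RESP and, for DMA
 * writes, complete the data phase on WRITE_OP_DONE (DMA reads finish via the
 * dmaengine callback instead). Card insertion/removal events are only
 * reported when DAT3 card detection is in use.
 */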
 697static irqreturn_t mxcmci_irq(int irq, void *devid)
 698{
 699	struct mxcmci_host *host = devid;
 700	bool sdio_irq;
 701	u32 stat;
 702
 703	stat = mxcmci_readl(host, MMC_REG_STATUS);
 704	mxcmci_writel(host,
 705		stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
 706			 STATUS_WRITE_OP_DONE),
 707		MMC_REG_STATUS);
 708
 709	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
 710
 711	spin_lock(&host->lock);
 712	sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
 713	spin_unlock(&host->lock);
 714
 715	if (mxcmci_use_dma(host) && (stat & (STATUS_WRITE_OP_DONE)))
 716		mxcmci_writel(host, STATUS_WRITE_OP_DONE, MMC_REG_STATUS);
 717
 718	if (sdio_irq) {
 719		mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS);
 720		mmc_signal_sdio_irq(host->mmc);
 721	}
 722
 723	if (stat & STATUS_END_CMD_RESP)
 724		mxcmci_cmd_done(host, stat);
 725
 726	if (mxcmci_use_dma(host) && (stat & STATUS_WRITE_OP_DONE)) {
 727		del_timer(&host->watchdog);
 728		mxcmci_data_done(host, stat);
 729	}
 730
 731	if (host->default_irq_mask &&
 732		  (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
 733		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
 734
 735	return IRQ_HANDLED;
 736}
 737
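/*
 * ->request() entry point: DMA is (re)enabled whenever a channel is
 * available, the data phase is prepared first and the command is then
 * started; any setup error completes the request immediately.
 */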
 738static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
 739{
 740	struct mxcmci_host *host = mmc_priv(mmc);
 741	unsigned int cmdat = host->cmdat;
 742	int error;
 743
 744	WARN_ON(host->req != NULL);
 745
 746	host->req = req;
 747	host->cmdat &= ~CMD_DAT_CONT_INIT;
 748
 749	if (host->dma)
 750		host->do_dma = 1;
 751
 752	if (req->data) {
 753		error = mxcmci_setup_data(host, req->data);
 754		if (error) {
 755			req->cmd->error = error;
 756			goto out;
 757		}
 758
 759
 760		cmdat |= CMD_DAT_CONT_DATA_ENABLE;
 761
 762		if (req->data->flags & MMC_DATA_WRITE)
 763			cmdat |= CMD_DAT_CONT_WRITE;
 764	}
 765
 766	error = mxcmci_start_cmd(host, req->cmd, cmdat);
 767
 768out:
 769	if (error)
 770		mxcmci_finish_request(host, req);
 771}
 772
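/*
 * Clock setup: walk the prescaler values (0, then powers of two up to 0x800)
 * and dividers 1..15 until the first combination where
 * clk_in / ((divider + 1) * (prescaler ? 2 * prescaler : 1)) does not exceed
 * the requested rate, then program (prescaler << 4) | divider into CLK_RATE.
 */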
 773static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
 774{
 775	unsigned int divider;
 776	int prescaler = 0;
 777	unsigned int clk_in = clk_get_rate(host->clk_per);
 778
 779	while (prescaler <= 0x800) {
 780		for (divider = 1; divider <= 0xF; divider++) {
 781			int x;
 782
 783			x = (clk_in / (divider + 1));
 784
 785			if (prescaler)
 786				x /= (prescaler * 2);
 787
 788			if (x <= clk_ios)
 789				break;
 790		}
 791		if (divider < 0x10)
 792			break;
 793
 794		if (prescaler == 0)
 795			prescaler = 1;
 796		else
 797			prescaler <<= 1;
 798	}
 799
 800	mxcmci_writew(host, (prescaler << 4) | divider, MMC_REG_CLK_RATE);
 801
 802	dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",
 803			prescaler, divider, clk_in, clk_ios);
 804}
 805
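/*
 * Configure the dmaengine slave channel: both directions target the
 * BUFFER_ACCESS FIFO with 32-bit accesses and the burst length currently
 * selected for the bus width.
 */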
 806static int mxcmci_setup_dma(struct mmc_host *mmc)
 807{
 808	struct mxcmci_host *host = mmc_priv(mmc);
 809	struct dma_slave_config *config = &host->dma_slave_config;
 810
 811	config->dst_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
 812	config->src_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
 813	config->dst_addr_width = 4;
 814	config->src_addr_width = 4;
 815	config->dst_maxburst = host->burstlen;
 816	config->src_maxburst = host->burstlen;
 817	config->device_fc = false;
 818
 819	return dmaengine_slave_config(host->dma, config);
 820}
 821
 822static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 823{
 824	struct mxcmci_host *host = mmc_priv(mmc);
 825	int burstlen, ret;
 826
 827	/*
 828	 * use burstlen of 64 (16 words) in 4 bit mode (--> reg value  0)
 829	 * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16)
 830	 */
 831	if (ios->bus_width == MMC_BUS_WIDTH_4)
 832		burstlen = 16;
 833	else
 834		burstlen = 4;
 835
 836	if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
 837		host->burstlen = burstlen;
 838		ret = mxcmci_setup_dma(mmc);
 839		if (ret) {
 840			dev_err(mmc_dev(host->mmc),
 841				"failed to config DMA channel. Falling back to PIO\n");
 842			dma_release_channel(host->dma);
 843			host->do_dma = 0;
 844			host->dma = NULL;
 845		}
 846	}
 847
 848	if (ios->bus_width == MMC_BUS_WIDTH_4)
 849		host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
 850	else
 851		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
 852
 853	if (host->power_mode != ios->power_mode) {
 854		host->power_mode = ios->power_mode;
 855		mxcmci_set_power(host, ios->vdd);
 856
 857		if (ios->power_mode == MMC_POWER_ON)
 858			host->cmdat |= CMD_DAT_CONT_INIT;
 859	}
 860
 861	if (ios->clock) {
 862		mxcmci_set_clk_rate(host, ios->clock);
 863		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
 864	} else {
 865		mxcmci_writew(host, STR_STP_CLK_STOP_CLK, MMC_REG_STR_STP_CLK);
 866	}
 867
 868	host->clock = ios->clock;
 869}
 870
 871static irqreturn_t mxcmci_detect_irq(int irq, void *data)
 872{
 873	struct mmc_host *mmc = data;
 874
 875	dev_dbg(mmc_dev(mmc), "%s\n", __func__);
 876
 877	mmc_detect_change(mmc, msecs_to_jiffies(250));
 878	return IRQ_HANDLED;
 879}
 880
 881static int mxcmci_get_ro(struct mmc_host *mmc)
 882{
 883	struct mxcmci_host *host = mmc_priv(mmc);
 884
 885	if (host->pdata && host->pdata->get_ro)
 886		return !!host->pdata->get_ro(mmc_dev(mmc));
 887	/*
  888	 * If the board doesn't support read-only detection (no mmc_gpio
  889	 * context or the gpio is invalid), let the mmc core decide
  890	 * what to do.
 891	 */
 892	return mmc_gpio_get_ro(mmc);
 893}
 894
 895static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
 896{
 897	struct mxcmci_host *host = mmc_priv(mmc);
 898	unsigned long flags;
 899	u32 int_cntr;
 900
 901	spin_lock_irqsave(&host->lock, flags);
 902	host->use_sdio = enable;
 903	int_cntr = mxcmci_readl(host, MMC_REG_INT_CNTR);
 904
 905	if (enable)
 906		int_cntr |= INT_SDIO_IRQ_EN;
 907	else
 908		int_cntr &= ~INT_SDIO_IRQ_EN;
 909
 910	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 911	spin_unlock_irqrestore(&host->lock, flags);
 912}
 913
 914static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
 915{
 916	struct mxcmci_host *mxcmci = mmc_priv(host);
 917
 918	/*
  919	 * MX3 SoCs have a silicon bug which corrupts the CRC calculation of
  920	 * multi-block transfers when the connected SDIO peripheral doesn't
  921	 * drive the BUSY line as required by the specs.
 922	 * One way to prevent this is to only allow 1-bit transfers.
 923	 */
 924
 925	if (is_imx31_mmc(mxcmci) && mmc_card_sdio(card))
 926		host->caps &= ~MMC_CAP_4_BIT_DATA;
 927	else
 928		host->caps |= MMC_CAP_4_BIT_DATA;
 929}
 930
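/*
 * dmaengine filter used on the legacy platform-data path: accept only
 * general purpose i.MX DMA channels and hand the SDHC request line to the
 * channel via chan->private.
 */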
 931static bool filter(struct dma_chan *chan, void *param)
 932{
 933	struct mxcmci_host *host = param;
 934
 935	if (!imx_dma_is_general_purpose(chan))
 936		return false;
 937
 938	chan->private = &host->dma_data;
 939
 940	return true;
 941}
 942
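/*
 * DMA watchdog, armed in mxcmci_setup_data(): if a DMA transfer has not
 * completed within MXCMCI_TIMEOUT_MS the transfer is aborted (reads
 * terminate the DMA channel, writes soft-reset the controller) and the
 * request is completed with -ETIMEDOUT.
 */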
 943static void mxcmci_watchdog(struct timer_list *t)
 944{
 945	struct mxcmci_host *host = from_timer(host, t, watchdog);
 946	struct mmc_request *req = host->req;
 947	unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);
 948
 949	if (host->dma_dir == DMA_FROM_DEVICE) {
 950		dmaengine_terminate_all(host->dma);
 951		dev_err(mmc_dev(host->mmc),
 952			"%s: read time out (status = 0x%08x)\n",
 953			__func__, stat);
 954	} else {
 955		dev_err(mmc_dev(host->mmc),
 956			"%s: write time out (status = 0x%08x)\n",
 957			__func__, stat);
 958		mxcmci_softreset(host);
 959	}
 960
  961	/* Mark transfer as erroneous and inform the upper layers */
 962
 963	if (host->data)
 964		host->data->error = -ETIMEDOUT;
 965	host->req = NULL;
 966	host->cmd = NULL;
 967	host->data = NULL;
 968	mmc_request_done(host->mmc, req);
 969}
 970
 971static const struct mmc_host_ops mxcmci_ops = {
 972	.request		= mxcmci_request,
 973	.set_ios		= mxcmci_set_ios,
 974	.get_ro			= mxcmci_get_ro,
 975	.enable_sdio_irq	= mxcmci_enable_sdio_irq,
 976	.init_card		= mxcmci_init_card,
 977};
 978
 979static int mxcmci_probe(struct platform_device *pdev)
 980{
 981	struct mmc_host *mmc;
 982	struct mxcmci_host *host;
 983	struct resource *res;
 984	int ret = 0, irq;
 985	bool dat3_card_detect = false;
 986	dma_cap_mask_t mask;
 987	struct imxmmc_platform_data *pdata = pdev->dev.platform_data;
 988
 989	pr_info("i.MX/MPC512x SDHC driver\n");
 990
 991	irq = platform_get_irq(pdev, 0);
 992	if (irq < 0)
 993		return irq;
 994
 995	mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
 996	if (!mmc)
 997		return -ENOMEM;
 998
 999	host = mmc_priv(mmc);
1000
1001	host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1002	if (IS_ERR(host->base)) {
1003		ret = PTR_ERR(host->base);
1004		goto out_free;
1005	}
1006
1007	host->phys_base = res->start;
1008
1009	ret = mmc_of_parse(mmc);
1010	if (ret)
1011		goto out_free;
1012	mmc->ops = &mxcmci_ops;
1013
1014	/* For devicetree parsing, the bus width is read from devicetree */
1015	if (pdata)
1016		mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
1017	else
1018		mmc->caps |= MMC_CAP_SDIO_IRQ;
1019
1020	/* MMC core transfer sizes tunable parameters */
1021	mmc->max_blk_size = 2048;
1022	mmc->max_blk_count = 65535;
1023	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1024	mmc->max_seg_size = mmc->max_req_size;
1025
1026	host->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
1027
1028	/* adjust max_segs after devtype detection */
1029	if (!is_mpc512x_mmc(host))
1030		mmc->max_segs = 64;
1031
1032	host->mmc = mmc;
1033	host->pdata = pdata;
1034	spin_lock_init(&host->lock);
1035
1036	if (pdata)
1037		dat3_card_detect = pdata->dat3_card_detect;
1038	else if (mmc_card_is_removable(mmc)
1039			&& !of_property_read_bool(pdev->dev.of_node, "cd-gpios"))
1040		dat3_card_detect = true;
1041
1042	ret = mmc_regulator_get_supply(mmc);
1043	if (ret)
1044		goto out_free;
1045
1046	if (!mmc->ocr_avail) {
1047		if (pdata && pdata->ocr_avail)
1048			mmc->ocr_avail = pdata->ocr_avail;
1049		else
1050			mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1051	}
1052
1053	if (dat3_card_detect)
1054		host->default_irq_mask =
1055			INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN;
1056	else
1057		host->default_irq_mask = 0;
1058
1059	host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1060	if (IS_ERR(host->clk_ipg)) {
1061		ret = PTR_ERR(host->clk_ipg);
1062		goto out_free;
1063	}
1064
1065	host->clk_per = devm_clk_get(&pdev->dev, "per");
1066	if (IS_ERR(host->clk_per)) {
1067		ret = PTR_ERR(host->clk_per);
1068		goto out_free;
1069	}
1070
1071	ret = clk_prepare_enable(host->clk_per);
1072	if (ret)
1073		goto out_free;
1074
1075	ret = clk_prepare_enable(host->clk_ipg);
1076	if (ret)
1077		goto out_clk_per_put;
1078
1079	mxcmci_softreset(host);
1080
1081	host->rev_no = mxcmci_readw(host, MMC_REG_REV_NO);
1082	if (host->rev_no != 0x400) {
1083		ret = -ENODEV;
1084		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
1085			host->rev_no);
1086		goto out_clk_put;
1087	}
1088
1089	mmc->f_min = clk_get_rate(host->clk_per) >> 16;
1090	mmc->f_max = clk_get_rate(host->clk_per) >> 1;
1091
1092	/* recommended in data sheet */
1093	mxcmci_writew(host, 0x2db4, MMC_REG_READ_TO);
1094
1095	mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);
1096
1097	if (!host->pdata) {
1098		host->dma = dma_request_chan(&pdev->dev, "rx-tx");
1099		if (IS_ERR(host->dma)) {
1100			if (PTR_ERR(host->dma) == -EPROBE_DEFER) {
1101				ret = -EPROBE_DEFER;
1102				goto out_clk_put;
1103			}
1104
1105			/* Ignore errors to fall back to PIO mode */
1106			host->dma = NULL;
1107		}
1108	} else {
1109		res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1110		if (res) {
1111			host->dmareq = res->start;
1112			host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
1113			host->dma_data.priority = DMA_PRIO_LOW;
1114			host->dma_data.dma_request = host->dmareq;
1115			dma_cap_zero(mask);
1116			dma_cap_set(DMA_SLAVE, mask);
1117			host->dma = dma_request_channel(mask, filter, host);
1118		}
1119	}
1120	if (host->dma)
1121		mmc->max_seg_size = dma_get_max_seg_size(
1122				host->dma->device->dev);
1123	else
1124		dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
1125
1126	INIT_WORK(&host->datawork, mxcmci_datawork);
1127
1128	ret = devm_request_irq(&pdev->dev, irq, mxcmci_irq, 0,
1129			       dev_name(&pdev->dev), host);
1130	if (ret)
1131		goto out_free_dma;
1132
1133	platform_set_drvdata(pdev, mmc);
1134
1135	if (host->pdata && host->pdata->init) {
1136		ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,
1137				host->mmc);
1138		if (ret)
1139			goto out_free_dma;
1140	}
1141
1142	timer_setup(&host->watchdog, mxcmci_watchdog, 0);
1143
1144	ret = mmc_add_host(mmc);
1145	if (ret)
1146		goto out_free_dma;
1147
1148	return 0;
1149
1150out_free_dma:
1151	if (host->dma)
1152		dma_release_channel(host->dma);
1153
1154out_clk_put:
1155	clk_disable_unprepare(host->clk_ipg);
1156out_clk_per_put:
1157	clk_disable_unprepare(host->clk_per);
1158
1159out_free:
1160	mmc_free_host(mmc);
1161
1162	return ret;
1163}
1164
1165static void mxcmci_remove(struct platform_device *pdev)
1166{
1167	struct mmc_host *mmc = platform_get_drvdata(pdev);
1168	struct mxcmci_host *host = mmc_priv(mmc);
1169
1170	mmc_remove_host(mmc);
1171
1172	if (host->pdata && host->pdata->exit)
1173		host->pdata->exit(&pdev->dev, mmc);
1174
1175	if (host->dma)
1176		dma_release_channel(host->dma);
1177
1178	clk_disable_unprepare(host->clk_per);
1179	clk_disable_unprepare(host->clk_ipg);
1180
1181	mmc_free_host(mmc);
1182}
1183
1184static int mxcmci_suspend(struct device *dev)
1185{
1186	struct mmc_host *mmc = dev_get_drvdata(dev);
1187	struct mxcmci_host *host = mmc_priv(mmc);
1188
1189	clk_disable_unprepare(host->clk_per);
1190	clk_disable_unprepare(host->clk_ipg);
1191	return 0;
1192}
1193
1194static int mxcmci_resume(struct device *dev)
1195{
1196	struct mmc_host *mmc = dev_get_drvdata(dev);
1197	struct mxcmci_host *host = mmc_priv(mmc);
1198	int ret;
1199
1200	ret = clk_prepare_enable(host->clk_per);
1201	if (ret)
1202		return ret;
1203
1204	ret = clk_prepare_enable(host->clk_ipg);
1205	if (ret)
1206		clk_disable_unprepare(host->clk_per);
1207
1208	return ret;
1209}
1210
1211static DEFINE_SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
1212
1213static struct platform_driver mxcmci_driver = {
1214	.probe		= mxcmci_probe,
1215	.remove_new	= mxcmci_remove,
1216	.driver		= {
1217		.name		= DRIVER_NAME,
1218		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
1219		.pm	= pm_sleep_ptr(&mxcmci_pm_ops),
1220		.of_match_table	= mxcmci_of_match,
1221	}
1222};
1223
1224module_platform_driver(mxcmci_driver);
1225
1226MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
1227MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1228MODULE_LICENSE("GPL");
1229MODULE_ALIAS("platform:mxc-mmc");
v3.5.6
 
   1/*
   2 *  linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver
   3 *
   4 *  This is a driver for the SDHC controller found in Freescale MX2/MX3
   5 *  SoCs. It is basically the same hardware as found on MX1 (imxmmc.c).
   6 *  Unlike the hardware found on MX1, this hardware just works and does
   7 *  not need all the quirks found in imxmmc.c, hence the separate driver.
   8 *
   9 *  Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
  10 *  Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
  11 *
  12 *  derived from pxamci.c by Russell King
  13 *
  14 * This program is free software; you can redistribute it and/or modify
  15 * it under the terms of the GNU General Public License version 2 as
  16 * published by the Free Software Foundation.
  17 *
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/init.h>
  22#include <linux/ioport.h>
  23#include <linux/platform_device.h>
 
  24#include <linux/interrupt.h>
  25#include <linux/irq.h>
  26#include <linux/blkdev.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/mmc/host.h>
  29#include <linux/mmc/card.h>
  30#include <linux/delay.h>
  31#include <linux/clk.h>
  32#include <linux/io.h>
  33#include <linux/gpio.h>
  34#include <linux/regulator/consumer.h>
  35#include <linux/dmaengine.h>
  36#include <linux/types.h>
 
 
 
  37
  38#include <asm/dma.h>
  39#include <asm/irq.h>
  40#include <asm/sizes.h>
  41#include <mach/mmc.h>
  42
  43#include <mach/dma.h>
  44#include <mach/hardware.h>
  45
  46#define DRIVER_NAME "mxc-mmc"
 
  47
  48#define MMC_REG_STR_STP_CLK		0x00
  49#define MMC_REG_STATUS			0x04
  50#define MMC_REG_CLK_RATE		0x08
  51#define MMC_REG_CMD_DAT_CONT		0x0C
  52#define MMC_REG_RES_TO			0x10
  53#define MMC_REG_READ_TO			0x14
  54#define MMC_REG_BLK_LEN			0x18
  55#define MMC_REG_NOB			0x1C
  56#define MMC_REG_REV_NO			0x20
  57#define MMC_REG_INT_CNTR		0x24
  58#define MMC_REG_CMD			0x28
  59#define MMC_REG_ARG			0x2C
  60#define MMC_REG_RES_FIFO		0x34
  61#define MMC_REG_BUFFER_ACCESS		0x38
  62
  63#define STR_STP_CLK_RESET               (1 << 3)
  64#define STR_STP_CLK_START_CLK           (1 << 1)
  65#define STR_STP_CLK_STOP_CLK            (1 << 0)
  66
  67#define STATUS_CARD_INSERTION		(1 << 31)
  68#define STATUS_CARD_REMOVAL		(1 << 30)
  69#define STATUS_YBUF_EMPTY		(1 << 29)
  70#define STATUS_XBUF_EMPTY		(1 << 28)
  71#define STATUS_YBUF_FULL		(1 << 27)
  72#define STATUS_XBUF_FULL		(1 << 26)
  73#define STATUS_BUF_UND_RUN		(1 << 25)
  74#define STATUS_BUF_OVFL			(1 << 24)
  75#define STATUS_SDIO_INT_ACTIVE		(1 << 14)
  76#define STATUS_END_CMD_RESP		(1 << 13)
  77#define STATUS_WRITE_OP_DONE		(1 << 12)
  78#define STATUS_DATA_TRANS_DONE		(1 << 11)
  79#define STATUS_READ_OP_DONE		(1 << 11)
  80#define STATUS_WR_CRC_ERROR_CODE_MASK	(3 << 10)
  81#define STATUS_CARD_BUS_CLK_RUN		(1 << 8)
  82#define STATUS_BUF_READ_RDY		(1 << 7)
  83#define STATUS_BUF_WRITE_RDY		(1 << 6)
  84#define STATUS_RESP_CRC_ERR		(1 << 5)
  85#define STATUS_CRC_READ_ERR		(1 << 3)
  86#define STATUS_CRC_WRITE_ERR		(1 << 2)
  87#define STATUS_TIME_OUT_RESP		(1 << 1)
  88#define STATUS_TIME_OUT_READ		(1 << 0)
  89#define STATUS_ERR_MASK			0x2f
  90
  91#define CMD_DAT_CONT_CMD_RESP_LONG_OFF	(1 << 12)
  92#define CMD_DAT_CONT_STOP_READWAIT	(1 << 11)
  93#define CMD_DAT_CONT_START_READWAIT	(1 << 10)
  94#define CMD_DAT_CONT_BUS_WIDTH_4	(2 << 8)
  95#define CMD_DAT_CONT_INIT		(1 << 7)
  96#define CMD_DAT_CONT_WRITE		(1 << 4)
  97#define CMD_DAT_CONT_DATA_ENABLE	(1 << 3)
  98#define CMD_DAT_CONT_RESPONSE_48BIT_CRC	(1 << 0)
  99#define CMD_DAT_CONT_RESPONSE_136BIT	(2 << 0)
 100#define CMD_DAT_CONT_RESPONSE_48BIT	(3 << 0)
 101
 102#define INT_SDIO_INT_WKP_EN		(1 << 18)
 103#define INT_CARD_INSERTION_WKP_EN	(1 << 17)
 104#define INT_CARD_REMOVAL_WKP_EN		(1 << 16)
 105#define INT_CARD_INSERTION_EN		(1 << 15)
 106#define INT_CARD_REMOVAL_EN		(1 << 14)
 107#define INT_SDIO_IRQ_EN			(1 << 13)
 108#define INT_DAT0_EN			(1 << 12)
 109#define INT_BUF_READ_EN			(1 << 4)
 110#define INT_BUF_WRITE_EN		(1 << 3)
 111#define INT_END_CMD_RES_EN		(1 << 2)
 112#define INT_WRITE_OP_DONE_EN		(1 << 1)
 113#define INT_READ_OP_EN			(1 << 0)
 114
 
 
 
 
 
 
 115struct mxcmci_host {
 116	struct mmc_host		*mmc;
 117	struct resource		*res;
 118	void __iomem		*base;
 119	int			irq;
 120	int			detect_irq;
 121	struct dma_chan		*dma;
 122	struct dma_async_tx_descriptor *desc;
 123	int			do_dma;
 124	int			default_irq_mask;
 125	int			use_sdio;
 126	unsigned int		power_mode;
 127	struct imxmmc_platform_data *pdata;
 128
 129	struct mmc_request	*req;
 130	struct mmc_command	*cmd;
 131	struct mmc_data		*data;
 132
 133	unsigned int		datasize;
 134	unsigned int		dma_dir;
 135
 136	u16			rev_no;
 137	unsigned int		cmdat;
 138
 139	struct clk		*clk_ipg;
 140	struct clk		*clk_per;
 141
 142	int			clock;
 143
 144	struct work_struct	datawork;
 145	spinlock_t		lock;
 146
 147	struct regulator	*vcc;
 148
 149	int			burstlen;
 150	int			dmareq;
 151	struct dma_slave_config dma_slave_config;
 152	struct imx_dma_data	dma_data;
 
 
 
 153};
 154
 155static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 156
 157static inline void mxcmci_init_ocr(struct mxcmci_host *host)
 158{
 159	host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
 
 
 
 
 160
 161	if (IS_ERR(host->vcc)) {
 162		host->vcc = NULL;
 163	} else {
 164		host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
 165		if (host->pdata && host->pdata->ocr_avail)
 166			dev_warn(mmc_dev(host->mmc),
 167				"pdata->ocr_avail will not be used\n");
 168	}
 169
 170	if (host->vcc == NULL) {
 171		/* fall-back to platform data */
 172		if (host->pdata && host->pdata->ocr_avail)
 173			host->mmc->ocr_avail = host->pdata->ocr_avail;
 174		else
 175			host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 176	}
 177}
 178
 179static inline void mxcmci_set_power(struct mxcmci_host *host,
 180				    unsigned char power_mode,
 181				    unsigned int vdd)
 182{
 183	if (host->vcc) {
 184		if (power_mode == MMC_POWER_UP)
 185			mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
 186		else if (power_mode == MMC_POWER_OFF)
 187			mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
 
 
 188	}
 189
 190	if (host->pdata && host->pdata->setpower)
 191		host->pdata->setpower(mmc_dev(host->mmc), vdd);
 192}
 193
 194static inline int mxcmci_use_dma(struct mxcmci_host *host)
 195{
 196	return host->do_dma;
 197}
 198
 199static void mxcmci_softreset(struct mxcmci_host *host)
 200{
 201	int i;
 202
 203	dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");
 204
 205	/* reset sequence */
 206	writew(STR_STP_CLK_RESET, host->base + MMC_REG_STR_STP_CLK);
 207	writew(STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
 208			host->base + MMC_REG_STR_STP_CLK);
 209
 210	for (i = 0; i < 8; i++)
 211		writew(STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 212
 213	writew(0xff, host->base + MMC_REG_RES_TO);
 
 
 
 
 
 
 214}
 215static int mxcmci_setup_dma(struct mmc_host *mmc);
 
 
 216
 217static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 218{
 219	unsigned int nob = data->blocks;
 220	unsigned int blksz = data->blksz;
 221	unsigned int datasize = nob * blksz;
 222	struct scatterlist *sg;
 223	enum dma_transfer_direction slave_dirn;
 224	int i, nents;
 225
 226	if (data->flags & MMC_DATA_STREAM)
 227		nob = 0xffff;
 228
 229	host->data = data;
 230	data->bytes_xfered = 0;
 231
 232	writew(nob, host->base + MMC_REG_NOB);
 233	writew(blksz, host->base + MMC_REG_BLK_LEN);
 234	host->datasize = datasize;
 235
 236	if (!mxcmci_use_dma(host))
 237		return 0;
 238
 239	for_each_sg(data->sg, sg, data->sg_len, i) {
 240		if (sg->offset & 3 || sg->length & 3) {
 241			host->do_dma = 0;
 242			return 0;
 243		}
 244	}
 245
 246	if (data->flags & MMC_DATA_READ) {
 247		host->dma_dir = DMA_FROM_DEVICE;
 248		slave_dirn = DMA_DEV_TO_MEM;
 249	} else {
 250		host->dma_dir = DMA_TO_DEVICE;
 251		slave_dirn = DMA_MEM_TO_DEV;
 
 
 252	}
 253
 254	nents = dma_map_sg(host->dma->device->dev, data->sg,
 255				     data->sg_len,  host->dma_dir);
 256	if (nents != data->sg_len)
 257		return -EINVAL;
 258
 259	host->desc = dmaengine_prep_slave_sg(host->dma,
 260		data->sg, data->sg_len, slave_dirn,
 261		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 262
 263	if (!host->desc) {
 264		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 265				host->dma_dir);
 266		host->do_dma = 0;
 267		return 0; /* Fall back to PIO */
 268	}
 269	wmb();
 270
 271	dmaengine_submit(host->desc);
 272	dma_async_issue_pending(host->dma);
 273
 
 
 274	return 0;
 275}
 276
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 277static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
 278		unsigned int cmdat)
 279{
 280	u32 int_cntr = host->default_irq_mask;
 281	unsigned long flags;
 282
 283	WARN_ON(host->cmd != NULL);
 284	host->cmd = cmd;
 285
 286	switch (mmc_resp_type(cmd)) {
 287	case MMC_RSP_R1: /* short CRC, OPCODE */
 288	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
 289		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT_CRC;
 290		break;
 291	case MMC_RSP_R2: /* long 136 bit + CRC */
 292		cmdat |= CMD_DAT_CONT_RESPONSE_136BIT;
 293		break;
 294	case MMC_RSP_R3: /* short */
 295		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT;
 296		break;
 297	case MMC_RSP_NONE:
 298		break;
 299	default:
 300		dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n",
 301				mmc_resp_type(cmd));
 302		cmd->error = -EINVAL;
 303		return -EINVAL;
 304	}
 305
 306	int_cntr = INT_END_CMD_RES_EN;
 307
 308	if (mxcmci_use_dma(host))
 309		int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN;
 
 
 
 
 
 
 310
 311	spin_lock_irqsave(&host->lock, flags);
 312	if (host->use_sdio)
 313		int_cntr |= INT_SDIO_IRQ_EN;
 314	writel(int_cntr, host->base + MMC_REG_INT_CNTR);
 315	spin_unlock_irqrestore(&host->lock, flags);
 316
 317	writew(cmd->opcode, host->base + MMC_REG_CMD);
 318	writel(cmd->arg, host->base + MMC_REG_ARG);
 319	writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT);
 320
 321	return 0;
 322}
 323
 324static void mxcmci_finish_request(struct mxcmci_host *host,
 325		struct mmc_request *req)
 326{
 327	u32 int_cntr = host->default_irq_mask;
 328	unsigned long flags;
 329
 330	spin_lock_irqsave(&host->lock, flags);
 331	if (host->use_sdio)
 332		int_cntr |= INT_SDIO_IRQ_EN;
 333	writel(int_cntr, host->base + MMC_REG_INT_CNTR);
 334	spin_unlock_irqrestore(&host->lock, flags);
 335
 336	host->req = NULL;
 337	host->cmd = NULL;
 338	host->data = NULL;
 339
 340	mmc_request_done(host->mmc, req);
 341}
 342
 343static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
 344{
 345	struct mmc_data *data = host->data;
 346	int data_error;
 347
 348	if (mxcmci_use_dma(host)) {
 349		dmaengine_terminate_all(host->dma);
 350		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 351				host->dma_dir);
 
 352	}
 353
 354	if (stat & STATUS_ERR_MASK) {
 355		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
 356				stat);
 357		if (stat & STATUS_CRC_READ_ERR) {
 358			dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
 359			data->error = -EILSEQ;
 360		} else if (stat & STATUS_CRC_WRITE_ERR) {
 361			u32 err_code = (stat >> 9) & 0x3;
 362			if (err_code == 2) { /* No CRC response */
 363				dev_err(mmc_dev(host->mmc),
 364					"%s: No CRC -ETIMEDOUT\n", __func__);
 365				data->error = -ETIMEDOUT;
 366			} else {
 367				dev_err(mmc_dev(host->mmc),
 368					"%s: -EILSEQ\n", __func__);
 369				data->error = -EILSEQ;
 370			}
 371		} else if (stat & STATUS_TIME_OUT_READ) {
 372			dev_err(mmc_dev(host->mmc),
 373				"%s: read -ETIMEDOUT\n", __func__);
 374			data->error = -ETIMEDOUT;
 375		} else {
 376			dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
 377			data->error = -EIO;
 378		}
 379	} else {
 380		data->bytes_xfered = host->datasize;
 381	}
 382
 383	data_error = data->error;
 384
 385	host->data = NULL;
 386
 387	return data_error;
 388}
 389
 390static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)
 391{
 392	struct mmc_command *cmd = host->cmd;
 393	int i;
 394	u32 a, b, c;
 395
 396	if (!cmd)
 397		return;
 398
 399	if (stat & STATUS_TIME_OUT_RESP) {
 400		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
 401		cmd->error = -ETIMEDOUT;
 402	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
 403		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
 404		cmd->error = -EILSEQ;
 405	}
 406
 407	if (cmd->flags & MMC_RSP_PRESENT) {
 408		if (cmd->flags & MMC_RSP_136) {
 409			for (i = 0; i < 4; i++) {
 410				a = readw(host->base + MMC_REG_RES_FIFO);
 411				b = readw(host->base + MMC_REG_RES_FIFO);
 412				cmd->resp[i] = a << 16 | b;
 413			}
 414		} else {
 415			a = readw(host->base + MMC_REG_RES_FIFO);
 416			b = readw(host->base + MMC_REG_RES_FIFO);
 417			c = readw(host->base + MMC_REG_RES_FIFO);
 418			cmd->resp[0] = a << 24 | b << 8 | c >> 8;
 419		}
 420	}
 421}
 422
 423static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
 424{
 425	u32 stat;
 426	unsigned long timeout = jiffies + HZ;
 427
 428	do {
 429		stat = readl(host->base + MMC_REG_STATUS);
 430		if (stat & STATUS_ERR_MASK)
 431			return stat;
 432		if (time_after(jiffies, timeout)) {
 433			mxcmci_softreset(host);
 434			mxcmci_set_clk_rate(host, host->clock);
 435			return STATUS_TIME_OUT_READ;
 436		}
 437		if (stat & mask)
 438			return 0;
 439		cpu_relax();
 440	} while (1);
 441}
 442
 443static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
 444{
 445	unsigned int stat;
 446	u32 *buf = _buf;
 447
 448	while (bytes > 3) {
 449		stat = mxcmci_poll_status(host,
 450				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
 451		if (stat)
 452			return stat;
 453		*buf++ = readl(host->base + MMC_REG_BUFFER_ACCESS);
 454		bytes -= 4;
 455	}
 456
 457	if (bytes) {
 458		u8 *b = (u8 *)buf;
 459		u32 tmp;
 460
 461		stat = mxcmci_poll_status(host,
 462				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
 463		if (stat)
 464			return stat;
 465		tmp = readl(host->base + MMC_REG_BUFFER_ACCESS);
 466		memcpy(b, &tmp, bytes);
 467	}
 468
 469	return 0;
 470}
 471
 472static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
 473{
 474	unsigned int stat;
 475	u32 *buf = _buf;
 476
 477	while (bytes > 3) {
 478		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 479		if (stat)
 480			return stat;
 481		writel(*buf++, host->base + MMC_REG_BUFFER_ACCESS);
 482		bytes -= 4;
 483	}
 484
 485	if (bytes) {
 486		u8 *b = (u8 *)buf;
 487		u32 tmp;
 488
 489		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 490		if (stat)
 491			return stat;
 492
 493		memcpy(&tmp, b, bytes);
 494		writel(tmp, host->base + MMC_REG_BUFFER_ACCESS);
 495	}
 496
 497	stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 498	if (stat)
 499		return stat;
 500
 501	return 0;
 502}
 503
 504static int mxcmci_transfer_data(struct mxcmci_host *host)
 505{
 506	struct mmc_data *data = host->req->data;
 507	struct scatterlist *sg;
 508	int stat, i;
 509
 510	host->data = data;
 511	host->datasize = 0;
 512
 513	if (data->flags & MMC_DATA_READ) {
 514		for_each_sg(data->sg, sg, data->sg_len, i) {
 515			stat = mxcmci_pull(host, sg_virt(sg), sg->length);
 516			if (stat)
 517				return stat;
 518			host->datasize += sg->length;
 519		}
 520	} else {
 521		for_each_sg(data->sg, sg, data->sg_len, i) {
 522			stat = mxcmci_push(host, sg_virt(sg), sg->length);
 523			if (stat)
 524				return stat;
 525			host->datasize += sg->length;
 526		}
 527		stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
 528		if (stat)
 529			return stat;
 530	}
 531	return 0;
 532}
 533
 534static void mxcmci_datawork(struct work_struct *work)
 535{
 536	struct mxcmci_host *host = container_of(work, struct mxcmci_host,
 537						  datawork);
 538	int datastat = mxcmci_transfer_data(host);
 539
 540	writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
 541		host->base + MMC_REG_STATUS);
 542	mxcmci_finish_data(host, datastat);
 543
 544	if (host->req->stop) {
 545		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
 546			mxcmci_finish_request(host, host->req);
 547			return;
 548		}
 549	} else {
 550		mxcmci_finish_request(host, host->req);
 551	}
 552}
 553
 554static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
 555{
 556	struct mmc_data *data = host->data;
 557	int data_error;
 
 
 
 
 
 
 
 
 558
 559	if (!data)
 
 560		return;
 
 
 
 
 
 561
 562	data_error = mxcmci_finish_data(host, stat);
 563
 
 
 
 
 
 564	mxcmci_read_response(host, stat);
 565	host->cmd = NULL;
 566
 567	if (host->req->stop) {
 568		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
 569			mxcmci_finish_request(host, host->req);
 570			return;
 571		}
 572	} else {
 573		mxcmci_finish_request(host, host->req);
 574	}
 575}
 576
 577static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
 578{
 579	mxcmci_read_response(host, stat);
 580	host->cmd = NULL;
 581
 582	if (!host->data && host->req) {
 583		mxcmci_finish_request(host, host->req);
 584		return;
 585	}
 586
 587	/* For the DMA case the DMA engine handles the data transfer
 588	 * automatically. For non DMA we have to do it ourselves.
 589	 * Don't do it in interrupt context though.
 590	 */
 591	if (!mxcmci_use_dma(host) && host->data)
 592		schedule_work(&host->datawork);
 593
 594}
 595
 596static irqreturn_t mxcmci_irq(int irq, void *devid)
 597{
 598	struct mxcmci_host *host = devid;
 599	unsigned long flags;
 600	bool sdio_irq;
 601	u32 stat;
 602
 603	stat = readl(host->base + MMC_REG_STATUS);
 604	writel(stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
 605			STATUS_WRITE_OP_DONE), host->base + MMC_REG_STATUS);
 
 
 606
 607	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
 608
 609	spin_lock_irqsave(&host->lock, flags);
 610	sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
 611	spin_unlock_irqrestore(&host->lock, flags);
 612
 613	if (mxcmci_use_dma(host) &&
 614	    (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
 615		writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
 616			host->base + MMC_REG_STATUS);
 617
 618	if (sdio_irq) {
 619		writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
 620		mmc_signal_sdio_irq(host->mmc);
 621	}
 622
 623	if (stat & STATUS_END_CMD_RESP)
 624		mxcmci_cmd_done(host, stat);
 625
 626	if (mxcmci_use_dma(host) &&
 627		  (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
 628		mxcmci_data_done(host, stat);
 
 629
 630	if (host->default_irq_mask &&
 631		  (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
 632		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
 633
 634	return IRQ_HANDLED;
 635}
 636
 637static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
 638{
 639	struct mxcmci_host *host = mmc_priv(mmc);
 640	unsigned int cmdat = host->cmdat;
 641	int error;
 642
 643	WARN_ON(host->req != NULL);
 644
 645	host->req = req;
 646	host->cmdat &= ~CMD_DAT_CONT_INIT;
 647
 648	if (host->dma)
 649		host->do_dma = 1;
 650
 651	if (req->data) {
 652		error = mxcmci_setup_data(host, req->data);
 653		if (error) {
 654			req->cmd->error = error;
 655			goto out;
 656		}
 657
 658
 659		cmdat |= CMD_DAT_CONT_DATA_ENABLE;
 660
 661		if (req->data->flags & MMC_DATA_WRITE)
 662			cmdat |= CMD_DAT_CONT_WRITE;
 663	}
 664
 665	error = mxcmci_start_cmd(host, req->cmd, cmdat);
 666
 667out:
 668	if (error)
 669		mxcmci_finish_request(host, req);
 670}
 671
 672static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
 673{
 674	unsigned int divider;
 675	int prescaler = 0;
 676	unsigned int clk_in = clk_get_rate(host->clk_per);
 677
 678	while (prescaler <= 0x800) {
 679		for (divider = 1; divider <= 0xF; divider++) {
 680			int x;
 681
 682			x = (clk_in / (divider + 1));
 683
 684			if (prescaler)
 685				x /= (prescaler * 2);
 686
 687			if (x <= clk_ios)
 688				break;
 689		}
 690		if (divider < 0x10)
 691			break;
 692
 693		if (prescaler == 0)
 694			prescaler = 1;
 695		else
 696			prescaler <<= 1;
 697	}
 698
 699	writew((prescaler << 4) | divider, host->base + MMC_REG_CLK_RATE);
 700
 701	dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",
 702			prescaler, divider, clk_in, clk_ios);
 703}
 704
 705static int mxcmci_setup_dma(struct mmc_host *mmc)
 706{
 707	struct mxcmci_host *host = mmc_priv(mmc);
 708	struct dma_slave_config *config = &host->dma_slave_config;
 709
 710	config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
 711	config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
 712	config->dst_addr_width = 4;
 713	config->src_addr_width = 4;
 714	config->dst_maxburst = host->burstlen;
 715	config->src_maxburst = host->burstlen;
 716	config->device_fc = false;
 717
 718	return dmaengine_slave_config(host->dma, config);
 719}
 720
 721static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 722{
 723	struct mxcmci_host *host = mmc_priv(mmc);
 724	int burstlen, ret;
 725
 726	/*
 727	 * use burstlen of 64 (16 words) in 4 bit mode (--> reg value  0)
 728	 * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16)
 729	 */
 730	if (ios->bus_width == MMC_BUS_WIDTH_4)
 731		burstlen = 16;
 732	else
 733		burstlen = 4;
 734
 735	if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
 736		host->burstlen = burstlen;
 737		ret = mxcmci_setup_dma(mmc);
 738		if (ret) {
 739			dev_err(mmc_dev(host->mmc),
 740				"failed to config DMA channel. Falling back to PIO\n");
 741			dma_release_channel(host->dma);
 742			host->do_dma = 0;
 743			host->dma = NULL;
 744		}
 745	}
 746
 747	if (ios->bus_width == MMC_BUS_WIDTH_4)
 748		host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
 749	else
 750		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
 751
 752	if (host->power_mode != ios->power_mode) {
 753		mxcmci_set_power(host, ios->power_mode, ios->vdd);
 754		host->power_mode = ios->power_mode;
 
 755
 756		if (ios->power_mode == MMC_POWER_ON)
 757			host->cmdat |= CMD_DAT_CONT_INIT;
 758	}
 759
 760	if (ios->clock) {
 761		mxcmci_set_clk_rate(host, ios->clock);
 762		writew(STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
 763	} else {
 764		writew(STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK);
 765	}
 766
 767	host->clock = ios->clock;
 768}
 769
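/*
 * Card-detect interrupt supplied by board/platform code; schedule a
 * rescan of the slot with a 250 ms debounce delay.
 */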
 770static irqreturn_t mxcmci_detect_irq(int irq, void *data)
 771{
 772	struct mmc_host *mmc = data;
 773
 774	dev_dbg(mmc_dev(mmc), "%s\n", __func__);
 775
 776	mmc_detect_change(mmc, msecs_to_jiffies(250));
 777	return IRQ_HANDLED;
 778}
 779
 780static int mxcmci_get_ro(struct mmc_host *mmc)
 781{
 782	struct mxcmci_host *host = mmc_priv(mmc);
 783
 784	if (host->pdata && host->pdata->get_ro)
 785		return !!host->pdata->get_ro(mmc_dev(mmc));
 786	/*
 787	 * Board doesn't support read only detection; let the mmc core
 788	 * decide what to do.
 789	 */
 790	return -ENOSYS;
 791}
 792
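/*
 * Enable or disable the controller's SDIO interrupt by toggling
 * INT_SDIO_IRQ_EN in MMC_REG_INT_CNTR under the host lock.
 */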
 793static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
 794{
 795	struct mxcmci_host *host = mmc_priv(mmc);
 796	unsigned long flags;
 797	u32 int_cntr;
 798
 799	spin_lock_irqsave(&host->lock, flags);
 800	host->use_sdio = enable;
 801	int_cntr = readl(host->base + MMC_REG_INT_CNTR);
 802
 803	if (enable)
 804		int_cntr |= INT_SDIO_IRQ_EN;
 805	else
 806		int_cntr &= ~INT_SDIO_IRQ_EN;
 807
 808	writel(int_cntr, host->base + MMC_REG_INT_CNTR);
 809	spin_unlock_irqrestore(&host->lock, flags);
 810}
 811
 812static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
 813{
 814	/*
 815	 * MX3 SoCs have a silicon bug which corrupts CRC calculation of
  816	 * multi-block transfers when the connected SDIO peripheral doesn't
 817	 * drive the BUSY line as required by the specs.
 818	 * One way to prevent this is to only allow 1-bit transfers.
 819	 */
 820
 821	if (cpu_is_mx3() && card->type == MMC_TYPE_SDIO)
 822		host->caps &= ~MMC_CAP_4_BIT_DATA;
 823	else
 824		host->caps |= MMC_CAP_4_BIT_DATA;
 825}
 826
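/*
 * dmaengine filter callback: accept only general-purpose i.MX DMA
 * channels and attach the host's DMA request data to the channel.
 */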
 827static bool filter(struct dma_chan *chan, void *param)
 828{
 829	struct mxcmci_host *host = param;
 830
 831	if (!imx_dma_is_general_purpose(chan))
 832		return false;
 833
 834	chan->private = &host->dma_data;
 835
 836	return true;
 837}
 838
 839static const struct mmc_host_ops mxcmci_ops = {
 840	.request		= mxcmci_request,
 841	.set_ios		= mxcmci_set_ios,
 842	.get_ro			= mxcmci_get_ro,
 843	.enable_sdio_irq	= mxcmci_enable_sdio_irq,
 844	.init_card		= mxcmci_init_card,
 845};
 846
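/*
 * Probe: map the register window, obtain the ipg/per clocks, soft-reset
 * the controller and verify its revision, optionally request a slave DMA
 * channel, install the interrupt handler and register the mmc host.
 */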
 847static int mxcmci_probe(struct platform_device *pdev)
 848{
 849	struct mmc_host *mmc;
 850	struct mxcmci_host *host = NULL;
 851	struct resource *iores, *r;
 852	int ret = 0, irq;
 853	dma_cap_mask_t mask;
 854
 855	pr_info("i.MX SDHC driver\n");
 856
 857	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 858	irq = platform_get_irq(pdev, 0);
 859	if (!iores || irq < 0)
 860		return -EINVAL;
 861
 862	r = request_mem_region(iores->start, resource_size(iores), pdev->name);
 863	if (!r)
 864		return -EBUSY;
 865
 866	mmc = mmc_alloc_host(sizeof(struct mxcmci_host), &pdev->dev);
 867	if (!mmc) {
 868		ret = -ENOMEM;
 869		goto out_release_mem;
 870	}
 871
 872	mmc->ops = &mxcmci_ops;
 873	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
 874
 875	/* MMC core transfer sizes tunable parameters */
 876	mmc->max_segs = 64;
 877	mmc->max_blk_size = 2048;
 878	mmc->max_blk_count = 65535;
 879	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
 880	mmc->max_seg_size = mmc->max_req_size;
 881
 882	host = mmc_priv(mmc);
 883	host->base = ioremap(r->start, resource_size(r));
 884	if (!host->base) {
 885		ret = -ENOMEM;
 886		goto out_free;
 887	}
 888
 889	host->mmc = mmc;
 890	host->pdata = pdev->dev.platform_data;
 891	spin_lock_init(&host->lock);
 892
 893	mxcmci_init_ocr(host);
 894
 895	if (host->pdata && host->pdata->dat3_card_detect)
 896		host->default_irq_mask =
 897			INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN;
 898	else
 899		host->default_irq_mask = 0;
 900
 901	host->res = r;
 902	host->irq = irq;
 903
 904	host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
 905	if (IS_ERR(host->clk_ipg)) {
 906		ret = PTR_ERR(host->clk_ipg);
 907		goto out_iounmap;
 908	}
 909
 910	host->clk_per = devm_clk_get(&pdev->dev, "per");
 911	if (IS_ERR(host->clk_per)) {
 912		ret = PTR_ERR(host->clk_per);
 913		goto out_iounmap;
 914	}
 915
 916	clk_prepare_enable(host->clk_per);
 917	clk_prepare_enable(host->clk_ipg);
 918
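	/*
	 * Reset the controller and check the revision register; this driver
	 * only supports controllers reporting revision 0x400.
	 */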
 919	mxcmci_softreset(host);
 920
 921	host->rev_no = readw(host->base + MMC_REG_REV_NO);
 922	if (host->rev_no != 0x400) {
 923		ret = -ENODEV;
 924		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
 925			host->rev_no);
 926		goto out_clk_put;
 927	}
 928
 929	mmc->f_min = clk_get_rate(host->clk_per) >> 16;
 930	mmc->f_max = clk_get_rate(host->clk_per) >> 1;
 931
 932	/* recommended in data sheet */
 933	writew(0x2db4, host->base + MMC_REG_READ_TO);
 934
 935	writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR);
 936
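	/*
	 * DMA is optional: if the platform provides an IORESOURCE_DMA entry,
	 * describe the SDHC request to the i.MX dmaengine driver and try to
	 * obtain a slave channel; otherwise the driver falls back to PIO.
	 */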
 937	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
 938	if (r) {
 939		host->dmareq = r->start;
 940		host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
 941		host->dma_data.priority = DMA_PRIO_LOW;
 942		host->dma_data.dma_request = host->dmareq;
 943		dma_cap_zero(mask);
 944		dma_cap_set(DMA_SLAVE, mask);
 945		host->dma = dma_request_channel(mask, filter, host);
 946		if (host->dma)
 947			mmc->max_seg_size = dma_get_max_seg_size(
 948					host->dma->device->dev);
 949	}
 950
 951	if (!host->dma)
 952		dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
 953
 954	INIT_WORK(&host->datawork, mxcmci_datawork);
 955
 956	ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host);
 957	if (ret)
 958		goto out_free_dma;
 959
 960	platform_set_drvdata(pdev, mmc);
 961
 962	if (host->pdata && host->pdata->init) {
 963		ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,
 964				host->mmc);
 965		if (ret)
 966			goto out_free_irq;
 967	}
 968
 969	mmc_add_host(mmc);
 970
 971	return 0;
 972
 973out_free_irq:
 974	free_irq(host->irq, host);
 975out_free_dma:
 976	if (host->dma)
 977		dma_release_channel(host->dma);
 978out_clk_put:
 979	clk_disable_unprepare(host->clk_per);
 980	clk_disable_unprepare(host->clk_ipg);
 981out_iounmap:
 982	iounmap(host->base);
 983out_free:
 984	mmc_free_host(mmc);
 985out_release_mem:
 986	release_mem_region(iores->start, resource_size(iores));
 987	return ret;
 988}
 989
 990static int mxcmci_remove(struct platform_device *pdev)
 991{
 992	struct mmc_host *mmc = platform_get_drvdata(pdev);
 993	struct mxcmci_host *host = mmc_priv(mmc);
 994
 995	platform_set_drvdata(pdev, NULL);
 996
 997	mmc_remove_host(mmc);
 998
 999	if (host->vcc)
1000		regulator_put(host->vcc);
1001
1002	if (host->pdata && host->pdata->exit)
1003		host->pdata->exit(&pdev->dev, mmc);
1004
1005	free_irq(host->irq, host);
1006	iounmap(host->base);
1007
1008	if (host->dma)
1009		dma_release_channel(host->dma);
1010
1011	clk_disable_unprepare(host->clk_per);
1012	clk_disable_unprepare(host->clk_ipg);
1013
1014	release_mem_region(host->res->start, resource_size(host->res));
1015
1016	mmc_free_host(mmc);
1017
1018	return 0;
1019}
1020
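/*
 * Legacy system PM: suspend the mmc core and gate both clocks on suspend;
 * ungate the clocks before resuming the core on resume.
 */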
1021#ifdef CONFIG_PM
1022static int mxcmci_suspend(struct device *dev)
1023{
1024	struct mmc_host *mmc = dev_get_drvdata(dev);
1025	struct mxcmci_host *host = mmc_priv(mmc);
1026	int ret = 0;
1027
1028	if (mmc)
1029		ret = mmc_suspend_host(mmc);
1030	clk_disable_unprepare(host->clk_per);
1031	clk_disable_unprepare(host->clk_ipg);
1032
1033	return ret;
1034}
1035
1036static int mxcmci_resume(struct device *dev)
1037{
1038	struct mmc_host *mmc = dev_get_drvdata(dev);
1039	struct mxcmci_host *host = mmc_priv(mmc);
1040	int ret = 0;
1041
1042	clk_prepare_enable(host->clk_per);
1043	clk_prepare_enable(host->clk_ipg);
1044	if (mmc)
1045		ret = mmc_resume_host(mmc);
1046
1047	return ret;
1048}
1049
1050static const struct dev_pm_ops mxcmci_pm_ops = {
1051	.suspend	= mxcmci_suspend,
1052	.resume		= mxcmci_resume,
1053};
1054#endif
1055
1056static struct platform_driver mxcmci_driver = {
1057	.probe		= mxcmci_probe,
1058	.remove		= mxcmci_remove,
1059	.driver		= {
1060		.name		= DRIVER_NAME,
1061		.owner		= THIS_MODULE,
1062#ifdef CONFIG_PM
1063		.pm	= &mxcmci_pm_ops,
1064#endif
1065	}
1066};
1067
1068module_platform_driver(mxcmci_driver);
1069
1070MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
1071MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1072MODULE_LICENSE("GPL");
1073MODULE_ALIAS("platform:imx-mmc");